diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh new file mode 100755 index 0000000000..36266c1842 --- /dev/null +++ b/.evergreen/combine-coverage.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Coverage combine merges (and removes) all the coverage files and +# generates a new .coverage file in the current directory. + +set -eu + +. .evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) +fi + +createvirtualenv "$PYTHON_BINARY" covenv +# Keep in sync with run-tests.sh +# coverage >=5 is needed for relative_files=true. +pip install -q "coverage[toml]>=5,<=7.5" + +pip list +ls -la coverage/ + +python -m coverage combine coverage/coverage.* +python -m coverage html -d htmlcov diff --git a/.evergreen/config.yml b/.evergreen/config.yml new file mode 100644 index 0000000000..91fa442775 --- /dev/null +++ b/.evergreen/config.yml @@ -0,0 +1,64 @@ +######################################## +# Evergreen Template for MongoDB Drivers +######################################## + +# When a task that used to pass starts to fail +# Go through all versions that may have been skipped to detect +# when the task started failing +stepback: true + +# Mark a failure as a system/bootstrap failure (purple box) rather than a task +# failure by default. +# Actual testing tasks are marked with `type: test` +command_type: system + +# Protect ourselves against a rogue test case, or curl gone wild, that runs forever +# Good rule of thumb: the averageish length a task takes, times 5 +# That roughly accounts for variable system performance for various buildvariants +exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily + # for macos hosts) + +# What to do when evergreen hits the timeout (`post:` tasks are run automatically) +timeout: + - command: subprocess.exec + params: + binary: ls -la + +include: + - filename: .evergreen/generated_configs/functions.yml + - filename: .evergreen/generated_configs/tasks.yml + - filename: .evergreen/generated_configs/variants.yml + +pre: + - func: "fetch source" + - func: "setup system" + - func: "assume ec2 role" + +post: + # Disabled, causing timeouts + # - func: "upload working dir" + - func: "teardown system" + - func: "upload coverage" + - func: "upload mo artifacts" + - func: "upload test results" + - func: "cleanup" + +tasks: + - name: resync_specs + commands: + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] + args: + - .evergreen/scripts/resync-all-specs.sh + working_dir: src + +buildvariants: + - name: resync_specs + display_name: "Resync Specs" + run_on: rhel80-small + cron: '0 16 * * MON' + patchable: true + tasks: + - name: resync_specs diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml new file mode 100644 index 0000000000..ce95648849 --- /dev/null +++ b/.evergreen/generated_configs/functions.yml @@ -0,0 +1,309 @@ +functions: + # Assume ec2 role + assume ec2 role: + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + duration_seconds: 3600 + + # Attach benchmark test results + attach benchmark test results: + - command: attach.results + params: + file_location: src/report.json + + # Cleanup + cleanup: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/cleanup.sh + working_dir: src + type: test + + # Download and merge coverage + download and merge coverage: + - command: ec2.assume_role + params: +
role_arn: ${assume_role_arn} + type: setup + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/download-and-merge-coverage.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/combine-coverage.sh + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/upload-coverage-report.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/htmlcov/index.html + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/htmlcov/index.html + permissions: public-read + content_type: text/html + display_name: Coverage Report HTML + optional: "true" + type: setup + + # Fetch source + fetch source: + - command: git.get_project + params: + directory: src + + # Run server + run server: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-server + - ${TEST_NAME} + working_dir: src + include_expansions_in_env: + - VERSION + - TOPOLOGY + - AUTH + - SSL + - ORCHESTRATION_FILE + - PYTHON_BINARY + - PYTHON_VERSION + - STORAGE_ENGINE + - REQUIRE_API_VERSION + - DRIVERS_TOOLS + - TEST_CRYPT_SHARED + - AUTH_AWS + - LOAD_BALANCER + - LOCAL_ATLAS + - NO_EXT + type: test + - command: expansions.update + params: + file: ${DRIVERS_TOOLS}/mo-expansion.yml + + # Run tests + run tests: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - setup-tests + - ${TEST_NAME} + - ${SUB_TEST_NAME} + working_dir: src + include_expansions_in_env: + - AUTH + - SSL + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + - COVERAGE + - PYTHON_BINARY + - LIBMONGOCRYPT_URL + - MONGODB_URI + - PYTHON_VERSION + - DISABLE_TEST_COMMANDS + - GREEN_FRAMEWORK + - NO_EXT + - COMPRESSORS + - MONGODB_API_VERSION + - REQUIRE_API_VERSION + - DEBUG_LOG + - DISABLE_FLAKY + - ORCHESTRATION_FILE + - OCSP_SERVER_TYPE + - VERSION + - IS_WIN32 + - REQUIRE_FIPS + - TEST_MIN_DEPS + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-tests + working_dir: src + type: test + + # Send dashboard data + send dashboard data: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission-setup.sh + working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test + - command: expansions.update + params: + file: src/expansion.yml + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission.sh + working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test + + # Setup system + setup system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/setup-system.sh + working_dir: src + 
include_expansions_in_env: + - is_patch + - project + - version_id + type: test + - command: expansions.update + params: + file: src/expansion.yml + + # Teardown system + teardown system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - teardown-tests + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/teardown.sh + working_dir: src + type: test + + # Upload coverage + upload coverage: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/.coverage + permissions: public-read + content_type: text/html + display_name: Raw Coverage Report + optional: "true" + type: setup + + # Upload mo artifacts + upload mo artifacts: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: archive.targz_pack + params: + target: mongo-coredumps.tgz + source_dir: ./ + include: + - ./**.core + - ./**.mdmp + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: mongo-coredumps.tgz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: Core Dumps - Execution + optional: "true" + type: setup + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: ${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: drivers-tools-logs.tar.gz + optional: "true" + type: setup + + # Upload test results + upload test results: + - command: attach.results + params: + file_location: ${DRIVERS_TOOLS}/results.json + - command: attach.xunit_results + params: + file: src/xunit-results/TEST-*.xml diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml new file mode 100644 index 0000000000..855cbefef8 --- /dev/null +++ b/.evergreen/generated_configs/tasks.yml @@ -0,0 +1,5116 @@ +tasks: + # Aws lambda tests + - name: test-aws-lambda-deployed + commands: + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: aws_lambda + tags: [aws_lambda] + + # Aws tests + - name: test-auth-aws-4.4-regular-python3.10 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + PYTHON_VERSION: "3.10" + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-5.0-assume-role-python3.11 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + PYTHON_VERSION: "3.11" + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-6.0-ec2-python3.12 + commands: + - func: run server + vars: + AUTH_AWS: 
"1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + PYTHON_VERSION: "3.12" + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-7.0-env-creds-python3.13 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + PYTHON_VERSION: "3.13" + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-8.0-session-creds-python3.14t + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + PYTHON_VERSION: 3.14t + tags: [auth-aws, auth-aws-session-creds, free-threaded] + - name: test-auth-aws-rapid-web-identity-python3.14 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + PYTHON_VERSION: "3.14" + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-rapid-web-identity-session-name-python3.14 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + PYTHON_VERSION: "3.14" + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-latest-ecs-python3.10 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + PYTHON_VERSION: "3.10" + tags: [auth-aws, auth-aws-ecs] + + # Backport pr tests + - name: backport-pr + commands: + - func: assume ec2 role + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh + - mongodb + - mongo-python-driver + - ${github.amrom.workers.devmit} + working_dir: src + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + + # Coverage report tests + - name: coverage-report + commands: + - func: download and merge coverage + depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] + tags: [coverage, pr] + + # Getdata tests + - name: getdata + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/run-getdata.sh + working_dir: src + type: test + + # Import time tests + - name: check-import-time + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/check-import-time.sh + - ${revision} + - ${github.amrom.workers.devmit} + working_dir: src + type: test + tags: [pr] + + # Kms tests + - name: test-gcpkms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp + tags: [] + - name: test-gcpkms-fail + commands: + - func: run server + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp-fail + tags: [pr] + - name: test-azurekms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure + tags: [] + - name: test-azurekms-fail + commands: + - func: run server + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure-fail + tags: [pr] + + # Min deps tests + - name: test-min-deps-python3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + 
TOPOLOGY: standalone + TEST_MIN_DEPS: "1" + tags: [test-min-deps, standalone-noauth-nossl] + - name: test-min-deps-python3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_MIN_DEPS: "1" + tags: [test-min-deps, replica_set-noauth-ssl] + - name: test-min-deps-python3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_MIN_DEPS: "1" + tags: [test-min-deps, sharded_cluster-auth-ssl] + + # Mod wsgi tests + - name: mod-wsgi-replica-set-python3.10 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.10" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.10" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.11 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.11" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.11" + tags: [mod_wsgi, pr] + - name: mod-wsgi-replica-set-python3.12 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.12" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.12" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.13 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.13" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.13" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.14 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.14" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.14" + tags: [mod_wsgi, pr] + + # No orchestration tests + - name: test-no-orchestration-python3.10 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.10" + tags: [test-no-orchestration, python-3.10] + - name: test-no-orchestration-python3.14 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.14" + tags: [test-no-orchestration, python-3.14] + - name: test-no-orchestration-pypy3.10 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: pypy3.10 + tags: [test-no-orchestration, python-pypy3.10] + + # No toolchain tests + - name: test-no-toolchain-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_NAME: default_sync + tags: [test-no-toolchain, standalone-noauth-nossl] + - name: test-no-toolchain-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_NAME: default_async + tags: [test-no-toolchain, replica_set-noauth-ssl] + - name: test-no-toolchain-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_NAME: default_sync + tags: [test-no-toolchain, 
sharded_cluster-auth-ssl] + + # Ocsp tests + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + 
OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: 
"3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-soft-fail-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-soft-fail-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: 
no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - 
name: test-ocsp-ecdsa-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - 
ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: 
ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + 
OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid 
+ TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: 
rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + 
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags: [ocsp, ocsp-rsa, latest]
+  - name: test-ocsp-rsa-soft-fail-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags: [ocsp, ocsp-rsa, "4.4"]
+  - name: test-ocsp-rsa-soft-fail-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags: [ocsp, ocsp-rsa, "5.0"]
+  - name: test-ocsp-rsa-soft-fail-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags: [ocsp, ocsp-rsa, "6.0"]
+  - name: test-ocsp-rsa-soft-fail-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags: [ocsp, ocsp-rsa, "7.0"]
+  - name: test-ocsp-rsa-soft-fail-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags: [ocsp, ocsp-rsa, "8.0"]
+  - name: test-ocsp-rsa-soft-fail-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags: [ocsp, ocsp-rsa, rapid]
+  - name: test-ocsp-rsa-soft-fail-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags: [ocsp, ocsp-rsa, latest]
+  - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "4.4"
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "5.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "6.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "7.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "8.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - rapid
+      - ocsp-staple
+  - name: test-ocsp-rsa-valid-cert-server-staples-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - latest
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "4.4"
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "5.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "6.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "7.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "8.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - rapid
+      - ocsp-staple
+  - name: test-ocsp-rsa-invalid-cert-server-staples-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - latest
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "4.4"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "5.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "6.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "7.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "8.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - rapid
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-valid-cert-server-staples-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: valid-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - latest
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "4.4"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "5.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "6.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "7.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - "8.0"
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - rapid
+      - ocsp-staple
+  - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags:
+      - ocsp
+      - ocsp-rsa
+      - latest
+      - ocsp-staple
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags: [ocsp, ocsp-rsa, "4.4"]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags: [ocsp, ocsp-rsa, "5.0"]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags: [ocsp, ocsp-rsa, "6.0"]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags: [ocsp, ocsp-rsa, "7.0"]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags: [ocsp, ocsp-rsa, "8.0"]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags: [ocsp, ocsp-rsa, rapid]
+  - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags: [ocsp, ocsp-rsa, latest]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags: [ocsp, ocsp-rsa, "4.4"]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags: [ocsp, ocsp-rsa, "5.0"]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags: [ocsp, ocsp-rsa, "6.0"]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags: [ocsp, ocsp-rsa, "7.0"]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags: [ocsp, ocsp-rsa, "8.0"]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags: [ocsp, ocsp-rsa, rapid]
+  - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: revoked-delegate
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags: [ocsp, ocsp-rsa, latest]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "4.4"
+    tags: [ocsp, ocsp-rsa, "4.4"]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "5.0"
+    tags: [ocsp, ocsp-rsa, "5.0"]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "6.0"
+    tags: [ocsp, ocsp-rsa, "6.0"]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "7.0"
+    tags: [ocsp, ocsp-rsa, "7.0"]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: "8.0"
+    tags: [ocsp, ocsp-rsa, "8.0"]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.10"
+          VERSION: rapid
+    tags: [ocsp, ocsp-rsa, rapid]
+  - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14
+    commands:
+      - func: run tests
+        vars:
+          ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json
+          OCSP_SERVER_TYPE: no-responder
+          TEST_NAME: ocsp
+          PYTHON_VERSION: "3.14"
+          VERSION: latest
+    tags: [ocsp, ocsp-rsa, latest]
+
+  # Oidc tests
+  - name: test-auth-oidc-default
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: default
+    tags: [auth_oidc]
+  - name: test-auth-oidc-azure
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: azure
+    tags: [auth_oidc, auth_oidc_remote]
+  - name: test-auth-oidc-gcp
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: gcp
+    tags: [auth_oidc, auth_oidc_remote]
+  - name: test-auth-oidc-eks
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: eks
+    tags: [auth_oidc, auth_oidc_remote]
+  - name: test-auth-oidc-aks
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: aks
+    tags: [auth_oidc, auth_oidc_remote]
+  - name: test-auth-oidc-gke
+    commands:
+      - func: run tests
+        vars:
+          TEST_NAME: auth_oidc
+          SUB_TEST_NAME: gke
+    tags: [auth_oidc, auth_oidc_remote]
+
+  # Perf tests
+  - name: perf-8.0-standalone-ssl
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: ssl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: sync
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone-ssl-async
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: ssl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: async
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: nossl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: sync
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone-async
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: nossl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: async
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+
+  # Search index tests
+  - name: test-search-index-helpers
+    commands:
+      - func: assume ec2 role
+      - func: run server
+        vars:
+          TEST_NAME: search_index
+      - func: run tests
+        vars:
+          TEST_NAME: search_index
+    tags: [search_index]
+
+  # Server version tests
+  - name: test-server-version-python3.14t-async-auth-nossl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: replica_set
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.14t
+      - replica_set-auth-nossl
+      - async
+      - free-threaded
+  - name: test-server-version-python3.13-sync-auth-nossl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.13
+      - replica_set-auth-nossl
+      - sync
+  - name: test-server-version-python3.12-async-auth-ssl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.12
+      - replica_set-auth-ssl
+      - async
+  - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.11
+      - replica_set-auth-ssl
+      - sync
+  - name: test-server-version-python3.11-async-noauth-nossl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.11
+      - replica_set-noauth-nossl
+      - async
+      - pr
+  - name: test-server-version-python3.10-sync-noauth-nossl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.10
+      - replica_set-noauth-nossl
+      - sync
+      - pr
+  - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - async
+  - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.14
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.14
+      - sharded_cluster-auth-nossl
+      - async
+  - name: test-server-version-python3.14t-sync-auth-nossl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.14t
+      - sharded_cluster-auth-nossl
+      - sync
+      - free-threaded
+  - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.10
+      - sharded_cluster-auth-ssl
+      - async
+      - pr
+  - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.11
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.13
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-server-version-python3.14-async-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-server-version-python3.14t-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.14t
+      - sharded_cluster-auth-ssl
+      - async
+      - free-threaded
+  - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.10
+      - sharded_cluster-auth-ssl
+      - sync
+      - pr
+  - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.11
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.13
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-server-version-python3.14-sync-auth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-server-version-python3.14t-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.14t
+      - sharded_cluster-auth-ssl
+      - sync
+      - free-threaded
+  - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-server-version-python3.12-async-noauth-nossl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.12
+      - sharded_cluster-noauth-nossl
+      - async
+  - name: test-server-version-python3.11-sync-noauth-nossl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.11
+      - sharded_cluster-noauth-nossl
+      - sync
+  - name: test-server-version-python3.10-async-noauth-ssl-sharded-cluster-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.10
+      - sharded_cluster-noauth-ssl
+      - async
+  - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-pypy3.10
+      - sharded_cluster-noauth-ssl
+      - sync
+  - name: test-server-version-python3.13-async-auth-nossl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.13
+      - standalone-auth-nossl
+      - async
+  - name: test-server-version-python3.12-sync-auth-nossl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.12
+      - standalone-auth-nossl
+      - sync
+  - name: test-server-version-python3.11-async-auth-ssl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.11
+      - standalone-auth-ssl
+      - async
+  - name: test-server-version-python3.10-sync-auth-ssl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.10
+      - standalone-auth-ssl
+      - sync
+  - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.10
+      - standalone-noauth-nossl
+      - async
+      - pr
+  - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - sync
+      - pr
+  - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: standalone
+          COVERAGE: "1"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - server-version
+      - python-3.14
+      - standalone-noauth-ssl
+      - async
+  - name: test-server-version-python3.14t-sync-noauth-ssl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: standalone
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: standalone
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - server-version
+      - python-3.14t
+      - standalone-noauth-ssl
+      - sync
+      - free-threaded
+
+  # Standard tests
+  - name: test-standard-v4.2-python3.11-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-3.11
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-3.14
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-standard-v4.2-python3.12-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - sync
+      - pypy
+  - name: test-standard-v4.2-python3.10-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-3.10
+      - standalone-noauth-nossl
+      - sync
+  - name: test-standard-v4.2-python3.14t-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-4.2
+      - python-3.14t
+      - standalone-noauth-nossl
+      - sync
+      - free-threaded
+  - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-3.11
+      - replica_set-noauth-ssl
+      - async
+  - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-3.14
+      - replica_set-noauth-ssl
+      - async
+  - name: test-standard-v4.4-python3.12-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - async
+      - pypy
+  - name: test-standard-v4.4-python3.10-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-3.10
+      - standalone-noauth-nossl
+      - async
+  - name: test-standard-v4.4-python3.14t-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-4.4
+      - python-3.14t
+      - standalone-noauth-nossl
+      - async
+      - free-threaded
+  - name: test-standard-v5.0-python3.10-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-5.0
+      - python-3.10
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-standard-v5.0-python3.14t-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-5.0
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - sync
+      - free-threaded
+  - name: test-standard-v5.0-python3.11-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-5.0
+      - python-3.11
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-5.0
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-standard-v5.0-python3.13-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-5.0
+      - python-3.13
+      - standalone-noauth-nossl
+      - sync
+  - name: test-standard-v6.0-python3.10-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-6.0
+      - python-3.10
+      - replica_set-noauth-ssl
+      - async
+  - name: test-standard-v6.0-python3.14t-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-6.0
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - async
+      - free-threaded
+  - name: test-standard-v6.0-python3.11-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-6.0
+      - python-3.11
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-6.0
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-6.0
+      - python-3.13
+      - standalone-noauth-nossl
+      - async
+  - name: test-standard-v7.0-python3.13-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-7.0
+      - python-3.13
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-standard-v7.0-python3.10-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-7.0
+      - python-3.10
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-standard-v7.0-python3.14t-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-7.0
+      - python-3.14t
+      - sharded_cluster-auth-ssl
+      - sync
+      - free-threaded
+  - name: test-standard-v7.0-python3.12-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-7.0
+      - python-3.12
+      - standalone-noauth-nossl
+      - sync
+  - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-7.0
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - sync
+      - pypy
+  - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-8.0
+      - python-3.13
+      - replica_set-noauth-ssl
+      - async
+  - name: test-standard-v8.0-python3.10-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.10"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-8.0
+      - python-3.10
+      - sharded_cluster-auth-ssl
+      - async
+  - name: test-standard-v8.0-python3.14t-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: 3.14t
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-8.0
+      - python-3.14t
+      - sharded_cluster-auth-ssl
+      - async
+      - free-threaded
+  - name: test-standard-v8.0-python3.12-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-8.0
+      - python-3.12
+      - standalone-noauth-nossl
+      - async
+  - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-8.0
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - async
+      - pypy
+  - name: test-standard-latest-python3.12-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-latest
+      - python-3.12
+      - replica_set-noauth-ssl
+      - async
+      - pr
+  - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-latest
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - async
+      - pypy
+  - name: test-standard-latest-python3.13-async-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: latest
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-latest
+      - python-3.13
+      - sharded_cluster-auth-ssl
+      - async
+      - pr
+  - name: test-standard-latest-python3.11-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-latest
+      - python-3.11
+      - standalone-noauth-nossl
+      - async
+      - pr
+  - name: test-standard-latest-python3.14-async-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_async
+    tags:
+      - test-standard
+      - server-latest
+      - python-3.14
+      - standalone-noauth-nossl
+      - async
+      - pr
+  - name: test-standard-rapid-python3.12-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+          PYTHON_VERSION: "3.12"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-rapid
+      - python-3.12
+      - replica_set-noauth-ssl
+      - sync
+  - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+          PYTHON_VERSION: pypy3.10
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-rapid
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - sync
+      - pypy
+  - name: test-standard-rapid-python3.13-sync-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: rapid
+          PYTHON_VERSION: "3.13"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-rapid
+      - python-3.13
+      - sharded_cluster-auth-ssl
+      - sync
+  - name: test-standard-rapid-python3.11-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+          PYTHON_VERSION: "3.11"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-rapid
+      - python-3.11
+      - standalone-noauth-nossl
+      - sync
+  - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+          PYTHON_VERSION: "3.14"
+          TEST_NAME: default_sync
+    tags:
+      - test-standard
+      - server-rapid
+      - python-3.14
+      - standalone-noauth-nossl
+      - sync
+
+  # Test non standard tests
+  - name: test-non-standard-v4.2-python3.11-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.11"
+    tags:
+      - test-non-standard
+      - server-4.2
+      - python-3.11
+      - replica_set-noauth-ssl
+      - noauth
+  - name: test-non-standard-v4.2-python3.12-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.12"
+    tags:
+      - test-non-standard
+      - server-4.2
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v4.2-python3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.10"
+    tags:
+      - test-non-standard
+      - server-4.2
+      - python-3.10
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.2"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-4.2
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - noauth
+      - pypy
+  - name: test-non-standard-v4.4-python3.14t-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+          PYTHON_VERSION: 3.14t
+    tags:
+      - test-non-standard
+      - server-4.4
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - noauth
+      - free-threaded
+  - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "4.4"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-4.4
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - noauth
+      - pypy
+  - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.14"
+    tags:
+      - test-non-standard
+      - server-4.4
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v4.4-python3.13-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.13"
+    tags:
+      - test-non-standard
+      - server-4.4
+      - python-3.13
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-v5.0-python3.11-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.11"
+    tags:
+      - test-non-standard
+      - server-5.0
+      - python-3.11
+      - replica_set-noauth-ssl
+      - noauth
+  - name: test-non-standard-v5.0-python3.12-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.12"
+    tags:
+      - test-non-standard
+      - server-5.0
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-5.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.10"
+    tags:
+      - test-non-standard
+      - server-5.0
+      - python-3.10
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-v6.0-python3.14t-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "6.0"
+          PYTHON_VERSION: 3.14t
+    tags:
+      - test-non-standard
+      - server-6.0
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - noauth
+      - free-threaded
+  - name: test-non-standard-v6.0-python3.14-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.14"
+    tags:
+      - test-non-standard
+      - server-6.0
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v6.0-python3.13-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.13"
+    tags:
+      - test-non-standard
+      - server-6.0
+      - python-3.13
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "6.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-6.0
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - noauth
+      - pypy
+  - name: test-non-standard-v7.0-python3.11-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.11"
+    tags:
+      - test-non-standard
+      - server-7.0
+      - python-3.11
+      - replica_set-noauth-ssl
+      - noauth
+  - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "7.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-7.0
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - noauth
+      - pypy
+  - name: test-non-standard-v7.0-python3.12-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.12"
+    tags:
+      - test-non-standard
+      - server-7.0
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v7.0-python3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "7.0"
+          PYTHON_VERSION: "3.10"
+    tags:
+      - test-non-standard
+      - server-7.0
+      - python-3.10
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-v8.0-python3.14t-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: "8.0"
+          PYTHON_VERSION: 3.14t
+    tags:
+      - test-non-standard
+      - server-8.0
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - noauth
+      - free-threaded
+  - name: test-non-standard-v8.0-python3.14-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.14"
+    tags:
+      - test-non-standard
+      - server-8.0
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-8.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-non-standard-v8.0-python3.13-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.13"
+    tags:
+      - test-non-standard
+      - server-8.0
+      - python-3.13
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-latest-python3.14t-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+          PYTHON_VERSION: 3.14t
+    tags:
+      - test-non-standard
+      - server-latest
+      - python-3.14t
+      - replica_set-noauth-ssl
+      - noauth
+      - free-threaded
+      - pr
+  - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: latest
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-latest
+      - python-pypy3.10
+      - replica_set-noauth-ssl
+      - noauth
+      - pypy
+  - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: latest
+          PYTHON_VERSION: "3.14"
+    tags:
+      - test-non-standard
+      - server-latest
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - auth
+      - pr
+  - name: test-non-standard-latest-python3.13-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: latest
+          PYTHON_VERSION: "3.13"
+    tags:
+      - test-non-standard
+      - server-latest
+      - python-3.13
+      - standalone-noauth-nossl
+      - noauth
+      - pr
+  - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: ssl
+          TOPOLOGY: replica_set
+          VERSION: rapid
+          PYTHON_VERSION: "3.11"
+    tags:
+      - test-non-standard
+      - server-rapid
+      - python-3.11
+      - replica_set-noauth-ssl
+      - noauth
+  - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: rapid
+          PYTHON_VERSION: "3.12"
+    tags:
+      - test-non-standard
+      - server-rapid
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+          PYTHON_VERSION: "3.10"
+    tags:
+      - test-non-standard
+      - server-rapid
+      - python-3.10
+      - standalone-noauth-nossl
+      - noauth
+  - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone
+    commands:
+      - func: run server
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+      - func: run tests
+        vars:
+          AUTH: noauth
+          SSL: nossl
+          TOPOLOGY: standalone
+          VERSION: rapid
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-non-standard
+      - server-rapid
+      - python-pypy3.10
+      - standalone-noauth-nossl
+      - noauth
+      - pypy
+
+  # Test standard auth tests
+  - name: test-standard-auth-v4.2-python3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+          PYTHON_VERSION: "3.10"
+    tags:
+      - test-standard-auth
+      - server-4.2
+      - python-3.10
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-standard-auth-v4.2-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.2"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-4.2
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-v4.4-python3.11-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+          PYTHON_VERSION: "3.11"
+    tags:
+      - test-standard-auth
+      - server-4.4
+      - python-3.11
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-standard-auth-v4.4-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "4.4"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-4.4
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-v5.0-python3.12-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: "3.12"
+    tags:
+      - test-standard-auth
+      - server-5.0
+      - python-3.12
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-standard-auth-v5.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "5.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-5.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-v6.0-python3.13-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+          PYTHON_VERSION: "3.13"
+    tags:
+      - test-standard-auth
+      - server-6.0
+      - python-3.13
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-standard-auth-v6.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "6.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-6.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-v7.0-python3.14t-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+          PYTHON_VERSION: 3.14t
+    tags:
+      - test-standard-auth
+      - server-7.0
+      - python-3.14t
+      - sharded_cluster-auth-ssl
+      - auth
+      - free-threaded
+  - name: test-standard-auth-v7.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "7.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-7.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-v8.0-python3.14-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: "3.14"
+    tags:
+      - test-standard-auth
+      - server-8.0
+      - python-3.14
+      - sharded_cluster-auth-ssl
+      - auth
+  - name: test-standard-auth-v8.0-pypy3.10-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+      - func: run tests
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: "8.0"
+          PYTHON_VERSION: pypy3.10
+    tags:
+      - test-standard-auth
+      - server-8.0
+      - python-pypy3.10
+      - sharded_cluster-auth-ssl
+      - auth
+      - pypy
+  - name: test-standard-auth-latest-python3.11-auth-ssl-sharded-cluster
+    commands:
+      - func: run server
+        vars:
+          AUTH: auth
+          SSL: ssl
+          TOPOLOGY: sharded_cluster
+          VERSION: latest
+      - func: run tests
+ vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.11" + tags: + - test-standard-auth + - server-latest + - python-3.11 + - sharded_cluster-auth-ssl + - auth + - pr + - name: test-standard-auth-latest-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-latest + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-rapid-python3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.10" + tags: + - test-standard-auth + - server-rapid + - python-3.10 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-rapid-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-rapid + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml new file mode 100644 index 0000000000..9bae5f4680 --- /dev/null +++ b/.evergreen/generated_configs/variants.yml @@ -0,0 +1,616 @@ +buildvariants: + # Alternative hosts tests + - name: other-hosts-rhel9-fips-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL9-FIPS latest + run_on: + - rhel92-fips + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + REQUIRE_FIPS: "1" + PYTHON_BINARY: /usr/bin/python3.11 + tags: [] + - name: other-hosts-rhel8-zseries-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-zseries latest + run_on: + - rhel8-zseries-small + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-rhel8-power8-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-POWER8 latest + run_on: + - rhel8-power-small + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-rhel8-arm64-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-arm64 latest + run_on: + - rhel82-arm64-small + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-amazon2023-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts Amazon2023 latest + run_on: + - amazon2023-arm64-latest-large-m8g + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [pr] + + # Atlas connect tests + - name: atlas-connect-rhel8 + tasks: + - name: .test-no-orchestration + display_name: Atlas connect RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: atlas_connect + tags: [pr] + + # Aws auth tests + - name: auth-aws-ubuntu-20 + tasks: + - name: .auth-aws !.auth-aws-ecs + display_name: Auth AWS Ubuntu-20 + run_on: + - ubuntu2004-small + tags: [] + - name: auth-aws-win64 + tasks: + - name: .auth-aws !.auth-aws-ecs + display_name: Auth AWS Win64 + run_on: + - windows-64-vsMulti-small + tags: [] + - name: auth-aws-macos + tasks: + - name: 
.auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 + display_name: Auth AWS macOS + run_on: + - macos-14 + tags: [pr] + + # Aws lambda tests + - name: faas-lambda + tasks: + - name: .aws_lambda + display_name: FaaS Lambda + run_on: + - rhel87-small + + # Backport pr tests + - name: backport-pr + tasks: + - name: backport-pr + display_name: Backport PR + run_on: + - rhel87-small + + # Compression tests + - name: compression-snappy-rhel8 + tasks: + - name: .test-standard + display_name: Compression snappy RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: snappy + - name: compression-zlib-rhel8 + tasks: + - name: .test-standard + display_name: Compression zlib RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: zlib + - name: compression-zstd-rhel8 + tasks: + - name: .test-standard !.server-4.2 + display_name: Compression zstd RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: zstd + + # Coverage report tests + - name: coverage-report + tasks: + - name: coverage-report + display_name: Coverage Report + run_on: + - rhel87-small + + # Disable test commands tests + - name: disable-test-commands-rhel8 + tasks: + - name: .test-standard .server-latest + display_name: Disable test commands RHEL8 + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + DISABLE_TEST_COMMANDS: "1" + + # Doctests tests + - name: doctests-rhel8 + tasks: + - name: .test-non-standard .standalone-noauth-nossl + display_name: Doctests RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: doctest + + # Encryption tests + - name: encryption-rhel8 + tasks: + - name: .test-non-standard + - name: .test-min-deps + display_name: Encryption RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-macos + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption macOS + run_on: + - macos-14 + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-win64 + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-crypt_shared-rhel8 + tasks: + - name: .test-non-standard + - name: .test-min-deps + display_name: Encryption crypt_shared RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-crypt_shared-macos + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared macOS + run_on: + - macos-14 + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-crypt_shared-win64 + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-pyopenssl-rhel8 + tasks: + - name: .test-non-standard + display_name: Encryption PyOpenSSL RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl + tags: [encryption_tag] + + # Enterprise auth tests + - name: auth-enterprise-rhel8 + tasks: + - name: .test-standard-auth .auth !.free-threaded + display_name: Auth Enterprise RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth 
+ - name: auth-enterprise-macos + tasks: + - name: .test-standard-auth !.pypy .auth !.free-threaded + display_name: Auth Enterprise macOS + run_on: + - macos-14 + expansions: + TEST_NAME: enterprise_auth + AUTH: auth + - name: auth-enterprise-win64 + tasks: + - name: .test-standard-auth !.pypy .auth + display_name: Auth Enterprise Win64 + run_on: + - windows-64-vsMulti-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth + + # Green framework tests + - name: green-gevent-rhel8 + tasks: + - name: .test-standard .sync !.free-threaded + display_name: Green Gevent RHEL8 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + + # Import time tests + - name: import-time + tasks: + - name: check-import-time + display_name: Import Time + run_on: + - rhel87-small + + # Kms tests + - name: kms + tasks: + - name: test-gcpkms + batchtime: 1440 + - name: test-gcpkms-fail + - name: test-azurekms + batchtime: 1440 + - name: test-azurekms-fail + display_name: KMS + run_on: + - debian11-small + + # Load balancer tests + - name: load-balancer + tasks: + - name: .test-non-standard .server-6.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-7.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-8.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-rapid .sharded_cluster-auth-ssl + - name: .test-non-standard .server-latest .sharded_cluster-auth-ssl + display_name: Load Balancer + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: load_balancer + + # Mockupdb tests + - name: mockupdb-rhel8 + tasks: + - name: .test-no-orchestration + display_name: MockupDB RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: mockupdb + tags: [pr] + + # Mod wsgi tests + - name: mod_wsgi-ubuntu-22 + tasks: + - name: .mod_wsgi + display_name: Mod_WSGI Ubuntu-22 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + + # No c ext tests + - name: no-c-ext-rhel8 + tasks: + - name: .test-standard + display_name: No C Ext RHEL8 + run_on: + - rhel87-small + + # No server tests + - name: no-server-rhel8 + tasks: + - name: .test-no-orchestration + display_name: No server RHEL8 + run_on: + - rhel87-small + tags: [pr] + + # Ocsp tests + - name: ocsp-rhel8 + tasks: + - name: .ocsp + display_name: OCSP RHEL8 + run_on: + - rhel87-small + batchtime: 10080 + - name: ocsp-win64 + tasks: + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + display_name: OCSP Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + - name: ocsp-macos + tasks: + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + display_name: OCSP macOS + run_on: + - macos-14 + batchtime: 10080 + + # Oidc auth tests + - name: auth-oidc-ubuntu-22 + tasks: + - name: .auth_oidc_remote + display_name: Auth OIDC Ubuntu-22 + run_on: + - ubuntu2204-small + batchtime: 1440 + - name: auth-oidc-local-ubuntu-22 + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC Local Ubuntu-22 + run_on: + - ubuntu2204-small + batchtime: 1440 + tags: [pr] + - name: auth-oidc-macos + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC macOS + run_on: + - macos-14 + batchtime: 1440 + - name: auth-oidc-win64 + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + + # Perf tests + - name: performance-benchmarks + tasks: + - name: .perf + display_name: Performance Benchmarks + run_on: + - rhel90-dbx-perf-large + 
batchtime: 1440 + + # Pyopenssl tests + - name: pyopenssl-rhel8 + tasks: + - name: .test-standard .sync + - name: .test-standard .async .replica_set-noauth-ssl + display_name: PyOpenSSL RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + - name: pyopenssl-macos + tasks: + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL macOS + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + - name: pyopenssl-win64 + tasks: + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL Win64 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + + # Search index tests + - name: search-index-helpers-rhel8-python3.10 + tasks: + - name: .search_index + display_name: Search Index Helpers RHEL8 Python3.10 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.10/bin/python3 + + # Server version tests + - name: mongodb-v4.2 + tasks: + - name: .server-version + display_name: "* MongoDB v4.2" + run_on: + - rhel87-small + expansions: + VERSION: "4.2" + tags: [coverage_tag] + - name: mongodb-v4.4 + tasks: + - name: .server-version + display_name: "* MongoDB v4.4" + run_on: + - rhel87-small + expansions: + VERSION: "4.4" + tags: [coverage_tag] + - name: mongodb-v5.0 + tasks: + - name: .server-version + display_name: "* MongoDB v5.0" + run_on: + - rhel87-small + expansions: + VERSION: "5.0" + tags: [coverage_tag] + - name: mongodb-v6.0 + tasks: + - name: .server-version + display_name: "* MongoDB v6.0" + run_on: + - rhel87-small + expansions: + VERSION: "6.0" + tags: [coverage_tag] + - name: mongodb-v7.0 + tasks: + - name: .server-version + display_name: "* MongoDB v7.0" + run_on: + - rhel87-small + expansions: + VERSION: "7.0" + tags: [coverage_tag] + - name: mongodb-v8.0 + tasks: + - name: .server-version + display_name: "* MongoDB v8.0" + run_on: + - rhel87-small + expansions: + VERSION: "8.0" + tags: [coverage_tag] + - name: mongodb-rapid + tasks: + - name: .server-version + display_name: "* MongoDB rapid" + run_on: + - rhel87-small + expansions: + VERSION: rapid + tags: [coverage_tag] + - name: mongodb-latest + tasks: + - name: .server-version + display_name: "* MongoDB latest" + run_on: + - rhel87-small + expansions: + VERSION: latest + tags: [coverage_tag] + + # Stable api tests + - name: stable-api-require-v1-rhel8-auth + tasks: + - name: .test-standard !.replica_set-noauth-ssl .server-5.0 + - name: .test-standard !.replica_set-noauth-ssl .server-6.0 + - name: .test-standard !.replica_set-noauth-ssl .server-7.0 + - name: .test-standard !.replica_set-noauth-ssl .server-8.0 + - name: .test-standard !.replica_set-noauth-ssl .server-rapid + - name: .test-standard !.replica_set-noauth-ssl .server-latest + display_name: Stable API require v1 RHEL8 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + tags: [versionedApi_tag] + - name: stable-api-accept-v2-rhel8-auth + tasks: + - name: .test-standard .server-5.0 .standalone-noauth-nossl + - name: .test-standard .server-6.0 .standalone-noauth-nossl + - name: .test-standard .server-7.0 .standalone-noauth-nossl + - name: .test-standard .server-8.0 .standalone-noauth-nossl + - name: .test-standard .server-rapid .standalone-noauth-nossl + - name: .test-standard .server-latest .standalone-noauth-nossl + display_name: Stable API accept v2 RHEL8 Auth 
+ run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + tags: [versionedApi_tag] + + # Standard nonlinux tests + - name: test-macos + tasks: + - name: .test-standard !.pypy + display_name: "* Test macOS" + run_on: + - macos-14 + tags: [standard-non-linux] + - name: test-macos-arm64 + tasks: + - name: .test-standard !.pypy .server-6.0 + - name: .test-standard !.pypy .server-7.0 + - name: .test-standard !.pypy .server-8.0 + - name: .test-standard !.pypy .server-rapid + - name: .test-standard !.pypy .server-latest + display_name: "* Test macOS Arm64" + run_on: + - macos-14-arm64 + tags: [standard-non-linux] + - name: test-win64 + tasks: + - name: .test-standard !.pypy + display_name: "* Test Win64" + run_on: + - windows-64-vsMulti-small + tags: [standard-non-linux] + - name: test-win32 + tasks: + - name: .test-standard !.pypy + display_name: "* Test Win32" + run_on: + - windows-64-vsMulti-small + expansions: + IS_WIN32: "1" + tags: [standard-non-linux] + + # Storage engine tests + - name: storage-inmemory-rhel8 + tasks: + - name: .test-standard .standalone-noauth-nossl + display_name: Storage InMemory RHEL8 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: inmemory diff --git a/.evergreen/just.sh b/.evergreen/just.sh new file mode 100755 index 0000000000..bebbca8282 --- /dev/null +++ b/.evergreen/just.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eu + +. .evergreen/scripts/setup-dev-env.sh +just "$@" diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh new file mode 100755 index 0000000000..88ef137f86 --- /dev/null +++ b/.evergreen/remove-unimplemented-tests.sh @@ -0,0 +1,53 @@ +#!/bin/bash +PYMONGO=$(dirname "$(cd "$(dirname "$0")" || exit; pwd)") + +rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 +rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 +rm $PYMONGO/test/client-side-encryption/spec/unified/client-bulkWrite-qe.json # PYTHON-4929 + +# Python doesn't implement DRIVERS-3064 +rm $PYMONGO/test/collection_management/listCollections-rawdata.json +rm $PYMONGO/test/crud/unified/aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-delete-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-update-rawdata.json +rm $PYMONGO/test/crud/unified/count-rawdata.json +rm $PYMONGO/test/crud/unified/countDocuments-rawdata.json +rm $PYMONGO/test/crud/unified/db-aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/distinct-rawdata.json +rm $PYMONGO/test/crud/unified/estimatedDocumentCount-rawdata.json +rm $PYMONGO/test/crud/unified/find-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndDelete-rawdata.json +rm 
$PYMONGO/test/crud/unified/findOneAndReplace-rawdata.json
+rm $PYMONGO/test/crud/unified/findOneAndUpdate-rawdata.json
+rm $PYMONGO/test/crud/unified/insertMany-rawdata.json
+rm $PYMONGO/test/crud/unified/insertOne-rawdata.json
+rm $PYMONGO/test/crud/unified/replaceOne-rawdata.json
+rm $PYMONGO/test/crud/unified/updateMany-rawdata.json
+rm $PYMONGO/test/crud/unified/updateOne-rawdata.json
+rm $PYMONGO/test/index_management/index-rawdata.json
+
+# PyMongo does not support modifyCollection
+rm $PYMONGO/test/collection_management/modifyCollection-*.json
+
+# PYTHON-5248 - Remove support for MongoDB 4.0
+find $PYMONGO/test -type f -name 'pre-42-*.json' -delete
+
+# PYTHON-3359 - Remove Database and Collection level timeout override
+rm $PYMONGO/test/csot/override-collection-timeoutMS.json
+rm $PYMONGO/test/csot/override-database-timeoutMS.json
+
+# PYTHON-2943 - Socks5 Proxy Support
+rm $PYMONGO/test/uri_options/proxy-options.json
+
+echo "Done removing unimplemented tests"
diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh
new file mode 100755
index 0000000000..d2bd89c781
--- /dev/null
+++ b/.evergreen/resync-specs.sh
@@ -0,0 +1,204 @@
+#!/bin/bash
+# Resync test files from the specifications repo.
+set -eu
+PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)")
+SPECS=${MDB_SPECS:-~/Work/specifications}
+
+help (){
+  echo "Usage: resync_specs.sh [-bcs] spec"
+  echo "Required arguments:"
+  echo " spec determines which folder the spec tests will be copied from."
+  echo "Optional flags:"
+  echo " -b is used to add a string to the blocklist for the next run. Can be used"
+  echo "    any number of times on a single command to block multiple patterns."
+  echo "    You can use any regex pattern (it is passed to 'grep -Ev')."
+  echo " -c is used to set a branch or commit that will be checked out in the"
+  echo "    specifications repo before copying."
+  echo " -s is used to set a unique path to the specs repo for that specific"
+  echo "    run."
+  echo "Notes:"
+  echo "You can export the environment variable MDB_SPECS to set the specs"
+  echo " repo (similar to -s), but this will persist between runs until you"
+  echo "unset it."
+}
+
+# Parse flag args
+BRANCH=''
+BLOCKLIST='.*\.yml'
+while getopts 'b:c:s:' flag; do
+  case "${flag}" in
+    b) BLOCKLIST+="|$OPTARG"
+    ;;
+    c) BRANCH="${OPTARG}"
+    ;;
+    s) SPECS="${OPTARG}"
+    ;;
+    *) help; exit 0
+    ;;
+  esac
+done
+shift $((OPTIND-1))
+
+if [ -n "$BRANCH" ]
+then
+  git -C $SPECS checkout $BRANCH
+fi
+
+# Ensure the JSON files are up to date.
+if ! [ -n "${CI:-}" ]
+then
+  cd $SPECS/source
+  make
+  cd -
+fi
+# cpjson unified-test-format/tests/invalid unified-test-format/invalid
+# * param1: Path to spec tests dir in specifications repo
+# * param2: Path to where the corresponding tests live in Python.
+cpjson () {
+  find "$PYMONGO"/test/$2 -type f -delete
+  cd "$SPECS"/source/$1
+  find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \
+  $PYMONGO/test/$2
+  printf "\nIgnored files for ${PWD}:\n"
+  IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \
+  <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \
+  sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )"
+  printf "%s\n" $IGNORED_FILES
+  cd "$PYMONGO"/test/$2
+  printf "%s\n" $IGNORED_FILES | xargs git checkout master
+
+}
+
+for spec in "$@"
+do
+  # Match the spec dir name, the python test dir name, and/or common abbreviations.
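+  # For example, "./resync-specs.sh crud" copies $SPECS/source/crud/tests/
+  # into test/crud, and "./resync-specs.sh uri-options" also copies the
+  # accompanying .pem files alongside the JSON tests (see the cases below).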
+ case "$spec" in + auth) + cpjson auth/tests/ auth + ;; + bson-binary-vector|bson_binary_vector) + cpjson bson-binary-vector/tests/ bson_binary_vector + ;; + bson-corpus|bson_corpus) + cpjson bson-corpus/tests/ bson_corpus + ;; + max-staleness|max_staleness) + cpjson max-staleness/tests/ max_staleness + ;; + collection-management|collection_management) + cpjson collection-management/tests/ collection_management + ;; + connection-string|connection_string) + cpjson connection-string/tests/ connection_string/test + ;; + change-streams|change_streams) + cpjson change-streams/tests/ change_streams/ + ;; + client-side-encryption|csfle|fle) + cpjson client-side-encryption/tests/ client-side-encryption/spec + cpjson client-side-encryption/corpus/ client-side-encryption/corpus + cpjson client-side-encryption/external/ client-side-encryption/external + cpjson client-side-encryption/limits/ client-side-encryption/limits + cpjson client-side-encryption/etc/data client-side-encryption/etc/data + ;; + connection-monitoring|connection_monitoring) + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring + ;; + connection-logging|connection_logging) + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + ;; + cmap|CMAP|connection-monitoring-and-pooling) + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring + ;; + apm|APM|command-monitoring|command_monitoring) + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring + ;; + command-logging|command_logging) + cpjson command-logging-and-monitoring/tests/logging command_logging + ;; + clam|CLAM|command-logging-and-monitoring|command_logging_and_monitoring) + cpjson command-logging-and-monitoring/tests/logging command_logging + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring + ;; + crud|CRUD) + cpjson crud/tests/ crud + ;; + csot|CSOT|client-side-operations-timeout) + cpjson client-side-operations-timeout/tests csot + ;; + gridfs) + cpjson gridfs/tests gridfs + ;; + handshake) + cpjson mongodb-handshake/tests handshake + ;; + index|index-management) + cpjson index-management/tests index_management + ;; + load-balancers|load_balancer) + cpjson load-balancers/tests load_balancer + ;; + srv|SRV|initial-dns-seedlist-discovery|srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist + ;; + read-write-concern|read_write_concern) + cpjson read-write-concern/tests/operation read_write_concern/operation + ;; + retryable-reads|retryable_reads) + cpjson retryable-reads/tests/ retryable_reads + ;; + retryable-writes|retryable_writes) + cpjson retryable-writes/tests/ retryable_writes + ;; + run-command|run_command) + cpjson run-command/tests/ run_command + ;; + sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) + cpjson server-discovery-and-monitoring/tests/errors \ + discovery_and_monitoring/errors + cpjson server-discovery-and-monitoring/tests/rs \ + discovery_and_monitoring/rs + cpjson server-discovery-and-monitoring/tests/sharded \ + discovery_and_monitoring/sharded + cpjson server-discovery-and-monitoring/tests/single \ + discovery_and_monitoring/single + cpjson server-discovery-and-monitoring/tests/unified \ + discovery_and_monitoring/unified + cpjson server-discovery-and-monitoring/tests/load-balanced \ + discovery_and_monitoring/load-balanced + ;; + sdam-monitoring|sdam_monitoring) + cpjson server-discovery-and-monitoring/tests/monitoring 
sdam_monitoring
+    ;;
+  server-selection|server_selection)
+    cpjson server-selection/tests/ server_selection
+    rm -rf $PYMONGO/test/server_selection/logging # these tests live in server_selection_logging
+    cpjson server-selection/tests/logging server_selection_logging
+    ;;
+  server-selection-logging|server_selection_logging)
+    cpjson server-selection/tests/logging server_selection_logging
+    ;;
+  sessions)
+    cpjson sessions/tests/ sessions
+    ;;
+  transactions|transactions-convenient-api)
+    cpjson transactions/tests/ transactions
+    cpjson transactions-convenient-api/tests/ transactions-convenient-api
+    ;;
+  unified|unified-test-format)
+    cpjson unified-test-format/tests/ unified-test-format/
+    ;;
+  uri|uri-options|uri_options)
+    cpjson uri-options/tests uri_options
+    cp "$SPECS"/source/uri-options/tests/*.pem $PYMONGO/test/uri_options
+    ;;
+  stable-api|versioned-api)
+    cpjson versioned-api/tests versioned-api
+    ;;
+  *)
+    echo "Do not know how to resync spec tests for '${spec}'"
+    help
+    ;;
+  esac
+done
diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh
new file mode 100755
index 0000000000..b8330de511
--- /dev/null
+++ b/.evergreen/run-mongodb-aws-ecs-test.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Script run on an ECS host to test MONGODB-AWS.
+set -eu
+
+############################################
+#            Main Program                  #
+############################################
+
+if [[ -z "$1" ]]; then
+    echo "usage: $0 <MONGODB_URI>"
+    exit 1
+fi
+export MONGODB_URI="$1"
+
+if echo "$MONGODB_URI" | grep -q "@"; then
+  echo "MONGODB_URI unexpectedly contains user credentials in ECS test!";
+  exit 1
+fi
+# Now we can safely enable xtrace
+set -o xtrace
+
+# Install python with pip.
+PYTHON_VER="python3.10"
+apt-get -qq update < /dev/null > /dev/null
+apt-get -q install -y software-properties-common
+# Use openpgp to avoid gpg key timeout.
+mkdir -p $HOME/.gnupg
+echo "keyserver keys.openpgp.org" >> $HOME/.gnupg/gpg.conf
+add-apt-repository -y 'ppa:deadsnakes/ppa'
+apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null
+
+export PYTHON_BINARY=$PYTHON_VER
+export SET_XTRACE_ON=1
+cd src
+rm -rf .venv
+rm -f .evergreen/scripts/test-env.sh || true
+bash ./.evergreen/just.sh setup-tests auth_aws ecs-remote
+bash .evergreen/just.sh run-tests
diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh
new file mode 100755
index 0000000000..b34013a6ac
--- /dev/null
+++ b/.evergreen/run-mongodb-oidc-test.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Script run on a remote host to test MONGODB-OIDC.
+set -eu
+
+echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}..."
+
+if [ "${OIDC_ENV}" == "k8s" ]; then
+    SUB_TEST_NAME=$K8S_VARIANT-remote
+else
+    SUB_TEST_NAME=$OIDC_ENV-remote
+    sudo apt-get install -y python3-dev build-essential
+fi
+
+bash ./.evergreen/just.sh setup-tests auth_oidc $SUB_TEST_NAME
+bash ./.evergreen/just.sh run-tests "${@:1}"
+
+echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}... done."
diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh
new file mode 100755
index 0000000000..c14215244e
--- /dev/null
+++ b/.evergreen/run-tests.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Run a test suite that was configured with setup-tests.sh.
+set -eu
+
+SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0})
+SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )"
+ROOT_DIR="$(dirname $SCRIPT_DIR)"
+
+PREV_DIR=$(pwd)
+cd $ROOT_DIR
+
+# Try to source the env file.
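+# (env.sh is written by scripts/configure-env.sh; the test-env.sh file checked
+# below is produced by "just setup-tests", which is why we bail out when it is
+# missing.)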
+if [ -f $SCRIPT_DIR/scripts/env.sh ]; then + echo "Sourcing env inputs" + . $SCRIPT_DIR/scripts/env.sh +else + echo "Not sourcing env inputs" +fi + +# Handle test inputs. +if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then + echo "Sourcing test inputs" + . $SCRIPT_DIR/scripts/test-env.sh +else + echo "Missing test inputs, please run 'just setup-tests'" + exit 1 +fi + +cleanup_tests() { + # Avoid leaving the lock file in a changed state when we change the resolution type. + if [ -n "${TEST_MIN_DEPS:-}" ]; then + git checkout uv.lock || true + fi + cd $PREV_DIR +} + +trap "cleanup_tests" SIGINT ERR + +# Start the test runner. +uv run ${UV_ARGS} --reinstall-package pymongo .evergreen/scripts/run_tests.py "$@" + +cleanup_tests diff --git a/.evergreen/scripts/__init__.py b/.evergreen/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh new file mode 100755 index 0000000000..f7a1117b97 --- /dev/null +++ b/.evergreen/scripts/check-import-time.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Check for regressions in the import time of pymongo. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) + +source $HERE/env.sh + +pushd $HERE/../.. >/dev/null + +BASE_SHA="$1" +HEAD_SHA="$2" + +. .evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) +fi + +# Use the previous commit if this was not a PR run. +if [ "$BASE_SHA" == "$HEAD_SHA" ]; then + BASE_SHA=$(git rev-parse HEAD~1) +fi + +function get_import_time() { + local log_file + createvirtualenv "$PYTHON_BINARY" import-venv + python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" + # Import once to cache modules + python -c "import pymongo" + log_file="pymongo-$1.log" + python -X importtime -c "import pymongo" 2> $log_file +} + +get_import_time $HEAD_SHA +git stash || true +git checkout $BASE_SHA +get_import_time $BASE_SHA +git checkout $HEAD_SHA +git stash apply || true +python tools/compare_import_time.py $HEAD_SHA $BASE_SHA + +popd >/dev/null diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh new file mode 100755 index 0000000000..f04a936fd2 --- /dev/null +++ b/.evergreen/scripts/cleanup.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Clean up resources at the end of an evergreen run. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + +rm -rf "${DRIVERS_TOOLS}" || true +rm -f $HERE/../../secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh new file mode 100755 index 0000000000..8dc328aab3 --- /dev/null +++ b/.evergreen/scripts/configure-env.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# Configure an evergreen test environment. +set -eu + +# Get the current unique version of this checkout +# shellcheck disable=SC2154 +if [ "${is_patch:-}" = "true" ]; then + # shellcheck disable=SC2154 + CURRENT_VERSION="$(git describe)-patch-$version_id" +else + CURRENT_VERSION=latest +fi + +PROJECT_DIRECTORY="$(pwd)" +DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" +CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} +UV_TOOL_DIR=$PROJECT_DIRECTORY/.local/uv/tools +UV_CACHE_DIR=$PROJECT_DIRECTORY/.local/uv/cache +DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin" +MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" + +# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. 
+if [ "${CI:-}" == "true" ]; then + PYMONGO_BIN_DIR=${DRIVERS_TOOLS_BINARIES:-} +# We want to use a path that's already on PATH on spawn hosts. +else + PYMONGO_BIN_DIR=$HOME/cli_bin +fi + +PATH_EXT="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PYMONGO_BIN_DIR:\$PATH" + +# Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory +if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin + DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) + PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) + CARGO_HOME=$(cygpath -m $CARGO_HOME) + UV_TOOL_DIR=$(cygpath -m "$UV_TOOL_DIR") + UV_CACHE_DIR=$(cygpath -m "$UV_CACHE_DIR") + DRIVERS_TOOLS_BINARIES=$(cygpath -m "$DRIVERS_TOOLS_BINARIES") + MONGODB_BINARIES=$(cygpath -m "$MONGODB_BINARIES") + PYMONGO_BIN_DIR=$(cygpath -m "$PYMONGO_BIN_DIR") +fi + +SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" + +if [ -f "$SCRIPT_DIR/env.sh" ]; then + echo "Reading $SCRIPT_DIR/env.sh file" + . "$SCRIPT_DIR/env.sh" + exit 0 +fi + +export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" +export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" + +cat < "$SCRIPT_DIR"/env.sh +export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" +export CURRENT_VERSION="$CURRENT_VERSION" +export DRIVERS_TOOLS="$DRIVERS_TOOLS" +export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" +export MONGODB_BINARIES="$MONGODB_BINARIES" +export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS_BINARIES" +export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" + +export CARGO_HOME="$CARGO_HOME" +export UV_TOOL_DIR="$UV_TOOL_DIR" +export UV_CACHE_DIR="$UV_CACHE_DIR" +export UV_TOOL_BIN_DIR="$DRIVERS_TOOLS_BINARIES" +export PYMONGO_BIN_DIR="$PYMONGO_BIN_DIR" +export PATH="$PATH_EXT" +# shellcheck disable=SC2154 +export PROJECT="${project:-mongo-python-driver}" +export PIP_QUIET=1 +EOT + +# Write the .env file for drivers-tools. +rm -rf $DRIVERS_TOOLS +BRANCH=master +ORG=mongodb-labs +git clone --branch $BRANCH https://github.com/$ORG/drivers-evergreen-tools.git $DRIVERS_TOOLS + +cat < ${DRIVERS_TOOLS}/.env +SKIP_LEGACY_SHELL=1 +DRIVERS_TOOLS="$DRIVERS_TOOLS" +MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" +MONGODB_BINARIES="$MONGODB_BINARIES" +EOT + +# Add these expansions to make it easier to call out tests scripts from the EVG yaml +cat < expansion.yml +DRIVERS_TOOLS: "$DRIVERS_TOOLS" +PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" +EOT + +# If the toolchain is available, symlink binaries to the bin dir. This has to be done +# after drivers-tools is cloned, since we might be using its binary dir. 
+_bin_path="" +if [ "Windows_NT" == "${OS:-}" ]; then + _bin_path="/cygdrive/c/Python/Current/Scripts" +elif [ "$(uname -s)" == "Darwin" ]; then + _bin_path="/Library/Frameworks/Python.Framework/Versions/Current/bin" +else + _bin_path="/opt/python/Current/bin" +fi +if [ -d "${_bin_path}" ]; then + _suffix="" + if [ "Windows_NT" == "${OS:-}" ]; then + _suffix=".exe" + fi + echo "Symlinking binaries from toolchain" + mkdir -p $PYMONGO_BIN_DIR + ln -s ${_bin_path}/just${_suffix} $PYMONGO_BIN_DIR/just${_suffix} + ln -s ${_bin_path}/uv${_suffix} $PYMONGO_BIN_DIR/uv${_suffix} + ln -s ${_bin_path}/uvx${_suffix} $PYMONGO_BIN_DIR/uvx${_suffix} +fi diff --git a/.evergreen/scripts/create-spec-pr.sh b/.evergreen/scripts/create-spec-pr.sh new file mode 100755 index 0000000000..a5e49bb211 --- /dev/null +++ b/.evergreen/scripts/create-spec-pr.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +tools="$(realpath -s "../drivers-tools")" +pushd $tools/.evergreen/github_app || exit + +owner="mongodb" +repo="mongo-python-driver" + +# Bootstrap the app. +echo "bootstrapping" +source utils.sh +bootstrap drivers/comment-bot + +# Run the app. +source ./secrets-export.sh + +# Get a github access token for the git checkout. +echo "Getting github token..." + +token=$(bash ./get-access-token.sh $repo $owner) +if [ -z "${token}" ]; then + echo "Failed to get github access token!" + popd || exit + exit 1 +fi +echo "Getting github token... done." +popd || exit + +# Make the git checkout and create a new branch. +echo "Creating the git checkout..." +branch="spec-resync-"$(date '+%m-%d-%Y') + +git remote set-url origin https://x-access-token:${token}@github.com/$owner/$repo.git +git checkout -b $branch "origin/master" +git add ./test +git commit -am "resyncing specs $(date '+%m-%d-%Y')" +echo "Creating the git checkout... done." + +git push origin $branch +resp=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $token" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -d "{\"title\":\"[Spec Resync] $(date '+%m-%d-%Y')\",\"body\":\"$(cat "$1")\",\"head\":\"${branch}\",\"base\":\"master\"}" \ + --url https://api.github.com/repos/$owner/$repo/pulls) +echo $resp | jq '.html_url' +echo "Creating the PR... done." + +rm -rf $tools diff --git a/.evergreen/scripts/download-and-merge-coverage.sh b/.evergreen/scripts/download-and-merge-coverage.sh new file mode 100755 index 0000000000..c006813ba9 --- /dev/null +++ b/.evergreen/scripts/download-and-merge-coverage.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Download all the task coverage files. +set -eu +aws s3 cp --recursive s3://"$1"/coverage/"$2"/"$3"/coverage/ coverage/ diff --git a/.evergreen/scripts/generate-config.sh b/.evergreen/scripts/generate-config.sh new file mode 100755 index 0000000000..70b4578cf9 --- /dev/null +++ b/.evergreen/scripts/generate-config.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# Entry point for the generate-config pre-commit hook. + +set -eu + +python .evergreen/scripts/generate_config.py diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py new file mode 100644 index 0000000000..daec0841d5 --- /dev/null +++ b/.evergreen/scripts/generate_config.py @@ -0,0 +1,1177 @@ +# Note: See CONTRIBUTING.md for how to update/run this file. 
+from __future__ import annotations + +import sys +from itertools import product + +from generate_config_utils import ( + ALL_PYTHONS, + ALL_VERSIONS, + BATCHTIME_DAY, + BATCHTIME_WEEK, + C_EXTS, + CPYTHONS, + DEFAULT_HOST, + HOSTS, + MIN_MAX_PYTHON, + OTHER_HOSTS, + PYPYS, + SYNCS, + TOPOLOGIES, + create_variant, + get_assume_role, + get_s3_put, + get_standard_auth_ssl, + get_subprocess_exec, + get_task_name, + get_variant_name, + get_versions_from, + handle_c_ext, + write_functions_to_file, + write_tasks_to_file, + write_variants_to_file, + zip_cycle, +) +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import ( + FunctionCall, + archive_targz_pack, + attach_results, + attach_xunit_results, + ec2_assume_role, + expansions_update, + git_get_project, +) +from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef + +############## +# Variants +############## + + +def create_ocsp_variants() -> list[BuildVariant]: + variants = [] + # OCSP tests on default host with all servers v4.4+. + # MongoDB servers on Windows and MacOS do not staple OCSP responses and only support RSA. + # Only test with MongoDB 4.4 and latest. + for host_name in ["rhel8", "win64", "macos"]: + host = HOSTS[host_name] + if host == DEFAULT_HOST: + tasks = [".ocsp"] + else: + tasks = [".ocsp-rsa !.ocsp-staple .latest", ".ocsp-rsa !.ocsp-staple .4.4"] + variant = create_variant( + tasks, + get_variant_name("OCSP", host), + host=host, + batchtime=BATCHTIME_WEEK, + ) + variants.append(variant) + return variants + + +def create_server_version_variants() -> list[BuildVariant]: + variants = [] + for version in ALL_VERSIONS: + display_name = get_variant_name("* MongoDB", version=version) + variant = create_variant( + [".server-version"], + display_name, + version=version, + host=DEFAULT_HOST, + tags=["coverage_tag"], + ) + variants.append(variant) + return variants + + +def create_standard_nonlinux_variants() -> list[BuildVariant]: + variants = [] + base_display_name = "* Test" + + # Test a subset on each of the other platforms. + for host_name in ("macos", "macos-arm64", "win64", "win32"): + tasks = [".test-standard !.pypy"] + # MacOS arm64 only works on server versions 6.0+ + if host_name == "macos-arm64": + tasks = [ + f".test-standard !.pypy .server-{version}" for version in get_versions_from("6.0") + ] + host = HOSTS[host_name] + tags = ["standard-non-linux"] + expansions = dict() + if host_name == "win32": + expansions["IS_WIN32"] = "1" + display_name = get_variant_name(base_display_name, host) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) + variants.append(variant) + + return variants + + +def create_encryption_variants() -> list[BuildVariant]: + variants = [] + tags = ["encryption_tag"] + batchtime = BATCHTIME_DAY + + def get_encryption_expansions(encryption): + expansions = dict(TEST_NAME="encryption") + if "crypt_shared" in encryption: + expansions["TEST_CRYPT_SHARED"] = "true" + if "PyOpenSSL" in encryption: + expansions["SUB_TEST_NAME"] = "pyopenssl" + return expansions + + # Test encryption on all hosts. 
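+    # product() yields all six (mode, host) pairs:
+    # {"Encryption", "Encryption crypt_shared"} x {"rhel8", "macos", "win64"}.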
+ for encryption, host in product( + ["Encryption", "Encryption crypt_shared"], ["rhel8", "macos", "win64"] + ): + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, **expansions) + tasks = [".test-non-standard", ".test-min-deps"] + if host != "rhel8": + tasks = [".test-non-standard !.pypy"] + variant = create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + + # Test PyOpenSSL on linux. + host = DEFAULT_HOST + encryption = "Encryption PyOpenSSL" + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, **expansions) + variant = create_variant( + [".test-non-standard"], + display_name, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + return variants + + +def create_load_balancer_variants(): + tasks = [ + f".test-non-standard .server-{v} .sharded_cluster-auth-ssl" + for v in get_versions_from("6.0") + ] + expansions = dict(TEST_NAME="load_balancer") + return [ + create_variant( + tasks, + "Load Balancer", + host=DEFAULT_HOST, + batchtime=BATCHTIME_DAY, + expansions=expansions, + ) + ] + + +def create_compression_variants(): + # Compression tests - use the standard linux tests. + host = DEFAULT_HOST + variants = [] + for compressor in "snappy", "zlib", "zstd": + expansions = dict(COMPRESSOR=compressor) + if compressor == "zstd": + tasks = [".test-standard !.server-4.2"] + else: + tasks = [".test-standard"] + display_name = get_variant_name(f"Compression {compressor}", host) + variants.append( + create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + ) + ) + return variants + + +def create_enterprise_auth_variants(): + variants = [] + for host in ["rhel8", "macos", "win64"]: + expansions = dict(TEST_NAME="enterprise_auth", AUTH="auth") + display_name = get_variant_name("Auth Enterprise", host) + tasks = [".test-standard-auth .auth !.free-threaded"] + # https://jira.mongodb.org/browse/PYTHON-5586 + if host == "macos": + tasks = [".test-standard-auth !.pypy .auth !.free-threaded"] + if host == "win64": + tasks = [".test-standard-auth !.pypy .auth"] + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_pyopenssl_variants(): + base_name = "PyOpenSSL" + batchtime = BATCHTIME_DAY + expansions = dict(SUB_TEST_NAME="pyopenssl") + variants = [] + + for host in ["rhel8", "macos", "win64"]: + display_name = get_variant_name(base_name, host) + base_task = ".test-standard" if host == "rhel8" else ".test-standard !.pypy" + # We only need to run a subset on async. 
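+        # Tag-selector semantics: "{base_task} .sync" picks up every sync
+        # task, while async tasks are narrowed to the replica_set-noauth-ssl
+        # topology combo.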
+ tasks = [f"{base_task} .sync", f"{base_task} .async .replica_set-noauth-ssl"] + variants.append( + create_variant( + tasks, + display_name, + expansions=expansions, + batchtime=batchtime, + ) + ) + + return variants + + +def create_storage_engine_variants(): + host = DEFAULT_HOST + engines = ["InMemory"] + variants = [] + for engine in engines: + expansions = dict(STORAGE_ENGINE=engine.lower()) + tasks = [".test-standard .standalone-noauth-nossl"] + display_name = get_variant_name(f"Storage {engine}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_stable_api_variants(): + host = DEFAULT_HOST + tags = ["versionedApi_tag"] + variants = [] + types = ["require v1", "accept v2"] + + # All python versions across platforms. + for test_type in types: + expansions = dict(AUTH="auth") + # Test against a cluster with requireApiVersion=1. + if test_type == types[0]: + # REQUIRE_API_VERSION is set to make drivers-evergreen-tools + # start a cluster with the requireApiVersion parameter. + expansions["REQUIRE_API_VERSION"] = "1" + # MONGODB_API_VERSION is the apiVersion to use in the test suite. + expansions["MONGODB_API_VERSION"] = "1" + tasks = [ + f".test-standard !.replica_set-noauth-ssl .server-{v}" + for v in get_versions_from("5.0") + ] + else: + # Test against a cluster with acceptApiVersion2 but without + # requireApiVersion, and don't automatically add apiVersion to + # clients created in the test suite. + expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" + tasks = [ + f".test-standard .server-{v} .standalone-noauth-nossl" + for v in get_versions_from("5.0") + ] + base_display_name = f"Stable API {test_type}" + display_name = get_variant_name(base_display_name, host, **expansions) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) + variants.append(variant) + + return variants + + +def create_green_framework_variants(): + variants = [] + host = DEFAULT_HOST + for framework in ["gevent"]: + tasks = [".test-standard .sync !.free-threaded"] + expansions = dict(GREEN_FRAMEWORK=framework) + display_name = get_variant_name(f"Green {framework.capitalize()}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_no_c_ext_variants(): + host = DEFAULT_HOST + tasks = [".test-standard"] + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) + display_name = get_variant_name("No C Ext", host) + return [create_variant(tasks, display_name, host=host)] + + +def create_mod_wsgi_variants(): + host = HOSTS["ubuntu22"] + tasks = [".mod_wsgi"] + expansions = dict(MOD_WSGI_VERSION="4") + display_name = get_variant_name("Mod_WSGI", host) + return [create_variant(tasks, display_name, host=host, expansions=expansions)] + + +def create_disable_test_commands_variants(): + host = DEFAULT_HOST + expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") + display_name = get_variant_name("Disable test commands", host) + tasks = [".test-standard .server-latest"] + return [create_variant(tasks, display_name, host=host, expansions=expansions)] + + +def create_oidc_auth_variants(): + variants = [] + for host_name in ["ubuntu22", "macos", "win64"]: + if host_name == "ubuntu22": + tasks = [".auth_oidc_remote"] + else: + tasks = ["!.auth_oidc_remote .auth_oidc"] + host = HOSTS[host_name] + variants.append( + create_variant( + tasks, + get_variant_name("Auth OIDC", 
host), + host=host, + batchtime=BATCHTIME_DAY, + ) + ) + # Add a specific local test to run on PRs. + if host_name == "ubuntu22": + tasks = ["!.auth_oidc_remote .auth_oidc"] + variants.append( + create_variant( + tasks, + get_variant_name("Auth OIDC Local", host), + tags=["pr"], + host=host, + batchtime=BATCHTIME_DAY, + ) + ) + return variants + + +def create_search_index_variants(): + host = DEFAULT_HOST + python = CPYTHONS[0] + return [ + create_variant( + [".search_index"], + get_variant_name("Search Index Helpers", host, python=python), + python=python, + host=host, + ) + ] + + +def create_mockupdb_variants(): + host = DEFAULT_HOST + expansions = dict(TEST_NAME="mockupdb") + return [ + create_variant( + [".test-no-orchestration"], + get_variant_name("MockupDB", host), + host=host, + tags=["pr"], + expansions=expansions, + ) + ] + + +def create_doctests_variants(): + host = DEFAULT_HOST + expansions = dict(TEST_NAME="doctest") + return [ + create_variant( + [".test-non-standard .standalone-noauth-nossl"], + get_variant_name("Doctests", host), + host=host, + expansions=expansions, + ) + ] + + +def create_atlas_connect_variants(): + host = DEFAULT_HOST + return [ + create_variant( + [".test-no-orchestration"], + get_variant_name("Atlas connect", host), + tags=["pr"], + host=DEFAULT_HOST, + expansions=dict(TEST_NAME="atlas_connect"), + ) + ] + + +def create_coverage_report_variants(): + return [create_variant(["coverage-report"], "Coverage Report", host=DEFAULT_HOST)] + + +def create_kms_variants(): + tasks = [] + tasks.append(EvgTaskRef(name="test-gcpkms", batchtime=BATCHTIME_DAY)) + tasks.append("test-gcpkms-fail") + tasks.append(EvgTaskRef(name="test-azurekms", batchtime=BATCHTIME_DAY)) + tasks.append("test-azurekms-fail") + return [create_variant(tasks, "KMS", host=HOSTS["debian11"])] + + +def create_import_time_variants(): + return [create_variant(["check-import-time"], "Import Time", host=DEFAULT_HOST)] + + +def create_backport_pr_variants(): + return [create_variant(["backport-pr"], "Backport PR", host=DEFAULT_HOST)] + + +def create_perf_variants(): + host = HOSTS["perf"] + return [create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_DAY)] + + +def create_aws_auth_variants(): + variants = [] + + for host_name in ["ubuntu20", "win64", "macos"]: + expansions = dict() + # PYTHON-5604 - we need to skip ECS tests for now. + tasks = [".auth-aws !.auth-aws-ecs"] + tags = [] + if host_name == "macos": + tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] + tags = ["pr"] + elif host_name == "win64": + tasks = [".auth-aws !.auth-aws-ecs"] + host = HOSTS[host_name] + variant = create_variant( + tasks, + get_variant_name("Auth AWS", host), + host=host, + tags=tags, + expansions=expansions, + ) + variants.append(variant) + return variants + + +def create_no_server_variants(): + host = HOSTS["rhel8"] + name = get_variant_name("No server", host=host) + return [create_variant([".test-no-orchestration"], name, host=host, tags=["pr"])] + + +def create_alternative_hosts_variants(): + batchtime = BATCHTIME_DAY + variants = [] + + version = "latest" + for host_name in OTHER_HOSTS: + expansions = dict(VERSION="latest") + handle_c_ext(C_EXTS[0], expansions) + host = HOSTS[host_name] + tags = [] + if "fips" in host_name.lower(): + expansions["REQUIRE_FIPS"] = "1" + # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. 
+ expansions["PYTHON_BINARY"] = "/usr/bin/python3.11" + if "amazon" in host_name.lower(): + tags.append("pr") + variants.append( + create_variant( + [".test-no-toolchain"], + display_name=get_variant_name("Other hosts", host, version=version), + batchtime=batchtime, + host=host, + tags=tags, + expansions=expansions, + ) + ) + return variants + + +def create_aws_lambda_variants(): + host = HOSTS["rhel8"] + return [create_variant([".aws_lambda"], display_name="FaaS Lambda", host=host)] + + +############## +# Tasks +############## + + +def create_server_version_tasks(): + tasks = [] + task_combos = set() + # All combinations of topology, auth, ssl, and sync should be tested. + for (topology, auth, ssl, sync), python in zip_cycle( + list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"], SYNCS)), ALL_PYTHONS + ): + task_combos.add((topology, auth, ssl, sync, python)) + + # Every python should be tested with sharded cluster, auth, ssl, with sync and async. + for python, sync in product(ALL_PYTHONS, SYNCS): + task_combos.add(("sharded_cluster", "auth", "ssl", sync, python)) + + # Assemble the tasks. + seen = set() + for topology, auth, ssl, sync, python in sorted(task_combos): + combo = f"{topology}-{auth}-{ssl}" + tags = ["server-version", f"python-{python}", combo, sync] + if combo in [ + "standalone-noauth-nossl", + "replica_set-noauth-nossl", + "sharded_cluster-auth-ssl", + ]: + combo = f"{combo}-{sync}" + if combo not in seen: + seen.add(combo) + tags.append("pr") + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + if "t" in python: + tags.append("free-threaded") + if python not in PYPYS and "t" not in python: + expansions["COVERAGE"] = "1" + name = get_task_name( + "test-server-version", + python=python, + sync=sync, + **expansions, + ) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_no_toolchain_tasks(): + tasks = [] + + for topology, sync in zip_cycle(TOPOLOGIES, SYNCS): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "test-no-toolchain", + f"{topology}-{auth}-{ssl}", + ] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + name = get_task_name("test-no-toolchain", sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_test_non_standard_tasks(): + """For variants that set a TEST_NAME.""" + tasks = [] + task_combos = set() + # For each version and topology, rotate through the CPythons. + for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): + pr = version == "latest" + task_combos.add((version, topology, python, pr)) + # For each PyPy and topology, rotate through the MongoDB versions. 
+    for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS):
+        task_combos.add((version, topology, python, False))
+    for version, topology, python, pr in sorted(task_combos):
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = [
+            "test-non-standard",
+            f"server-{version}",
+            f"python-{python}",
+            f"{topology}-{auth}-{ssl}",
+            auth,
+        ]
+        if "t" in python:
+            tags.append("free-threaded")
+        if python in PYPYS:
+            tags.append("pypy")
+        if pr:
+            tags.append("pr")
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version)
+        name = get_task_name("test-non-standard", python=python, **expansions)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["PYTHON_VERSION"] = python
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_test_standard_auth_tasks():
+    """We only use auth on sharded clusters."""
+    tasks = []
+    task_combos = set()
+    # Rotate through the CPython and MongoDB versions.
+    for (version, topology), python in zip_cycle(
+        list(product(ALL_VERSIONS, ["sharded_cluster"])), CPYTHONS
+    ):
+        pr = version == "latest"
+        task_combos.add((version, topology, python, pr))
+    # Rotate through each PyPy and the MongoDB versions.
+    for (python, topology), version in zip_cycle(
+        list(product(PYPYS, ["sharded_cluster"])), ALL_VERSIONS
+    ):
+        task_combos.add((version, topology, python, False))
+    for version, topology, python, pr in sorted(task_combos):
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = [
+            "test-standard-auth",
+            f"server-{version}",
+            f"python-{python}",
+            f"{topology}-{auth}-{ssl}",
+            auth,
+        ]
+        if "t" in python:
+            tags.append("free-threaded")
+        if python in PYPYS:
+            tags.append("pypy")
+        if pr:
+            tags.append("pr")
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version)
+        name = get_task_name("test-standard-auth", python=python, **expansions)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["PYTHON_VERSION"] = python
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_min_deps_tasks():
+    """For variants that support testing with minimum dependencies."""
+    tasks = []
+    for topology in TOPOLOGIES:
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = ["test-min-deps", f"{topology}-{auth}-{ssl}"]
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["TEST_MIN_DEPS"] = "1"
+        name = get_task_name("test-min-deps", python=CPYTHONS[0], sync="sync", **test_vars)
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_standard_tasks():
+    """For variants that do not set a TEST_NAME."""
+    tasks = []
+    task_combos = set()
+    # For each python, topology, and sync/async combination, rotate through the versions.
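+    # (As currently configured this is 7 Pythons x 3 topologies x 2 sync modes =
+    # 42 combinations, each assigned one of the 8 server versions round-robin.)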
+ for (python, topology, sync), version in zip_cycle( + list(product(CPYTHONS + PYPYS, TOPOLOGIES, SYNCS)), ALL_VERSIONS + ): + pr = version == "latest" and python not in PYPYS + task_combos.add((version, topology, python, sync, pr)) + + for version, topology, python, sync, pr in sorted(task_combos): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "test-standard", + f"server-{version}", + f"python-{python}", + f"{topology}-{auth}-{ssl}", + sync, + ] + if "t" in python: + tags.append("free-threaded") + if python in PYPYS: + tags.append("pypy") + if pr: + tags.append("pr") + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) + name = get_task_name("test-standard", python=python, sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_no_orchestration_tasks(): + tasks = [] + for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: + tags = [ + "test-no-orchestration", + f"python-{python}", + ] + name = get_task_name("test-no-orchestration", python=python) + assume_func = FunctionCall(func="assume ec2 role") + test_vars = dict(PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands = [assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) + return tasks + + +def create_kms_tasks(): + tasks = [] + for kms_type in ["gcp", "azure"]: + for success in [True, False]: + name = f"test-{kms_type}kms" + sub_test_name = kms_type + tags = [] + if not success: + name += "-fail" + sub_test_name += "-fail" + tags.append("pr") + commands = [] + if not success: + commands.append(FunctionCall(func="run server")) + test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands.append(test_func) + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) + return tasks + + +def create_aws_tasks(): + tasks = [] + aws_test_types = [ + "regular", + "assume-role", + "ec2", + "env-creds", + "session-creds", + "web-identity", + "ecs", + ] + for version, test_type, python in zip_cycle(get_versions_from("4.4"), aws_test_types, CPYTHONS): + base_name = f"test-auth-aws-{version}" + base_tags = ["auth-aws"] + server_vars = dict(AUTH_AWS="1", VERSION=version) + server_func = FunctionCall(func="run server", vars=server_vars) + assume_func = FunctionCall(func="assume ec2 role") + tags = [*base_tags, f"auth-aws-{test_type}"] + if "t" in python: + tags.append("free-threaded") + name = get_task_name(f"{base_name}-{test_type}", python=python) + test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [server_func, assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + if test_type == "web-identity": + tags = [*base_tags, "auth-aws-web-identity"] + name = get_task_name(f"{base_name}-web-identity-session-name", python=python) + test_vars = dict( + TEST_NAME="auth_aws", + SUB_TEST_NAME="web-identity", + AWS_ROLE_SESSION_NAME="test", + PYTHON_VERSION=python, + ) + if "t" in python: + tags.append("free-threaded") + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [server_func, assume_func, 
test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + return tasks + + +def create_oidc_tasks(): + tasks = [] + for sub_test in ["default", "azure", "gcp", "eks", "aks", "gke"]: + vars = dict(TEST_NAME="auth_oidc", SUB_TEST_NAME=sub_test) + test_func = FunctionCall(func="run tests", vars=vars) + task_name = f"test-auth-oidc-{sub_test}" + tags = ["auth_oidc"] + if sub_test != "default": + tags.append("auth_oidc_remote") + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + + return tasks + + +def create_mod_wsgi_tasks(): + tasks = [] + for (test, topology), python in zip_cycle( + product(["standalone", "embedded-mode"], ["standalone", "replica_set"]), CPYTHONS + ): + if "t" in python: + continue + if test == "standalone": + task_name = "mod-wsgi-" + else: + task_name = "mod-wsgi-embedded-mode-" + task_name += topology.replace("_", "-") + task_name = get_task_name(task_name, python=python) + server_vars = dict(TOPOLOGY=topology, PYTHON_VERSION=python) + server_func = FunctionCall(func="run server", vars=server_vars) + vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=vars) + tags = ["mod_wsgi", "pr"] + commands = [server_func, test_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + +def _create_ocsp_tasks(algo, variant, server_type, base_task_name): + tasks = [] + file_name = f"{algo}-basic-tls-ocsp-{variant}.json" + + for version in get_versions_from("4.4"): + if version == "latest": + python = MIN_MAX_PYTHON[-1] + else: + python = MIN_MAX_PYTHON[0] + + vars = dict( + ORCHESTRATION_FILE=file_name, + OCSP_SERVER_TYPE=server_type, + TEST_NAME="ocsp", + PYTHON_VERSION=python, + VERSION=version, + ) + test_func = FunctionCall(func="run tests", vars=vars) + + tags = ["ocsp", f"ocsp-{algo}", version] + if "disableStapling" not in variant: + tags.append("ocsp-staple") + if algo == "valid-cert-server-staples" and version == "latest": + tags.append("pr") + + task_name = get_task_name( + f"test-ocsp-{algo}-{base_task_name}", + python=python, + version=version, + ) + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + + return tasks + + +def create_aws_lambda_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + vars = dict(TEST_NAME="aws_lambda") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-aws-lambda-deployed" + tags = ["aws_lambda"] + commands = [assume_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + +def create_search_index_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + server_func = FunctionCall(func="run server", vars=dict(TEST_NAME="search_index")) + vars = dict(TEST_NAME="search_index") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-search-index-helpers" + tags = ["search_index"] + commands = [assume_func, server_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + +def create_perf_tasks(): + tasks = [] + for version, ssl, sync in product(["8.0"], ["ssl", "nossl"], ["sync", "async"]): + vars = dict(VERSION=f"v{version}-perf", SSL=ssl) + server_func = FunctionCall(func="run server", vars=vars) + vars = dict(TEST_NAME="perf", SUB_TEST_NAME=sync) + test_func = FunctionCall(func="run tests", vars=vars) + attach_func = FunctionCall(func="attach benchmark test results") + send_func = FunctionCall(func="send dashboard data") + task_name = 
f"perf-{version}-standalone" + if ssl == "ssl": + task_name += "-ssl" + if sync == "async": + task_name += "-async" + tags = ["perf"] + commands = [server_func, test_func, attach_func, send_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + +def create_getdata_tasks(): + # Wildcard task. Do you need to find out what tools are available and where? + # Throw it here, and execute this task on all buildvariants + cmd = get_subprocess_exec(args=[".evergreen/scripts/run-getdata.sh"]) + return [EvgTask(name="getdata", commands=[cmd])] + + +def create_coverage_report_tasks(): + tags = ["coverage", "pr"] + task_name = "coverage-report" + # BUILD-3165: We can't use "*" (all tasks) and specify "variant". + # Instead list out all coverage tasks using tags. + # Run the coverage task even if some tasks fail. + # Run the coverage task even if some tasks are not scheduled in a patch build. + task_deps = [ + EvgTaskDependency( + name=".server-version", variant=".coverage_tag", status="*", patch_optional=True + ) + ] + cmd = FunctionCall(func="download and merge coverage") + return [EvgTask(name=task_name, tags=tags, depends_on=task_deps, commands=[cmd])] + + +def create_import_time_tasks(): + name = "check-import-time" + tags = ["pr"] + args = [".evergreen/scripts/check-import-time.sh", "${revision}", "${github.amrom.workers.devmit}"] + cmd = get_subprocess_exec(args=args) + return [EvgTask(name=name, tags=tags, commands=[cmd])] + + +def create_backport_pr_tasks(): + name = "backport-pr" + args = [ + "${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh", + "mongodb", + "mongo-python-driver", + "${github.amrom.workers.devmit}", + ] + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + cmd = get_subprocess_exec(args=args, include_expansions_in_env=include_expansions) + assume_func = FunctionCall(func="assume ec2 role") + return [EvgTask(name=name, commands=[assume_func, cmd], allowed_requesters=["commit"])] + + +def create_ocsp_tasks(): + tasks = [] + tests = [ + ("disableStapling", "valid", "valid-cert-server-does-not-staple"), + ("disableStapling", "revoked", "invalid-cert-server-does-not-staple"), + ("disableStapling", "valid-delegate", "delegate-valid-cert-server-does-not-staple"), + ("disableStapling", "revoked-delegate", "delegate-invalid-cert-server-does-not-staple"), + ("disableStapling", "no-responder", "soft-fail"), + ("mustStaple", "valid", "valid-cert-server-staples"), + ("mustStaple", "revoked", "invalid-cert-server-staples"), + ("mustStaple", "valid-delegate", "delegate-valid-cert-server-staples"), + ("mustStaple", "revoked-delegate", "delegate-invalid-cert-server-staples"), + ( + "mustStaple-disableStapling", + "revoked", + "malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "revoked-delegate", + "delegate-malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "no-responder", + "malicious-no-responder-mustStaple-server-does-not-staple", + ), + ] + for algo in ["ecdsa", "rsa"]: + for variant, server_type, base_task_name in tests: + new_tasks = _create_ocsp_tasks(algo, variant, server_type, base_task_name) + tasks.extend(new_tasks) + + return tasks + + +############## +# Functions +############## + + +def create_upload_coverage_func(): + # Upload the coverage report for all tasks in a single build to the same directory. 
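+    # (One object per task under a shared prefix, i.e.
+    # coverage/<revision>/<version_id>/coverage/coverage.<build_variant>.<task_name>,
+    # which the "download and merge coverage" function later combines.)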
+ remote_file = ( + "coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name}" + ) + display_name = "Raw Coverage Report" + cmd = get_s3_put( + local_file="src/.coverage", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + return "upload coverage", [get_assume_role(), cmd] + + +def create_download_and_merge_coverage_func(): + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args = [ + ".evergreen/scripts/download-and-merge-coverage.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + merge_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + combine_cmd = get_subprocess_exec(args=[".evergreen/combine-coverage.sh"]) + # Upload the resulting html coverage report. + args = [ + ".evergreen/scripts/upload-coverage-report.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + upload_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + display_name = "Coverage Report HTML" + remote_file = "coverage/${revision}/${version_id}/htmlcov/index.html" + put_cmd = get_s3_put( + local_file="src/htmlcov/index.html", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + cmds = [get_assume_role(), merge_cmd, combine_cmd, upload_cmd, put_cmd] + return "download and merge coverage", cmds + + +def create_upload_mo_artifacts_func(): + include = ["./**.core", "./**.mdmp"] # Windows: minidumps + archive_cmd = archive_targz_pack(target="mongo-coredumps.tgz", source_dir="./", include=include) + display_name = "Core Dumps - Execution" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz" + s3_dumps = get_s3_put( + local_file="mongo-coredumps.tgz", remote_file=remote_file, display_name=display_name + ) + display_name = "drivers-tools-logs.tar.gz" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz" + s3_logs = get_s3_put( + local_file="${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz", + remote_file=remote_file, + display_name=display_name, + ) + cmds = [get_assume_role(), archive_cmd, s3_dumps, s3_logs] + return "upload mo artifacts", cmds + + +def create_fetch_source_func(): + # Executes clone and applies the submitted patch, if any. + cmd = git_get_project(directory="src") + return "fetch source", [cmd] + + +def create_setup_system_func(): + # Make an evergreen expansion file with dynamic values. + includes = ["is_patch", "project", "version_id"] + args = [".evergreen/scripts/setup-system.sh"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + # Load the expansion file to make an evergreen variable with the current unique version. 
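+    # (expansions.update merges the key/value pairs in src/expansion.yml, which
+    # the setup script is expected to write, into the task's expansions.)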
+ expansion_cmd = expansions_update(file="src/expansion.yml") + return "setup system", [setup_cmd, expansion_cmd] + + +def create_upload_test_results_func(): + results_cmd = attach_results(file_location="${DRIVERS_TOOLS}/results.json") + xresults_cmd = attach_xunit_results(file="src/xunit-results/TEST-*.xml") + return "upload test results", [results_cmd, xresults_cmd] + + +def create_run_server_func(): + includes = [ + "VERSION", + "TOPOLOGY", + "AUTH", + "SSL", + "ORCHESTRATION_FILE", + "PYTHON_BINARY", + "PYTHON_VERSION", + "STORAGE_ENGINE", + "REQUIRE_API_VERSION", + "DRIVERS_TOOLS", + "TEST_CRYPT_SHARED", + "AUTH_AWS", + "LOAD_BALANCER", + "LOCAL_ATLAS", + "NO_EXT", + ] + args = [".evergreen/just.sh", "run-server", "${TEST_NAME}"] + sub_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + expansion_cmd = expansions_update(file="${DRIVERS_TOOLS}/mo-expansion.yml") + return "run server", [sub_cmd, expansion_cmd] + + +def create_run_tests_func(): + includes = [ + "AUTH", + "SSL", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "COVERAGE", + "PYTHON_BINARY", + "LIBMONGOCRYPT_URL", + "MONGODB_URI", + "PYTHON_VERSION", + "DISABLE_TEST_COMMANDS", + "GREEN_FRAMEWORK", + "NO_EXT", + "COMPRESSORS", + "MONGODB_API_VERSION", + "REQUIRE_API_VERSION", + "DEBUG_LOG", + "DISABLE_FLAKY", + "ORCHESTRATION_FILE", + "OCSP_SERVER_TYPE", + "VERSION", + "IS_WIN32", + "REQUIRE_FIPS", + "TEST_MIN_DEPS", + ] + args = [".evergreen/just.sh", "setup-tests", "${TEST_NAME}", "${SUB_TEST_NAME}"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + test_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "run-tests"]) + return "run tests", [setup_cmd, test_cmd] + + +def create_cleanup_func(): + cmd = get_subprocess_exec(args=[".evergreen/scripts/cleanup.sh"]) + return "cleanup", [cmd] + + +def create_teardown_system_func(): + tests_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "teardown-tests"]) + drivers_cmd = get_subprocess_exec(args=["${DRIVERS_TOOLS}/.evergreen/teardown.sh"]) + return "teardown system", [tests_cmd, drivers_cmd] + + +def create_assume_ec2_role_func(): + cmd = ec2_assume_role(role_arn="${aws_test_secrets_role}", duration_seconds=3600) + return "assume ec2 role", [cmd] + + +def create_attach_benchmark_test_results_func(): + cmd = attach_results(file_location="src/report.json") + return "attach benchmark test results", [cmd] + + +def create_send_dashboard_data_func(): + includes = [ + "requester", + "revision_order_id", + "project_id", + "version_id", + "build_variant", + "parsed_order_id", + "task_name", + "task_id", + "execution", + "is_mainline", + ] + cmds = [ + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission-setup.sh"] + ), + expansions_update(file="src/expansion.yml"), + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission.sh"] + ), + ] + return "send dashboard data", cmds + + +mod = sys.modules[__name__] +write_variants_to_file(mod) +write_tasks_to_file(mod) +write_functions_to_file(mod) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py new file mode 100644 index 0000000000..4eb6bcb0dc --- /dev/null +++ b/.evergreen/scripts/generate_config_utils.py @@ -0,0 +1,374 @@ +from __future__ import annotations + +from dataclasses import dataclass +from inspect import getmembers, isfunction +from itertools import cycle, zip_longest +from pathlib import Path +from 
typing import Any + +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import ( + EvgCommandType, + ec2_assume_role, + s3_put, + subprocess_exec, +) +from shrub.v3.evg_project import EvgProject +from shrub.v3.evg_task import EvgTaskRef +from shrub.v3.shrub_service import ShrubService + +############## +# Globals +############## + +ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14t", "3.14"] +PYPYS = ["pypy3.10"] +ALL_PYTHONS = CPYTHONS + PYPYS +MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] +BATCHTIME_WEEK = 10080 +BATCHTIME_DAY = 1440 +AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] +TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] +C_EXTS = ["without_ext", "with_ext"] +SYNCS = ["sync", "async"] +DISPLAY_LOOKUP = dict( + ssl=dict(ssl="SSL", nossl="NoSSL"), + auth=dict(auth="Auth", noauth="NoAuth"), + topology=dict( + standalone="Standalone", replica_set="Replica Set", sharded_cluster="Sharded Cluster" + ), + test_suites=dict(default="Sync", default_async="Async"), + sync={"sync": "Sync", "async": "Async"}, + coverage={"1": "cov"}, + no_ext={"1": "No C"}, + test_min_deps={True: "Min Deps"}, +) +HOSTS = dict() + + +@dataclass +class Host: + name: str + run_on: str + display_name: str + variables: dict[str, str] | None + + +# Hosts with toolchains. +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", dict()) +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", dict()) +HOSTS["macos"] = Host("macos", "macos-14", "macOS", dict()) +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", dict()) +HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) +HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) +HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) +HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) +HOSTS["debian11"] = Host("debian11", "debian11-small", "Debian11", dict()) +DEFAULT_HOST = HOSTS["rhel8"] + +# Other hosts +OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64", "Amazon2023"] +for name, run_on in zip( + OTHER_HOSTS, + [ + "rhel92-fips", + "rhel8-zseries-small", + "rhel8-power-small", + "rhel82-arm64-small", + "amazon2023-arm64-latest-large-m8g", + ], +): + HOSTS[name] = Host(name, run_on, name, dict()) + +############## +# Helpers +############## + + +def create_variant_generic( + tasks: list[str | EvgTaskRef], + display_name: str, + *, + host: Host | str | None = None, + default_run_on="rhel87-small", + expansions: dict | None = None, + **kwargs: Any, +) -> BuildVariant: + """Create a build variant for the given inputs.""" + task_refs = [] + if isinstance(host, str): + host = HOSTS[host] + for t in tasks: + if isinstance(t, EvgTaskRef): + task_refs.append(t) + else: + task_refs.append(EvgTaskRef(name=t)) + expansions = expansions and expansions.copy() or dict() + if "run_on" in kwargs: + run_on = kwargs.pop("run_on") + elif host: + run_on = [host.run_on] + if host.variables: + expansions.update(host.variables) + else: + run_on = [default_run_on] + if isinstance(run_on, str): + run_on = [run_on] + name = display_name.replace(" ", "-").replace("*-", "").lower() + return BuildVariant( + name=name, + display_name=display_name, + tasks=task_refs, + expansions=expansions or None, + run_on=run_on, + **kwargs, + ) + + +def 
create_variant(
+    tasks: list[str | EvgTaskRef],
+    display_name: str,
+    *,
+    version: str | None = None,
+    host: Host | str | None = None,
+    python: str | None = None,
+    expansions: dict | None = None,
+    **kwargs: Any,
+) -> BuildVariant:
+    expansions = expansions and expansions.copy() or dict()
+    if version:
+        expansions["VERSION"] = version
+    if python:
+        expansions["PYTHON_BINARY"] = get_python_binary(python, host)
+    return create_variant_generic(
+        tasks, display_name, version=version, host=host, expansions=expansions, **kwargs
+    )
+
+
+def get_python_binary(python: str, host: Host) -> str:
+    """Get the appropriate python binary given a python version and host."""
+    name = host.name
+    if name in ["win64", "win32"]:
+        if name == "win32":
+            base = "C:/python/32"
+        else:
+            base = "C:/python"
+        python_dir = python.replace(".", "").replace("t", "")
+        return f"{base}/Python{python_dir}/python{python}.exe"
+
+    if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]:
+        return f"/opt/python/{python}/bin/python3"
+
+    if name in ["macos", "macos-arm64"]:
+        bin_name = "python3t" if "t" in python else "python3"
+        python_dir = python.replace("t", "")
+        framework_dir = "PythonT" if "t" in python else "Python"
+        return f"/Library/Frameworks/{framework_dir}.Framework/Versions/{python_dir}/bin/{bin_name}"
+
+    raise ValueError(f"no match found for python {python} on {name}")
+
+
+def get_versions_from(min_version: str) -> list[str]:
+    """Get all server versions starting from a minimum version."""
+    min_version_float = float(min_version)
+    rapid_latest = ["rapid", "latest"]
+    versions = [v for v in ALL_VERSIONS if v not in rapid_latest]
+    return [v for v in versions if float(v) >= min_version_float] + rapid_latest
+
+
+def get_versions_until(max_version: str) -> list[str]:
+    """Get all server versions up to a max version."""
+    max_version_float = float(max_version)
+    versions = [v for v in ALL_VERSIONS if v not in ["rapid", "latest"]]
+    versions = [v for v in versions if float(v) <= max_version_float]
+    if not len(versions):
+        raise ValueError(f"No server versions found <= {max_version}")
+    return versions
+
+
+def get_common_name(base: str, sep: str, **kwargs) -> str:
+    display_name = base
+    version = kwargs.pop("VERSION", None)
+    version = version or kwargs.pop("version", None)
+    if version:
+        if version not in ["rapid", "latest"]:
+            version = f"v{version}"
+        display_name = f"{display_name}{sep}{version}"
+    for key, value in kwargs.items():
+        name = value
+        if key.lower() == "python":
+            if not value.startswith("pypy"):
+                name = f"Python{value}"
+            else:
+                name = f"PyPy{value.replace('pypy', '')}"
+        elif key.lower() in DISPLAY_LOOKUP and value in DISPLAY_LOOKUP[key.lower()]:
+            name = DISPLAY_LOOKUP[key.lower()][value]
+        else:
+            continue
+        display_name = f"{display_name}{sep}{name}"
+    return display_name
+
+
+def get_variant_name(base: str, host: str | Host | None = None, **kwargs) -> str:
+    """Get the display name of a variant."""
+    display_name = base
+    if isinstance(host, str):
+        host = HOSTS[host]
+    if host is not None:
+        display_name += f" {host.display_name}"
+    return get_common_name(display_name, " ", **kwargs)
+
+
+def get_task_name(base: str, **kwargs):
+    return get_common_name(base, "-", **kwargs).replace(" ", "-").lower()
+
+
+def zip_cycle(*iterables, empty_default=None):
+    """Get all combinations of the inputs, cycling over the shorter list(s).
+
+    For example, list(zip_cycle("ab", [1, 2, 3])) == [("a", 1), ("b", 2), ("a", 3)].
+    """
+    cycles = [cycle(i) for i in iterables]
+    for _ in zip_longest(*iterables):
+        yield tuple(next(i, empty_default) for i in cycles)
+
+
+def
handle_c_ext(c_ext, expansions) -> None: + """Handle c extension option.""" + if c_ext == C_EXTS[0]: + expansions["NO_EXT"] = "1" + + +def get_standard_auth_ssl(topology): + auth = "auth" if topology == "sharded_cluster" else "noauth" + ssl = "nossl" if topology == "standalone" else "ssl" + return auth, ssl + + +def get_assume_role(**kwargs): + kwargs.setdefault("command_type", EvgCommandType.SETUP) + kwargs.setdefault("role_arn", "${assume_role_arn}") + return ec2_assume_role(**kwargs) + + +def get_subprocess_exec(**kwargs): + kwargs.setdefault("binary", "bash") + kwargs.setdefault("working_dir", "src") + kwargs.setdefault("command_type", EvgCommandType.TEST) + return subprocess_exec(**kwargs) + + +def get_s3_put(**kwargs): + kwargs["aws_key"] = "${AWS_ACCESS_KEY_ID}" + kwargs["aws_secret"] = "${AWS_SECRET_ACCESS_KEY}" # noqa:S105 + kwargs["aws_session_token"] = "${AWS_SESSION_TOKEN}" # noqa:S105 + kwargs["bucket"] = "${bucket_name}" + kwargs.setdefault("optional", "true") + kwargs.setdefault("permissions", "public-read") + kwargs.setdefault("content_type", "${content_type|application/x-gzip}") + kwargs.setdefault("command_type", EvgCommandType.SETUP) + return s3_put(**kwargs) + + +def generate_yaml(tasks=None, variants=None): + """Generate the yaml for a given set of tasks and variants.""" + project = EvgProject(tasks=tasks, buildvariants=variants) + out = ShrubService.generate_yaml(project) + # Dedent by two spaces to match what we use in config.yml + lines = [line[2:] for line in out.splitlines()] + print("\n".join(lines)) + + +################## +# Generate Config +################## + + +def write_variants_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "variants.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("buildvariants:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if not name.endswith("_variants"): + continue + if not name.startswith("create_"): + raise ValueError("Variant creators must start with create_") + title = name.replace("create_", "").replace("_variants", "").replace("_", " ").capitalize() + project = EvgProject(tasks=None, buildvariants=func()) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def write_tasks_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "tasks.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("tasks:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_tasks"): + continue + if not name.startswith("create_"): + raise ValueError("Task creators must start with create_") + title = name.replace("create_", "").replace("_tasks", "").replace("_", " ").capitalize() + project = EvgProject(tasks=func(), buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def 
write_functions_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "functions.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("functions:\n") + + functions = dict() + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_func"): + continue + if not name.startswith("create_"): + raise ValueError("Function creators must start with create_") + title = name.replace("create_", "").replace("_func", "").replace("_", " ").capitalize() + func_name, cmds = func() + functions = dict() + functions[func_name] = cmds + project = EvgProject(functions=functions, tasks=None, buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title}\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh new file mode 100755 index 0000000000..23d865d0d8 --- /dev/null +++ b/.evergreen/scripts/install-dependencies.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Install the dependencies needed for an evergreen run. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" > /dev/null + +# Source the env files to pick up common variables. +if [ -f $HERE/env.sh ]; then + . $HERE/env.sh +fi + +# Set up the default bin directory. +if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + PYMONGO_BIN_DIR="$HOME/.local/bin" + export PATH="$PYMONGO_BIN_DIR:$PATH" +fi + +# Helper function to pip install a dependency using a temporary python env. +function _pip_install() { + _HERE=$(dirname ${BASH_SOURCE:-$0}) + . $_HERE/../utils.sh + _VENV_PATH=$(mktemp -d) + if [ "Windows_NT" = "${OS:-}" ]; then + _VENV_PATH=$(cygpath -m $_VENV_PATH) + fi + echo "Installing $2 using pip..." + createvirtualenv "$(find_python3)" $_VENV_PATH + python -m pip install $1 + _suffix="" + if [ "Windows_NT" = "${OS:-}" ]; then + _suffix=".exe" + fi + ln -s "$(which $2)" $PYMONGO_BIN_DIR/${2}${_suffix} + # uv also comes with a uvx binary. + if [ $2 == "uv" ]; then + ln -s "$(which uvx)" $PYMONGO_BIN_DIR/uvx${_suffix} + fi + echo "Installed to ${PYMONGO_BIN_DIR}" + echo "Installing $2 using pip... done." +} + +# Ensure just is installed. +if ! command -v just &>/dev/null; then + # On most systems we can install directly. + _TARGET="" + if [ "Windows_NT" = "${OS:-}" ]; then + _TARGET="--target x86_64-pc-windows-msvc" + fi + _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} + echo "Installing just..." + mkdir -p "$_BIN_DIR" 2>/dev/null || true + curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { + # Remove just file if it exists (can be created if there was an install error). + rm -f ${_BIN_DIR}/just + _pip_install rust-just just + } + echo "Installing just... done." +fi + +# Ensure uv is installed. +if ! command -v uv &>/dev/null; then + _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} + echo "Installing uv..." + # On most systems we can install directly. + curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { + _pip_install uv uv + } + if [ "Windows_NT" = "${OS:-}" ]; then + chmod +x "$(cygpath -u $_BIN_DIR)/uv.exe" + fi + echo "Installing uv... done." 
+fi
+
+popd > /dev/null
diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py
new file mode 100644
index 0000000000..e3833ae63a
--- /dev/null
+++ b/.evergreen/scripts/kms_tester.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+import os
+
+from utils import (
+    DRIVERS_TOOLS,
+    LOGGER,
+    TMP_DRIVER_FILE,
+    create_archive,
+    read_env,
+    run_command,
+    write_env,
+)
+
+DIRS = dict(
+    gcp=f"{DRIVERS_TOOLS}/.evergreen/csfle/gcpkms",
+    azure=f"{DRIVERS_TOOLS}/.evergreen/csfle/azurekms",
+)
+
+
+def _setup_azure_vm(base_env: dict[str, str]) -> None:
+    LOGGER.info("Setting up Azure VM...")
+    azure_dir = DIRS["azure"]
+    env = base_env.copy()
+    env["AZUREKMS_SRC"] = TMP_DRIVER_FILE
+    env["AZUREKMS_DST"] = "~/"
+    run_command(f"{azure_dir}/copy-file.sh", env=env)
+
+    env = base_env.copy()
+    env["AZUREKMS_CMD"] = "tar xf mongo-python-driver.tgz"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+
+    env["AZUREKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+
+    env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+    LOGGER.info("Setting up Azure VM... done.")
+
+
+def _setup_gcp_vm(base_env: dict[str, str]) -> None:
+    LOGGER.info("Setting up GCP VM...")
+    gcp_dir = DIRS["gcp"]
+    env = base_env.copy()
+    env["GCPKMS_SRC"] = TMP_DRIVER_FILE
+    env["GCPKMS_DST"] = f"{env['GCPKMS_INSTANCENAME']}:"
+    run_command(f"{gcp_dir}/copy-file.sh", env=env)
+
+    env = base_env.copy()
+    env["GCPKMS_CMD"] = "tar xf mongo-python-driver.tgz"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+
+    env["GCPKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+
+    env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+    LOGGER.info("Setting up GCP VM... done.")
+
+
+def _load_kms_config(sub_test_target: str) -> dict[str, str]:
+    target_dir = DIRS[sub_test_target]
+    config = read_env(f"{target_dir}/secrets-export.sh")
+    base_env = os.environ.copy()
+    for key, value in config.items():
+        base_env[key] = str(value)
+    return base_env
+
+
+def setup_kms(sub_test_name: str) -> None:
+    if "-" in sub_test_name:
+        sub_test_target, sub_test_type = sub_test_name.split("-")
+    else:
+        sub_test_target = sub_test_name
+        sub_test_type = ""
+
+    assert sub_test_target in ["azure", "gcp"], sub_test_target
+    assert sub_test_type in ["", "remote", "fail"], sub_test_type
+    success = sub_test_type != "fail"
+    kms_dir = DIRS[sub_test_target]
+
+    if sub_test_target == "azure":
+        write_env("TEST_FLE_AZURE_AUTO")
+    else:
+        write_env("TEST_FLE_GCP_AUTO")
+
+    write_env("SUCCESS", success)
+
+    # For remote tests, there is no further work required.
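+    # ("azure-remote" and "gcp-remote" run on the VM itself, where the
+    # corresponding non-remote setup task has already provisioned everything.)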
+ if sub_test_type == "remote": + return + + if sub_test_target == "azure": + run_command("./setup-secrets.sh", cwd=kms_dir) + + if success: + create_archive() + if sub_test_target == "azure": + os.environ["AZUREKMS_VMNAME_PREFIX"] = "PYTHON_DRIVER" + + # Found using "az vm image list --output table" + os.environ[ + "AZUREKMS_IMAGE" + ] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" + else: + os.environ["GCPKMS_IMAGEFAMILY"] = "debian-12" + + run_command("./setup.sh", cwd=kms_dir) + base_env = _load_kms_config(sub_test_target) + + if sub_test_target == "azure": + _setup_azure_vm(base_env) + else: + _setup_gcp_vm(base_env) + + if sub_test_target == "azure": + config = read_env(f"{kms_dir}/secrets-export.sh") + if success: + write_env("AZUREKMS_VMNAME", config["AZUREKMS_VMNAME"]) + + write_env("KEY_NAME", config["AZUREKMS_KEYNAME"]) + write_env("KEY_VAULT_ENDPOINT", config["AZUREKMS_KEYVAULTENDPOINT"]) + + +def test_kms_send_to_remote(sub_test_name: str) -> None: + env = _load_kms_config(sub_test_name) + if sub_test_name == "azure": + key_name = os.environ["KEY_NAME"] + key_vault_endpoint = os.environ["KEY_VAULT_ENDPOINT"] + env[ + "AZUREKMS_CMD" + ] = f'KEY_NAME="{key_name}" KEY_VAULT_ENDPOINT="{key_vault_endpoint}" bash ./.evergreen/just.sh run-tests' + else: + env["GCPKMS_CMD"] = "./.evergreen/just.sh run-tests" + cmd = f"{DIRS[sub_test_name]}/run-command.sh" + run_command(cmd, env=env) + + +def teardown_kms(sub_test_name: str) -> None: + run_command(f"{DIRS[sub_test_name]}/teardown.sh") + + +if __name__ == "__main__": + setup_kms() diff --git a/.evergreen/scripts/mod_wsgi_tester.py b/.evergreen/scripts/mod_wsgi_tester.py new file mode 100644 index 0000000000..5968849068 --- /dev/null +++ b/.evergreen/scripts/mod_wsgi_tester.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import os +import sys +import time +import urllib.error +import urllib.request +from pathlib import Path +from shutil import which + +from utils import LOGGER, ROOT, run_command, write_env + + +def make_request(url, timeout=10): + for _ in range(int(timeout)): + try: + urllib.request.urlopen(url) # noqa: S310 + return + except urllib.error.HTTPError: + pass + time.sleep(1) + raise TimeoutError(f"Failed to access {url}") + + +def setup_mod_wsgi(sub_test_name: str) -> None: + env = os.environ.copy() + if sub_test_name == "embedded": + env["MOD_WSGI_CONF"] = "mod_wsgi_test_embedded.conf" + elif sub_test_name == "standalone": + env["MOD_WSGI_CONF"] = "mod_wsgi_test.conf" + else: + raise ValueError("mod_wsgi sub test must be either 'standalone' or 'embedded'") + write_env("MOD_WSGI_CONF", env["MOD_WSGI_CONF"]) + apache = which("apache2") + if not apache and Path("/usr/lib/apache2/mpm-prefork/apache2").exists(): + apache = "/usr/lib/apache2/mpm-prefork/apache2" + if apache: + apache_config = "apache24ubuntu161404.conf" + else: + apache = which("httpd") + if not apache: + raise ValueError("Could not find apache2 or httpd") + apache_config = "apache22amazon.conf" + python_version = ".".join(str(val) for val in sys.version_info[:2]) + mod_wsgi_version = 4 + so_file = f"/opt/python/mod_wsgi/python_version/{python_version}/mod_wsgi_version/{mod_wsgi_version}/mod_wsgi.so" + write_env("MOD_WSGI_SO", so_file) + env["MOD_WSGI_SO"] = so_file + env["PYTHONHOME"] = f"/opt/python/{python_version}" + env["PROJECT_DIRECTORY"] = project_directory = str(ROOT) + write_env("APACHE_BINARY", apache) + write_env("APACHE_CONFIG", apache_config) + uri1 = f"http://localhost:8080/interpreter1{project_directory}" + 
write_env("TEST_URI1", uri1) + uri2 = f"http://localhost:8080/interpreter2{project_directory}" + write_env("TEST_URI2", uri2) + run_command(f"{apache} -k start -f {ROOT}/test/mod_wsgi_test/{apache_config}", env=env) + + # Wait for the endpoints to be available. + try: + make_request(uri1, 10) + make_request(uri2, 10) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def test_mod_wsgi() -> None: + sys.path.insert(0, ROOT) + from test.mod_wsgi_test.test_client import main, parse_args + + uri1 = os.environ["TEST_URI1"] + uri2 = os.environ["TEST_URI2"] + args = f"-n 25000 -t 100 parallel {uri1} {uri2}" + try: + main(*parse_args(args.split())) + + args = f"-n 25000 serial {uri1} {uri2}" + main(*parse_args(args.split())) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def teardown_mod_wsgi() -> None: + apache = os.environ["APACHE_BINARY"] + apache_config = os.environ["APACHE_CONFIG"] + + run_command(f"{apache} -k stop -f {ROOT}/test/mod_wsgi_test/{apache_config}") + + +if __name__ == "__main__": + setup_mod_wsgi() diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py new file mode 100644 index 0000000000..ac2960371e --- /dev/null +++ b/.evergreen/scripts/oidc_tester.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import os + +from utils import ( + DRIVERS_TOOLS, + TMP_DRIVER_FILE, + create_archive, + read_env, + run_command, + write_env, +) + +K8S_NAMES = ["aks", "gke", "eks"] +K8S_REMOTE_NAMES = [f"{n}-remote" for n in K8S_NAMES] + + +def _get_target_dir(sub_test_name: str) -> str: + if sub_test_name == "default": + target_dir = "auth_oidc" + elif sub_test_name.startswith("azure"): + target_dir = "auth_oidc/azure" + elif sub_test_name.startswith("gcp"): + target_dir = "auth_oidc/gcp" + elif sub_test_name in K8S_NAMES + K8S_REMOTE_NAMES: + target_dir = "auth_oidc/k8s" + else: + raise ValueError(f"Invalid sub test name '{sub_test_name}'") + return f"{DRIVERS_TOOLS}/.evergreen/{target_dir}" + + +def setup_oidc(sub_test_name: str) -> dict[str, str] | None: + target_dir = _get_target_dir(sub_test_name) + env = os.environ.copy() + + if sub_test_name == "eks" and "AWS_ACCESS_KEY_ID" in os.environ: + # Store AWS creds for kubectl access. 
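+        # (write_env records them so the later phases of the EKS run can pick
+        # them up again.)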
+        for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]:
+            if key in os.environ:
+                write_env(key, os.environ[key])
+
+    if sub_test_name == "azure":
+        env["AZUREOIDC_VMNAME_PREFIX"] = "PYTHON_DRIVER"
+    if "-remote" not in sub_test_name:
+        if sub_test_name == "azure":
+            # Found using "az vm image list --output table"
+            env["AZUREOIDC_IMAGE"] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest"
+        else:
+            env["GCPKMS_IMAGEFAMILY"] = "debian-12"
+        run_command(f"bash {target_dir}/setup.sh", env=env)
+    if sub_test_name in K8S_NAMES:
+        run_command(f"bash {target_dir}/setup-pod.sh {sub_test_name}")
+        run_command(f"bash {target_dir}/run-self-test.sh")
+        return None
+
+    source_file = None
+    if sub_test_name == "default":
+        source_file = f"{target_dir}/secrets-export.sh"
+    elif sub_test_name in ["azure-remote", "gcp-remote"]:
+        source_file = "./secrets-export.sh"
+    if sub_test_name in K8S_REMOTE_NAMES:
+        return os.environ.copy()
+    if source_file is None:
+        return None
+
+    config = read_env(source_file)
+    write_env("MONGODB_URI_SINGLE", config["MONGODB_URI_SINGLE"])
+    write_env("MONGODB_URI", config["MONGODB_URI"])
+    write_env("DB_IP", config["MONGODB_URI"])
+
+    if sub_test_name == "default":
+        write_env("OIDC_TOKEN_FILE", config["OIDC_TOKEN_FILE"])
+        write_env("OIDC_TOKEN_DIR", config["OIDC_TOKEN_DIR"])
+        if "OIDC_DOMAIN" in config:
+            write_env("OIDC_DOMAIN", config["OIDC_DOMAIN"])
+    elif sub_test_name == "azure-remote":
+        write_env("AZUREOIDC_RESOURCE", config["AZUREOIDC_RESOURCE"])
+    elif sub_test_name == "gcp-remote":
+        write_env("GCPOIDC_AUDIENCE", config["GCPOIDC_AUDIENCE"])
+    return config
+
+
+def test_oidc_send_to_remote(sub_test_name: str) -> None:
+    env = os.environ.copy()
+    target_dir = _get_target_dir(sub_test_name)
+    create_archive()
+    if sub_test_name in ["azure", "gcp"]:
+        upper_name = sub_test_name.upper()
+        env[f"{upper_name}OIDC_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE
+        env[
+            f"{upper_name}OIDC_TEST_CMD"
+        ] = f"NO_EXT=1 OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh"
+    elif sub_test_name in K8S_NAMES:
+        env["K8S_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE
+        env["K8S_TEST_CMD"] = "OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh"
+    run_command(f"bash {target_dir}/run-driver-test.sh", env=env)
+
+
+def teardown_oidc(sub_test_name: str) -> None:
+    target_dir = _get_target_dir(sub_test_name)
+    # For k8s, make sure an error while tearing down the pod doesn't prevent
+    # the Atlas server teardown.
+ error = None + if sub_test_name in K8S_NAMES: + try: + run_command(f"bash {target_dir}/teardown-pod.sh") + except Exception as e: + error = e + run_command(f"bash {target_dir}/teardown.sh") + if error: + raise error diff --git a/.evergreen/scripts/perf-submission-setup.sh b/.evergreen/scripts/perf-submission-setup.sh new file mode 100755 index 0000000000..ecb38751a5 --- /dev/null +++ b/.evergreen/scripts/perf-submission-setup.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# shellcheck disable=SC2154 +if [ "${requester}" == "commit" ]; then + echo "is_mainline: true" >> expansion.yml +else + echo "is_mainline: false" >> expansion.yml +fi + +# We parse the username out of the order_id as patches append that in and SPS does not need that information +# shellcheck disable=SC2154 +echo "parsed_order_id: $(echo "${revision_order_id}" | awk -F'_' '{print $NF}')" >> expansion.yml diff --git a/.evergreen/scripts/perf-submission.sh b/.evergreen/scripts/perf-submission.sh new file mode 100755 index 0000000000..f7c3ea6664 --- /dev/null +++ b/.evergreen/scripts/perf-submission.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# Submit the performance data to the SPS endpoint +# shellcheck disable=SC2154 +response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \ + "https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}" \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d @results.json) + +http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}') +response_body=$(echo "$response" | sed '/HTTP_STATUS/d') + +# We want to throw an error if the data was not successfully submitted +if [ "$http_status" -ne 200 ]; then + echo "Error: Received HTTP status $http_status" + echo "Response Body: $response_body" + exit 1 +fi + +echo "Response Body: $response_body" +echo "HTTP Status: $http_status" diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py new file mode 100644 index 0000000000..8e58e56da2 --- /dev/null +++ b/.evergreen/scripts/resync-all-specs.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +import argparse +import os +import pathlib +import subprocess +from argparse import Namespace +from subprocess import CalledProcessError +from typing import Optional + + +def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: + """Actually sync the specs""" + print("Beginning to sync specs") + for spec in os.scandir(directory): + if not spec.is_dir(): + continue + + if spec.name in ["asynchronous"]: + continue + try: + subprocess.run( + ["bash", "./.evergreen/resync-specs.sh", spec.name], # noqa: S603, S607 + capture_output=True, + text=True, + check=True, + ) + except CalledProcessError as exc: + errored[spec.name] = exc.stderr + print("Done syncing specs") + + +def apply_patches(errored): + print("Beginning to apply patches") + subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 + try: + subprocess.run( + ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 + shell=True, # noqa: S602 + check=True, + 
stderr=subprocess.PIPE, + ) + except CalledProcessError as exc: + errored["applying patches"] = exc.stderr + + +def check_new_spec_directories(directory: pathlib.Path) -> list[str]: + """Check to see if there are any directories in the spec repo that don't exist in pymongo/test""" + spec_dir = pathlib.Path(os.environ["MDB_SPECS"]) / "source" + spec_set = { + entry.name.replace("-", "_") + for entry in os.scandir(spec_dir) + if entry.is_dir() + and (pathlib.Path(entry.path) / "tests").is_dir() + and len(list(os.scandir(pathlib.Path(entry.path) / "tests"))) > 1 + } + test_set = {entry.name.replace("-", "_") for entry in os.scandir(directory) if entry.is_dir()} + known_mappings = { + "ocsp_support": "ocsp", + "client_side_operations_timeout": "csot", + "mongodb_handshake": "handshake", + "load_balancers": "load_balancer", + "connection_monitoring_and_pooling": "connection_monitoring", + "command_logging_and_monitoring": "command_logging", + "initial_dns_seedlist_discovery": "srv_seedlist", + "server_discovery_and_monitoring": "sdam_monitoring", + } + + for k, v in known_mappings.items(): + if k in spec_set: + spec_set.remove(k) + spec_set.add(v) + return list(spec_set - test_set) + + +def write_summary(errored: dict[str, str], new: list[str], filename: Optional[str]) -> None: + """Generate the PR description""" + pr_body = "" + process = subprocess.run( + ["git diff --name-only | awk -F'/' '{print $2}' | sort | uniq"], # noqa: S607 + shell=True, # noqa: S602 + capture_output=True, + text=True, + check=True, + ) + succeeded = process.stdout.strip().split() + if len(succeeded) > 0: + pr_body += "The following specs were changed:\n -" + pr_body += "\n -".join(succeeded) + pr_body += "\n" + if len(errored) > 0: + pr_body += "\n\nThe following spec syncs encountered errors:" + for k, v in errored.items(): + pr_body += f"\n -{k}\n```{v}\n```" + pr_body += "\n" + if len(new) > 0: + pr_body += "\n\nThe following directories are in the specification repository and not in our test directory:\n -" + pr_body += "\n -".join(new) + pr_body += "\n" + if pr_body != "": + if filename is None: + print(f"\n{pr_body}") + else: + with open(filename, "w") as f: + # replacements made for proper json + f.write(pr_body.replace("\n", "\\n").replace("\t", "\\t")) + + +def main(args: Namespace): + directory = pathlib.Path("./test") + errored: dict[str, str] = {} + resync_specs(directory, errored) + apply_patches(errored) + new = check_new_spec_directories(directory) + write_summary(errored, new, args.filename) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Python Script to resync all specs and generate summary for PR." + ) + parser.add_argument( + "--filename", help="Name of file for the summary to be written into.", default=None + ) + args = parser.parse_args() + main(args) diff --git a/.evergreen/scripts/resync-all-specs.sh b/.evergreen/scripts/resync-all-specs.sh new file mode 100755 index 0000000000..41e4a2bc73 --- /dev/null +++ b/.evergreen/scripts/resync-all-specs.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Run spec syncing script and create PR +set -eu + +# SETUP +SRC_URL="https://github.com/mongodb/specifications.git" +# needs to be set for resync-specs.sh +SPEC_SRC="$(realpath "../specifications")" +SCRIPT="$(realpath "./.evergreen/resync-specs.sh")" + +# Clone the spec repo if the directory does not exist +if [[ ! -d $SPEC_SRC ]]; then + git clone $SRC_URL $SPEC_SRC + if [[ $? -ne 0 ]]; then + echo "Error: Failed to clone repository." 
+    exit 1
+  fi
+fi
+
+# Set environment variable to the cloned spec repo for resync-specs.sh
+export MDB_SPECS="$SPEC_SRC"
+
+# Check that resync-specs.sh exists and is executable
+if [[ ! -x $SCRIPT ]]; then
+  echo "Error: $SCRIPT not found or is not executable."
+  exit 1
+fi
+
+PR_DESC="spec_sync.txt"
+
+# Run the python script that actually does all the resyncing.
+if [ -z "${CI:-}" ]
+then
+  # We're running locally.
+  python3 ./.evergreen/scripts/resync-all-specs.py
+else
+  /opt/devtools/bin/python3.11 ./.evergreen/scripts/resync-all-specs.py --filename "$PR_DESC"
+  if [[ -f $PR_DESC ]]; then
+    # Changes were made -> call the script to create a PR for us.
+    .evergreen/scripts/create-spec-pr.sh "$PR_DESC"
+    rm "$PR_DESC"
+  fi
+fi
diff --git a/.evergreen/scripts/run-getdata.sh b/.evergreen/scripts/run-getdata.sh
new file mode 100755
index 0000000000..9435a5fcc3
--- /dev/null
+++ b/.evergreen/scripts/run-getdata.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Get the debug data for an evergreen task.
+set -eu

+. ${DRIVERS_TOOLS}/.evergreen/get-distro.sh || true
+get_distro || true
+echo $DISTRO
+echo $MARCH
+echo $OS
+
+set -x
+uname -a || true
+ls /etc/*release* || true
+cc --version || true
+gcc --version || true
+clang --version || true
+gcov --version || true
+lcov --version || true
+llvm-cov --version || true
+echo $PATH
+ls -la /usr/local/Cellar/llvm/*/bin/ || true
+ls -la /usr/local/Cellar/ || true
+scan-build --version || true
+genhtml --version || true
+valgrind --version || true
+set +x
diff --git a/.evergreen/scripts/run-server.sh b/.evergreen/scripts/run-server.sh
new file mode 100755
index 0000000000..298eedcd3e
--- /dev/null
+++ b/.evergreen/scripts/run-server.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eu
+
+HERE=$(dirname ${BASH_SOURCE:-$0})
+
+# Try to source the env file.
+if [ -f $HERE/env.sh ]; then
+  echo "Sourcing env file"
+  source $HERE/env.sh
+fi
+
+uv run $HERE/run_server.py "$@"
diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py
new file mode 100644
index 0000000000..a35fbb57a8
--- /dev/null
+++ b/.evergreen/scripts/run_server.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import os
+from typing import Any
+
+from utils import DRIVERS_TOOLS, ROOT, get_test_options, run_command
+
+
+def set_env(name: str, value: Any = "1") -> None:
+    os.environ[name] = str(value)
+
+
+def start_server():
+    opts, extra_opts = get_test_options(
+        "Run a MongoDB server. All given flags will be passed to run-orchestration.sh in DRIVERS_TOOLS.",
+        require_sub_test_name=False,
+        allow_extra_opts=True,
+    )
+    test_name = opts.test_name
+
+    # drivers-evergreen-tools expects the version variable to be named MONGODB_VERSION.
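+    # (This project passes the server version around as VERSION, so mirror it
+    # here for run-orchestration.sh.)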
+ if "VERSION" in os.environ: + os.environ["MONGODB_VERSION"] = os.environ["VERSION"] + + if test_name == "auth_aws": + set_env("AUTH_AWS") + + elif test_name == "load_balancer": + set_env("LOAD_BALANCER") + + elif test_name == "search_index": + os.environ["TOPOLOGY"] = "replica_set" + os.environ["MONGODB_VERSION"] = "7.0" + + if not os.environ.get("TEST_CRYPT_SHARED"): + set_env("SKIP_CRYPT_SHARED") + + if opts.ssl: + extra_opts.append("--ssl") + if test_name != "ocsp": + certs = ROOT / "test/certificates" + set_env("TLS_CERT_KEY_FILE", certs / "client.pem") + set_env("TLS_PEM_KEY_FILE", certs / "server.pem") + set_env("TLS_CA_FILE", certs / "ca.pem") + + if opts.auth: + extra_opts.append("--auth") + + if opts.verbose: + extra_opts.append("-v") + elif opts.quiet: + extra_opts.append("-q") + + cmd = ["bash", f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", *extra_opts] + run_command(cmd, cwd=DRIVERS_TOOLS) + + +if __name__ == "__main__": + start_server() diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py new file mode 100644 index 0000000000..c1c29c58bc --- /dev/null +++ b/.evergreen/scripts/run_tests.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +import json +import logging +import os +import platform +import shutil +import sys +from datetime import datetime +from pathlib import Path +from shutil import which + +try: + import importlib_metadata +except ImportError: + from importlib import metadata as importlib_metadata + + +import pytest +from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command + +AUTH = os.environ.get("AUTH", "noauth") +SSL = os.environ.get("SSL", "nossl") +UV_ARGS = os.environ.get("UV_ARGS", "") +TEST_PERF = os.environ.get("TEST_PERF") +GREEN_FRAMEWORK = os.environ.get("GREEN_FRAMEWORK") +TEST_ARGS = os.environ.get("TEST_ARGS", "").split() +TEST_NAME = os.environ.get("TEST_NAME") +SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") + + +def list_packages(): + packages = set() + for distribution in importlib_metadata.distributions(): + if distribution.name: + packages.add(distribution.name) + print("Package Version URL") + print("------------------- ----------- ----------------------------------------------------") + for name in sorted(packages): + distribution = importlib_metadata.distribution(name) + url = "" + if distribution.origin is not None: + url = distribution.origin.url + print(f"{name:20s}{distribution.version:12s}{url}") + print("------------------- ----------- ----------------------------------------------------\n") + + +def handle_perf(start_time: datetime): + end_time = datetime.now() + elapsed_secs = (end_time - start_time).total_seconds() + with open("results.json") as fid: + results = json.load(fid) + LOGGER.info("results.json:\n%s", json.dumps(results, indent=2)) + + results = dict( + status="PASS", + exit_code=0, + test_file="BenchMarkTests", + start=int(start_time.timestamp()), + end=int(end_time.timestamp()), + elapsed=elapsed_secs, + ) + report = dict(failures=0, results=[results]) + LOGGER.info("report.json\n%s", json.dumps(report, indent=2)) + + with open("report.json", "w", newline="\n") as fid: + json.dump(report, fid) + + +def handle_green_framework() -> None: + if GREEN_FRAMEWORK == "gevent": + from gevent import monkey + + monkey.patch_all() + + # Never run async tests with a framework. 
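+    # (The green-framework monkey-patching only applies to the synchronous test
+    # suite, so a "not default_async" marker expression is added to the pytest args.)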
+ if len(TEST_ARGS) <= 1: + TEST_ARGS.extend(["-m", "not default_async and default"]) + else: + for i in range(len(TEST_ARGS) - 1): + if "-m" in TEST_ARGS[i]: + TEST_ARGS[i + 1] = f"not default_async and {TEST_ARGS[i + 1]}" + + LOGGER.info(f"Running tests with {GREEN_FRAMEWORK}...") + + +def handle_c_ext() -> None: + if platform.python_implementation() != "CPython": + return + sys.path.insert(0, str(ROOT / "tools")) + from fail_if_no_c import main as fail_if_no_c + + fail_if_no_c() + + +def handle_pymongocrypt() -> None: + import pymongocrypt + + LOGGER.info(f"pymongocrypt version: {pymongocrypt.__version__})") + LOGGER.info(f"libmongocrypt version: {pymongocrypt.libmongocrypt_version()})") + + +def handle_aws_lambda() -> None: + env = os.environ.copy() + target_dir = ROOT / "test/lambda" + env["TEST_LAMBDA_DIRECTORY"] = str(target_dir) + env.setdefault("AWS_REGION", "us-east-1") + dirs = ["pymongo", "gridfs", "bson"] + # Store the original .so files. + before_sos = [] + for dname in dirs: + before_sos.extend(f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")) + # Build the c extensions. + docker = which("docker") or which("podman") + if not docker: + raise ValueError("Could not find docker!") + image = "quay.io/pypa/manylinux2014_x86_64:latest" + run_command( + f'{docker} run --rm -v "{ROOT}:/src" --platform linux/amd64 {image} /src/test/lambda/build_internal.sh' + ) + for dname in dirs: + target = ROOT / "test/lambda/mongodb" / dname + shutil.rmtree(target, ignore_errors=True) + shutil.copytree(ROOT / dname, target) + # Remove the original so files from the lambda directory. + for so_path in before_sos: + (ROOT / "test/lambda/mongodb" / so_path).unlink() + # Remove the new so files from the ROOT directory. + for dname in dirs: + so_paths = [f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")] + for so_path in list(so_paths): + if so_path not in before_sos: + Path(so_path).unlink() + + script_name = "run-deployed-lambda-aws-tests.sh" + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/aws_lambda/{script_name}", env=env) + + +def run() -> None: + # Add diagnostic for python version. + print("Running with python", sys.version) + + # List the installed packages. + list_packages() + + # Handle green framework first so they can patch modules. + if GREEN_FRAMEWORK: + handle_green_framework() + + # Ensure C extensions if applicable. + if not os.environ.get("NO_EXT"): + handle_c_ext() + + if os.environ.get("PYMONGOCRYPT_LIB"): + handle_pymongocrypt() + + LOGGER.info(f"Test setup:\n{AUTH=}\n{SSL=}\n{UV_ARGS=}\n{TEST_ARGS=}") + + # Record the start time for a perf test. + if TEST_PERF: + start_time = datetime.now() + + # Run mod_wsgi tests using the helper. + if TEST_NAME == "mod_wsgi": + from mod_wsgi_tester import test_mod_wsgi + + test_mod_wsgi() + return + + # Send kms tests to run remotely. + if TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: + from kms_tester import test_kms_send_to_remote + + test_kms_send_to_remote(SUB_TEST_NAME) + return + + # Handle doctests. + if TEST_NAME == "doctest": + from sphinx.cmd.build import main + + result = main("-E -b doctest doc ./doc/_build/doctest".split()) + sys.exit(result) + + # Send ecs tests to run remotely. + if TEST_NAME == "auth_aws" and SUB_TEST_NAME == "ecs": + run_command(f"{DRIVERS_TOOLS}/.evergreen/auth_aws/aws_setup.sh ecs") + return + + # Send OIDC tests to run remotely. 
+ if ( + TEST_NAME == "auth_oidc" + and SUB_TEST_NAME != "default" + and not SUB_TEST_NAME.endswith("-remote") + ): + from oidc_tester import test_oidc_send_to_remote + + test_oidc_send_to_remote(SUB_TEST_NAME) + return + + # Run deployed aws lambda tests. + if TEST_NAME == "aws_lambda": + handle_aws_lambda() + return + + if os.environ.get("DEBUG_LOG"): + TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG}".split()) + + # Run local tests. + ret = pytest.main(TEST_ARGS + sys.argv[1:]) + if ret != 0: + sys.exit(ret) + + # Handle perf test post actions. + if TEST_PERF: + handle_perf(start_time) + + +if __name__ == "__main__": + run() diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh new file mode 100755 index 0000000000..209857d542 --- /dev/null +++ b/.evergreen/scripts/setup-dev-env.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Set up a development environment on an evergreen host. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" +ROOT=$(dirname "$(dirname $HERE)") +pushd $ROOT > /dev/null + +# Bail early if running on GitHub Actions. +if [ -n "${GITHUB_ACTION:-}" ]; then + exit 0 +fi + +# Source the env files to pick up common variables. +if [ -f $HERE/env.sh ]; then + . $HERE/env.sh +fi +# PYTHON_BINARY or PYTHON_VERSION may be defined in test-env.sh. +if [ -f $HERE/test-env.sh ]; then + . $HERE/test-env.sh +fi + +# Ensure dependencies are installed. +bash $HERE/install-dependencies.sh + +# Get the appropriate UV_PYTHON. +. $ROOT/.evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + if [ -n "${PYTHON_VERSION:-}" ]; then + PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION) + else + PYTHON_BINARY=$(find_python3) + fi +fi +export UV_PYTHON=${PYTHON_BINARY} +echo "Using python $UV_PYTHON" + +# Add the default install path to the path if needed. +if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + export PATH="$PATH:$HOME/.local/bin" +fi + +# Set up venv, making sure c extensions build unless disabled. +if [ -z "${NO_EXT:-}" ]; then + export PYMONGO_C_EXT_MUST_BUILD=1 +fi +# Set up visual studio env on Windows spawn hosts. +if [ -f $HOME/.visualStudioEnv.sh ]; then + set +u + SSH_TTY=1 source $HOME/.visualStudioEnv.sh + set -u +fi +uv sync + +echo "Setting up python environment... done." + +popd > /dev/null diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh new file mode 100755 index 0000000000..9158414cce --- /dev/null +++ b/.evergreen/scripts/setup-system.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Set up the system on an evergreen host. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" +echo "Setting up system..." +bash .evergreen/scripts/configure-env.sh +source .evergreen/scripts/env.sh +bash $DRIVERS_TOOLS/.evergreen/setup.sh +bash .evergreen/scripts/install-dependencies.sh +popd + +# Enable core dumps if enabled on the machine +# Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml +if [ -f /proc/self/coredump_filter ]; then + # Set the shell process (and its children processes) to dump ELF headers (bit 4), + # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). + echo 0x13 >/proc/self/coredump_filter + + if [ -f /sbin/sysctl ]; then + # Check that the core pattern is set explicitly on our distro image instead + # of being the OS's default value. This ensures that coredump names are consistent + # across distros and can be picked up by Evergreen. 
+    core_pattern=$(/sbin/sysctl -n "kernel.core_pattern")
+    if [ "$core_pattern" = "dump_%e.%p.core" ]; then
+      echo "Enabling coredumps"
+      ulimit -c unlimited
+    fi
+  fi
+fi
+
+if [ "$(uname -s)" = "Darwin" ]; then
+  core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile")
+  if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then
+    echo "Enabling coredumps"
+    ulimit -c unlimited
+  fi
+fi
+
+if [ -w /etc/hosts ]; then
+  SUDO=""
+else
+  SUDO="sudo"
+fi
+
+# Add 'server' and 'hostname_not_in_cert' as hostnames.
+echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts
+echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts
+
+echo "Setting up system... done."
diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh
new file mode 100755
index 0000000000..1074c7eaaf
--- /dev/null
+++ b/.evergreen/scripts/setup-tests.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Set up the test environment, including secrets and services.
+set -eu
+
+# Supported/used environment variables:
+#  AUTH                 Set to enable authentication. Defaults to "noauth".
+#  SSL                  Set to enable SSL. Defaults to "nossl".
+#  GREEN_FRAMEWORK      The green framework to test with, if any.
+#  COVERAGE             If non-empty, run the test suite with coverage.
+#  COMPRESSORS          If non-empty, install the appropriate compressor.
+#  LIBMONGOCRYPT_URL    The URL to download libmongocrypt.
+#  TEST_CRYPT_SHARED    If non-empty, install the crypt_shared lib.
+#  MONGODB_API_VERSION  The MongoDB API version to use in tests.
+#  MONGODB_URI          If non-empty, use as the MONGODB_URI in tests.
+
+SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0})
+
+# Try to source the env file.
+if [ -f $SCRIPT_DIR/env.sh ]; then
+  source $SCRIPT_DIR/env.sh
+fi
+
+echo "Setting up tests with args \"$*\"..."
+uv run $SCRIPT_DIR/setup_tests.py "$@"
+echo "Setting up tests with args \"$*\"... done."
diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py
new file mode 100644
index 0000000000..3f0a8cc7f9
--- /dev/null
+++ b/.evergreen/scripts/setup_tests.py
@@ -0,0 +1,483 @@
+from __future__ import annotations
+
+import base64
+import io
+import os
+import platform
+import shutil
+import stat
+import tarfile
+from pathlib import Path
+from urllib import request
+
+from utils import (
+    DRIVERS_TOOLS,
+    ENV_FILE,
+    HERE,
+    LOGGER,
+    PLATFORM,
+    ROOT,
+    TEST_SUITE_MAP,
+    Distro,
+    get_test_options,
+    read_env,
+    run_command,
+    write_env,
+)
+
+# Passthrough environment variables.
+PASS_THROUGH_ENV = [
+    "GREEN_FRAMEWORK",
+    "NO_EXT",
+    "MONGODB_API_VERSION",
+    "DEBUG_LOG",
+    "PYTHON_BINARY",
+    "PYTHON_VERSION",
+    "REQUIRE_FIPS",
+    "IS_WIN32",
+]
+
+# Map the test name to a test extra.
+EXTRAS_MAP = {
+    "auth_aws": "aws",
+    "auth_oidc": "aws",
+    "encryption": "encryption",
+    "enterprise_auth": "gssapi",
+    "kms": "encryption",
+    "ocsp": "ocsp",
+    "pyopenssl": "ocsp",
+}
+
+
+# Map the test name to a test group.
+GROUP_MAP = dict(mockupdb="mockupdb", perf="perf")
+
+# The python version used for perf tests.
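+# (Presumably pinned so that benchmark results stay comparable across hosts
+# and runs rather than drifting with each image's default interpreter.)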
+PERF_PYTHON_VERSION = "3.10.11" + + +def is_set(var: str) -> bool: + value = os.environ.get(var, "") + return len(value.strip()) > 0 + + +def get_distro() -> Distro: + name = "" + version_id = "" + arch = platform.machine() + with open("/etc/os-release") as fid: + for line in fid.readlines(): + line = line.replace('"', "") # noqa: PLW2901 + if line.startswith("NAME="): + _, _, name = line.strip().partition("=") + if line.startswith("VERSION_ID="): + _, _, version_id = line.strip().partition("=") + return Distro(name=name, version_id=version_id, arch=arch) + + +def setup_libmongocrypt(): + target = "" + if PLATFORM == "windows": + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. + if is_set("TEST_FLE_GCP_AUTO"): + run_command('powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/"') + target = "windows-test" + + elif PLATFORM == "darwin": + target = "macos" + + else: + distro = get_distro() + if distro.name.startswith("Debian"): + target = f"debian{distro.version_id}" + elif distro.name.startswith("Ubuntu"): + if distro.version_id == "20.04": + target = "debian11" + elif distro.version_id == "22.04": + target = "debian12" + elif distro.version_id == "24.04": + target = "debian13" + elif distro.name.startswith("Red Hat"): + if distro.version_id.startswith("7"): + target = "rhel-70-64-bit" + elif distro.version_id.startswith("8"): + if distro.arch == "aarch64": + target = "rhel-82-arm64" + else: + target = "rhel-80-64-bit" + + if not is_set("LIBMONGOCRYPT_URL"): + if not target: + raise ValueError("Cannot find libmongocrypt target for current platform!") + url = f"https://s3.amazonaws.com/mciuploads/libmongocrypt/{target}/master/latest/libmongocrypt.tar.gz" + else: + url = os.environ["LIBMONGOCRYPT_URL"] + + shutil.rmtree(HERE / "libmongocrypt", ignore_errors=True) + + LOGGER.info(f"Fetching {url}...") + with request.urlopen(request.Request(url), timeout=15.0) as response: # noqa: S310 + if response.status == 200: + fileobj = io.BytesIO(response.read()) + with tarfile.open("libmongocrypt.tar.gz", fileobj=fileobj) as fid: + fid.extractall(Path.cwd() / "libmongocrypt") + LOGGER.info(f"Fetching {url}... done.") + + run_command("ls -la libmongocrypt") + run_command("ls -la libmongocrypt/nocrypto") + + if PLATFORM == "windows": + # libmongocrypt's windows dll is not marked executable. + run_command("chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll") + + +def load_config_from_file(path: str | Path) -> dict[str, str]: + config = read_env(path) + for key, value in config.items(): + write_env(key, value) + return config + + +def get_secrets(name: str) -> dict[str, str]: + secrets_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/secrets_handling") + run_command(f"bash {secrets_dir.as_posix()}/setup-secrets.sh {name}", cwd=secrets_dir) + return load_config_from_file(secrets_dir / "secrets-export.sh") + + +def handle_test_env() -> None: + opts, _ = get_test_options("Set up the test environment and services.") + test_name = opts.test_name + sub_test_name = opts.sub_test_name + AUTH = "auth" if opts.auth else "noauth" + SSL = "ssl" if opts.ssl else "nossl" + TEST_ARGS = "" + + # Start compiling the args we'll pass to uv. + UV_ARGS = ["--extra test --no-group dev"] + + test_title = test_name + if sub_test_name: + test_title += f" {sub_test_name}" + + # Create the test env file with the initial set of values. 
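+    # (test-env.sh is a plain shell script of `export NAME="value"` lines,
+    # sourced later by teardown-tests.sh and the test runner; an illustrative
+    # excerpt for an encryption run might look like:
+    #   export TEST_NAME="encryption"
+    #   export AUTH="auth"
+    #   export UV_ARGS="--extra test --no-group dev --extra encryption")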
+ with ENV_FILE.open("w", newline="\n") as fid: + fid.write("#!/usr/bin/env bash\n") + fid.write("set +x\n") + ENV_FILE.chmod(ENV_FILE.stat().st_mode | stat.S_IEXEC) + + write_env("PIP_QUIET") # Quiet by default. + write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. + + # Set an environment variable for the test name and sub test name. + write_env(f"TEST_{test_name.upper()}") + write_env("TEST_NAME", test_name) + write_env("SUB_TEST_NAME", sub_test_name) + + # Handle pass through env vars. + for var in PASS_THROUGH_ENV: + if is_set(var) or getattr(opts, var.lower(), ""): + write_env(var, os.environ.get(var, getattr(opts, var.lower(), ""))) + + if extra := EXTRAS_MAP.get(test_name, ""): + UV_ARGS.append(f"--extra {extra}") + + if group := GROUP_MAP.get(test_name, ""): + UV_ARGS.append(f"--group {group}") + + if opts.test_min_deps: + UV_ARGS.append("--resolution=lowest-direct") + + if test_name == "auth_oidc": + from oidc_tester import setup_oidc + + config = setup_oidc(sub_test_name) + if not config: + AUTH = "noauth" + + if test_name in ["aws_lambda", "search_index"]: + env = os.environ.copy() + env["MONGODB_VERSION"] = "7.0" + env["LAMBDA_STACK_NAME"] = "dbx-python-lambda" + write_env("LAMBDA_STACK_NAME", env["LAMBDA_STACK_NAME"]) + run_command( + f"bash {DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh", + env=env, + cwd=DRIVERS_TOOLS, + ) + + if test_name == "search_index": + AUTH = "auth" + + if test_name == "ocsp": + SSL = "ssl" + + write_env("AUTH", AUTH) + write_env("SSL", SSL) + LOGGER.info(f"Setting up '{test_title}' with {AUTH=} and {SSL=}...") + + if test_name == "aws_lambda": + UV_ARGS.append("--group pip") + # Store AWS creds if they were given. + if "AWS_ACCESS_KEY_ID" in os.environ: + for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]: + if key in os.environ: + write_env(key, os.environ[key]) + + if AUTH != "noauth": + if test_name == "auth_oidc": + DB_USER = config["OIDC_ADMIN_USER"] + DB_PASSWORD = config["OIDC_ADMIN_PWD"] + elif test_name == "search_index": + config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas/secrets-export.sh") + DB_USER = config["DRIVERS_ATLAS_LAMBDA_USER"] + DB_PASSWORD = config["DRIVERS_ATLAS_LAMBDA_PASSWORD"] + write_env("MONGODB_URI", config["MONGODB_URI"]) + else: + DB_USER = "bob" + DB_PASSWORD = "pwd123" # noqa: S105 + write_env("DB_USER", DB_USER) + write_env("DB_PASSWORD", DB_PASSWORD) + LOGGER.info("Added auth, DB_USER: %s", DB_USER) + + if is_set("MONGODB_URI"): + write_env("PYMONGO_MUST_CONNECT", "true") + + if opts.disable_test_commands: + write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") + + if test_name == "enterprise_auth": + config = get_secrets("drivers/enterprise_auth") + if PLATFORM == "windows": + LOGGER.info("Setting GSSAPI_PASS") + write_env("GSSAPI_PASS", config["SASL_PASS"]) + write_env("GSSAPI_CANONICALIZE", "true") + else: + # BUILD-3830 + krb_conf = ROOT / ".evergreen/krb5.conf.empty" + krb_conf.touch() + write_env("KRB5_CONFIG", krb_conf) + LOGGER.info("Writing keytab") + keytab = base64.b64decode(config["KEYTAB_BASE64"]) + keytab_file = ROOT / ".evergreen/drivers.keytab" + with keytab_file.open("wb") as fid: + fid.write(keytab) + principal = config["PRINCIPAL"] + LOGGER.info("Running kinit") + os.environ["KRB5_CONFIG"] = str(krb_conf) + cmd = f"kinit -k -t {keytab_file} -p {principal}" + run_command(cmd) + + LOGGER.info("Setting GSSAPI variables") + write_env("GSSAPI_HOST", config["SASL_HOST"]) + write_env("GSSAPI_PORT", config["SASL_PORT"]) + write_env("GSSAPI_PRINCIPAL", 
config["PRINCIPAL"]) + + if test_name == "doctest": + UV_ARGS.append("--extra docs") + + if test_name == "load_balancer": + SINGLE_MONGOS_LB_URI = os.environ.get( + "SINGLE_MONGOS_LB_URI", "mongodb://127.0.0.1:8000/?loadBalanced=true" + ) + MULTI_MONGOS_LB_URI = os.environ.get( + "MULTI_MONGOS_LB_URI", "mongodb://127.0.0.1:8001/?loadBalanced=true" + ) + if SSL != "nossl": + SINGLE_MONGOS_LB_URI += "&tls=true" + MULTI_MONGOS_LB_URI += "&tls=true" + write_env("SINGLE_MONGOS_LB_URI", SINGLE_MONGOS_LB_URI) + write_env("MULTI_MONGOS_LB_URI", MULTI_MONGOS_LB_URI) + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + cmd = f'bash "{DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh" start' + run_command(cmd) + + if test_name == "mod_wsgi": + from mod_wsgi_tester import setup_mod_wsgi + + setup_mod_wsgi(sub_test_name) + + if test_name == "ocsp": + if sub_test_name: + os.environ["OCSP_SERVER_TYPE"] = sub_test_name + for name in ["OCSP_SERVER_TYPE", "ORCHESTRATION_FILE"]: + if name not in os.environ: + raise ValueError(f"Please set {name}") + + server_type = os.environ["OCSP_SERVER_TYPE"] + orch_file = os.environ["ORCHESTRATION_FILE"] + ocsp_algo = orch_file.split("-")[0] + if server_type == "no-responder": + tls_should_succeed = "false" if "mustStaple-disableStapling" in orch_file else "true" + else: + tls_should_succeed = "true" if "valid" in server_type else "false" + + write_env("OCSP_TLS_SHOULD_SUCCEED", tls_should_succeed) + write_env("CA_FILE", f"{DRIVERS_TOOLS}/.evergreen/ocsp/{ocsp_algo}/ca.pem") + + if server_type != "no-responder": + env = os.environ.copy() + env["SERVER_TYPE"] = server_type + env["OCSP_ALGORITHM"] = ocsp_algo + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh", env=env) + + # The mock OCSP responder MUST BE started before the mongod as the mongod expects that + # a responder will be available upon startup. + version = os.environ.get("VERSION", "latest") + cmd = [ + "bash", + f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", + "--ssl", + "--version", + version, + ] + if opts.verbose: + cmd.append("-v") + elif opts.quiet: + cmd.append("-q") + run_command(cmd, cwd=DRIVERS_TOOLS) + + if SSL != "nossl": + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + write_env("CLIENT_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/client.pem") + write_env("CA_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem") + + compressors = os.environ.get("COMPRESSORS") or opts.compressor + if compressors == "snappy": + UV_ARGS.append("--extra snappy") + elif compressors == "zstd": + UV_ARGS.append("--extra zstd") + + if test_name in ["encryption", "kms"]: + # Check for libmongocrypt download. + if not (ROOT / "libmongocrypt").exists(): + setup_libmongocrypt() + + if not opts.test_min_deps: + UV_ARGS.append( + "--with pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" + ) + + # Use the nocrypto build to avoid dependency issues with older windows/python versions. 
+ BASE = ROOT / "libmongocrypt/nocrypto" + if PLATFORM == "linux": + if (BASE / "lib/libmongocrypt.so").exists(): + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.so" + else: + PYMONGOCRYPT_LIB = BASE / "lib64/libmongocrypt.so" + elif PLATFORM == "darwin": + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.dylib" + else: + PYMONGOCRYPT_LIB = BASE / "bin/mongocrypt.dll" + if not PYMONGOCRYPT_LIB.exists(): + raise RuntimeError("Cannot find libmongocrypt shared object file") + write_env("PYMONGOCRYPT_LIB", PYMONGOCRYPT_LIB.as_posix()) + # PATH is updated by configure-env.sh for access to mongocryptd. + + if test_name == "encryption": + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + csfle_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/csfle") + run_command(f"bash {csfle_dir.as_posix()}/setup-secrets.sh", cwd=csfle_dir) + load_config_from_file(csfle_dir / "secrets-export.sh") + run_command(f"bash {csfle_dir.as_posix()}/start-servers.sh") + + if sub_test_name == "pyopenssl": + UV_ARGS.append("--extra ocsp") + + if opts.crypt_shared: + config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") + CRYPT_SHARED_DIR = Path(config["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() + LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) + if PLATFORM == "windows": + write_env("PATH", f"{CRYPT_SHARED_DIR}:$PATH") + else: + write_env( + "DYLD_FALLBACK_LIBRARY_PATH", + f"{CRYPT_SHARED_DIR}:${{DYLD_FALLBACK_LIBRARY_PATH:-}}", + ) + write_env("LD_LIBRARY_PATH", f"{CRYPT_SHARED_DIR}:${{LD_LIBRARY_PATH:-}}") + + if test_name == "kms": + from kms_tester import setup_kms + + setup_kms(sub_test_name) + + if test_name == "auth_aws" and sub_test_name != "ecs-remote": + auth_aws_dir = f"{DRIVERS_TOOLS}/.evergreen/auth_aws" + if "AWS_ROLE_SESSION_NAME" in os.environ: + write_env("AWS_ROLE_SESSION_NAME") + if sub_test_name != "ecs": + aws_setup = f"{auth_aws_dir}/aws_setup.sh" + run_command(f"bash {aws_setup} {sub_test_name}") + creds = read_env(f"{auth_aws_dir}/test-env.sh") + for name, value in creds.items(): + write_env(name, value) + else: + run_command(f"bash {auth_aws_dir}/setup-secrets.sh") + + if test_name == "atlas_connect": + secrets = get_secrets("drivers/atlas_connect") + + # Write file with Atlas X509 client certificate: + decoded = base64.b64decode(secrets["ATLAS_X509_DEV_CERT_BASE64"]).decode("utf8") + cert_file = ROOT / ".evergreen/atlas_x509_dev_client_certificate.pem" + with cert_file.open("w") as file: + file.write(decoded) + write_env( + "ATLAS_X509_DEV_WITH_CERT", + secrets["ATLAS_X509_DEV"] + "&tlsCertificateKeyFile=" + str(cert_file), + ) + + # We do not want the default client_context to be initialized. + write_env("DISABLE_CONTEXT") + + if test_name == "perf": + data_dir = ROOT / "specifications/source/benchmarking/data" + if not data_dir.exists(): + run_command("git clone --depth 1 https://github.com/mongodb/specifications.git") + run_command("tar xf extended_bson.tgz", cwd=data_dir) + run_command("tar xf parallel.tgz", cwd=data_dir) + run_command("tar xf single_and_multi_document.tgz", cwd=data_dir) + write_env("TEST_PATH", str(data_dir)) + write_env("OUTPUT_FILE", str(ROOT / "results.json")) + # Overwrite the UV_PYTHON from the env.sh file. + write_env("UV_PYTHON", "") + + UV_ARGS.append(f"--python={PERF_PYTHON_VERSION}") + + # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively + # affects the benchmark results. 
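+        # (Targeting the single benchmark file avoids collecting the whole
+        # test tree before the timed run starts.)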
+ if sub_test_name == "sync": + TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}" + else: + TEST_ARGS = f"test/performance/async_perf_test.py {TEST_ARGS}" + + # Add coverage if requested. + # Only cover CPython. PyPy reports suspiciously low coverage. + if opts.cov and platform.python_implementation() == "CPython": + # Keep in sync with combine-coverage.sh. + # coverage >=5 is needed for relative_files=true. + UV_ARGS.append("--group coverage") + TEST_ARGS = f"{TEST_ARGS} --cov" + write_env("COVERAGE") + + if opts.green_framework: + framework = opts.green_framework or os.environ["GREEN_FRAMEWORK"] + UV_ARGS.append(f"--group {framework}") + + else: + TEST_ARGS = f"-v --durations=5 {TEST_ARGS}" + TEST_SUITE = TEST_SUITE_MAP.get(test_name) + if TEST_SUITE: + TEST_ARGS = f"-m {TEST_SUITE} {TEST_ARGS}" + + write_env("TEST_ARGS", TEST_ARGS) + write_env("UV_ARGS", " ".join(UV_ARGS)) + + LOGGER.info(f"Setting up test '{test_title}' with {AUTH=} and {SSL=}... done.") + + +if __name__ == "__main__": + handle_test_env() diff --git a/.evergreen/scripts/stop-server.sh b/.evergreen/scripts/stop-server.sh new file mode 100755 index 0000000000..7599387f5f --- /dev/null +++ b/.evergreen/scripts/stop-server.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Stop a server that was started using run-orchestration.sh in DRIVERS_TOOLS. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + +bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh new file mode 100755 index 0000000000..898425b6cf --- /dev/null +++ b/.evergreen/scripts/teardown-tests.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Tear down any services that were used by tests. +set -eu + +SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) + +# Try to source the env file. +if [ -f $SCRIPT_DIR/env.sh ]; then + echo "Sourcing env inputs" + . $SCRIPT_DIR/env.sh +else + echo "Not sourcing env inputs" +fi + +# Handle test inputs. +if [ -f $SCRIPT_DIR/test-env.sh ]; then + echo "Sourcing test inputs" + . $SCRIPT_DIR/test-env.sh +else + echo "Missing test inputs, please run 'just setup-tests'" +fi + +# Teardown the test runner. +uv run $SCRIPT_DIR/teardown_tests.py diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py new file mode 100644 index 0000000000..7da0b60815 --- /dev/null +++ b/.evergreen/scripts/teardown_tests.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import os +import shutil +import sys +from pathlib import Path + +from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command + +TEST_NAME = os.environ.get("TEST_NAME", "unconfigured") +SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") + +LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'...") + +# Shut down csfle servers if applicable. +if TEST_NAME == "encryption": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh") + +# Shut down load balancer if applicable. +elif TEST_NAME == "load-balancer": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop") + +# Tear down kms VM if applicable. +elif TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: + from kms_tester import teardown_kms + + teardown_kms(SUB_TEST_NAME) + +# Tear down OIDC if applicable. +elif TEST_NAME == "auth_oidc": + from oidc_tester import teardown_oidc + + teardown_oidc(SUB_TEST_NAME) + +# Tear down ocsp if applicable. 
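+# (Shuts down the mock OCSP responder started during setup_tests.py.)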
+elif TEST_NAME == "ocsp": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh") + +# Tear down atlas cluster if applicable. +if TEST_NAME in ["aws_lambda", "search_index"]: + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh") + +# Tear down auth_aws if applicable. +# We do not run web-identity hosts on macos, because the hosts lack permissions, +# so there is no reason to run the teardown, which would error with a 401. +elif TEST_NAME == "auth_aws" and sys.platform != "darwin": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh") + +# Tear down perf if applicable. +elif TEST_NAME == "perf": + shutil.rmtree(ROOT / "specifications", ignore_errors=True) + Path(os.environ["OUTPUT_FILE"]).unlink(missing_ok=True) + +# Tear down mog_wsgi if applicable. +elif TEST_NAME == "mod_wsgi": + from mod_wsgi_tester import teardown_mod_wsgi + + teardown_mod_wsgi() + +# Tear down coverage if applicable. +if os.environ.get("COVERAGE"): + shutil.rmtree(".pytest_cache", ignore_errors=True) + +LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.evergreen/scripts/upload-coverage-report.sh b/.evergreen/scripts/upload-coverage-report.sh new file mode 100755 index 0000000000..895664cbf2 --- /dev/null +++ b/.evergreen/scripts/upload-coverage-report.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Upload a coverate report to s3. +set -eu +aws s3 cp htmlcov/ s3://"$1"/coverage/"$2"/"$3"/htmlcov/ --recursive --acl public-read --region us-east-1 diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py new file mode 100644 index 0000000000..2bc9c720d2 --- /dev/null +++ b/.evergreen/scripts/utils.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +import argparse +import dataclasses +import logging +import os +import shlex +import subprocess +import sys +from pathlib import Path +from typing import Any + +HERE = Path(__file__).absolute().parent +ROOT = HERE.parent.parent +DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") +TMP_DRIVER_FILE = "/tmp/mongo-python-driver.tgz" # noqa: S108 + +LOGGER = logging.getLogger("test") +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") +ENV_FILE = HERE / "test-env.sh" +PLATFORM = "windows" if os.name == "nt" else sys.platform.lower() + + +@dataclasses.dataclass +class Distro: + name: str + version_id: str + arch: str + + +# Map the test name to a test suite. +TEST_SUITE_MAP = { + "atlas_connect": "atlas_connect", + "auth_aws": "auth_aws", + "auth_oidc": "auth_oidc", + "default": "", + "default_async": "default_async", + "default_sync": "default", + "encryption": "encryption", + "enterprise_auth": "auth", + "search_index": "search_index", + "kms": "kms", + "load_balancer": "load_balancer", + "mockupdb": "mockupdb", + "ocsp": "ocsp", + "perf": "perf", +} + +# Tests that require a sub test suite. +SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] + +EXTRA_TESTS = ["mod_wsgi", "aws_lambda", "doctest"] + +# Tests that do not use run-orchestration directly. 
+NO_RUN_ORCHESTRATION = [ + "auth_oidc", + "atlas_connect", + "aws_lambda", + "mockupdb", + "ocsp", +] + +# Mapping of env variables to options +OPTION_TO_ENV_VAR = {"cov": "COVERAGE", "crypt_shared": "TEST_CRYPT_SHARED"} + + +def get_test_options( + description, require_sub_test_name=True, allow_extra_opts=False +) -> tuple[argparse.Namespace, list[str]]: + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + if require_sub_test_name: + parser.add_argument( + "test_name", + choices=sorted(list(TEST_SUITE_MAP) + EXTRA_TESTS), + nargs="?", + default="default", + help="The optional name of the test suite to set up, typically the same name as a pytest marker.", + ) + parser.add_argument( + "sub_test_name", nargs="?", help="The optional sub test name, for example 'azure'." + ) + else: + parser.add_argument( + "test_name", + choices=set(list(TEST_SUITE_MAP) + EXTRA_TESTS) - set(NO_RUN_ORCHESTRATION), + nargs="?", + default="default", + help="The optional name of the test suite to be run, which informs the server configuration.", + ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level." + ) + parser.add_argument( + "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level." + ) + parser.add_argument("--auth", action="store_true", help="Whether to add authentication.") + parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration.") + parser.add_argument( + "--test-min-deps", action="store_true", help="Test against minimum dependency versions" + ) + + # Add the test modifiers. + if require_sub_test_name: + parser.add_argument( + "--debug-log", action="store_true", help="Enable pymongo standard logging." + ) + parser.add_argument("--cov", action="store_true", help="Add test coverage.") + parser.add_argument( + "--green-framework", + nargs=1, + choices=["gevent"], + help="Optional green framework to test against.", + ) + parser.add_argument( + "--compressor", + nargs=1, + choices=["zlib", "zstd", "snappy"], + help="Optional compression algorithm.", + ) + parser.add_argument("--crypt-shared", action="store_true", help="Test with crypt_shared.") + parser.add_argument("--no-ext", action="store_true", help="Run without c extensions.") + parser.add_argument( + "--mongodb-api-version", choices=["1"], help="MongoDB stable API version to use." + ) + parser.add_argument( + "--disable-test-commands", action="store_true", help="Disable test commands." + ) + + # Get the options. + if not allow_extra_opts: + opts, extra_opts = parser.parse_args(), [] + else: + opts, extra_opts = parser.parse_known_args() + + # Convert list inputs to strings. + for name in vars(opts): + value = getattr(opts, name) + if isinstance(value, list): + setattr(opts, name, value[0]) + + # Handle validation and environment variable overrides. + test_name = opts.test_name + sub_test_name = opts.sub_test_name if require_sub_test_name else "" + if require_sub_test_name and test_name in SUB_TEST_REQUIRED and not sub_test_name: + raise ValueError(f"Test '{test_name}' requires a sub_test_name") + handle_env_overrides(parser, opts) + if "auth" in test_name: + opts.auth = True + # 'auth_aws ecs' shouldn't have extra auth set. 
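+    # (The ECS task authenticates with its container task role rather than a
+    # username/password, so opts.auth is forced back off.)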
+ if test_name == "auth_aws" and sub_test_name == "ecs": + opts.auth = False + if opts.verbose: + LOGGER.setLevel(logging.DEBUG) + elif opts.quiet: + LOGGER.setLevel(logging.WARNING) + return opts, extra_opts + + +def handle_env_overrides(parser: argparse.ArgumentParser, opts: argparse.Namespace) -> None: + # Get the options, and then allow environment variable overrides. + for key in vars(opts): + if key in OPTION_TO_ENV_VAR: + env_var = OPTION_TO_ENV_VAR[key] + else: + env_var = key.upper() + if env_var in os.environ: + if parser.get_default(key) != getattr(opts, key): + LOGGER.info("Overriding env var '%s' with cli option", env_var) + elif env_var == "AUTH": + opts.auth = os.environ.get("AUTH") == "auth" + elif env_var == "SSL": + ssl_opt = os.environ.get("SSL", "") + opts.ssl = ssl_opt and ssl_opt.lower() != "nossl" + elif isinstance(getattr(opts, key), bool): + if os.environ[env_var]: + setattr(opts, key, True) + else: + setattr(opts, key, os.environ[env_var]) + + +def read_env(path: Path | str) -> dict[str, str]: + config = dict() + with Path(path).open() as fid: + for line in fid.readlines(): + if "=" not in line: + continue + name, _, value = line.strip().partition("=") + if value.startswith(('"', "'")): + value = value[1:-1] + name = name.replace("export ", "") + config[name] = value + return config + + +def write_env(name: str, value: Any = "1") -> None: + with ENV_FILE.open("a", newline="\n") as fid: + # Remove any existing quote chars. + value = str(value).replace('"', "") + fid.write(f'export {name}="{value}"\n') + + +def run_command(cmd: str | list[str], **kwargs: Any) -> None: + if isinstance(cmd, list): + cmd = " ".join(cmd) + LOGGER.info("Running command '%s'...", cmd) + kwargs.setdefault("check", True) + # Prevent overriding the python used by other tools. + env = kwargs.pop("env", os.environ).copy() + if "UV_PYTHON" in env: + del env["UV_PYTHON"] + kwargs["env"] = env + try: + subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 + except subprocess.CalledProcessError as e: + LOGGER.error(e.output) + LOGGER.error(str(e)) + sys.exit(e.returncode) + LOGGER.info("Running command '%s'... done.", cmd) + + +def create_archive() -> str: + run_command("git add .", cwd=ROOT) + run_command('git commit --no-verify -m "add files"', check=False, cwd=ROOT) + run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) + return TMP_DRIVER_FILE diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh new file mode 100755 index 0000000000..bada61e568 --- /dev/null +++ b/.evergreen/setup-spawn-host.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Set up a remote evergreen spawn host. +set -eu + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 +user=${target%@*} +remote_dir=/home/$user/mongo-python-driver + +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir +echo "Copying files to $target... 
done" + +ssh $target $remote_dir/.evergreen/scripts/setup-system.sh +ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} .evergreen/scripts/setup-dev-env.sh" diff --git a/.evergreen/spec-patch/PYTHON-2673.patch b/.evergreen/spec-patch/PYTHON-2673.patch new file mode 100644 index 0000000000..868538f7b7 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-2673.patch @@ -0,0 +1,64 @@ +diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json +index 43e4fbb4f..4e2a55fd4 100644 +--- a/test/load_balancer/cursors.json ++++ b/test/load_balancer/cursors.json +@@ -376,7 +376,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned after an network error during getMore", +- "description": "pinned connections are returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", +@@ -440,7 +440,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +@@ -659,7 +659,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned to the pool after a non-network error on getMore", +- "description": "pinned connections are returned to the pool after a non-network error on getMore", + "operations": [ + { + "name": "failPoint", +@@ -715,7 +715,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json +index 63aabc04d..462fa0aac 100644 +--- a/test/load_balancer/sdam-error-handling.json ++++ b/test/load_balancer/sdam-error-handling.json +@@ -366,6 +366,9 @@ + { + "connectionCreatedEvent": {} + }, ++ { ++ "poolClearedEvent": {} ++ }, + { + "connectionClosedEvent": { + "reason": "error" +@@ -378,9 +375,6 @@ + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } +- }, +- { +- "poolClearedEvent": {} + } + ] + } diff --git a/.evergreen/spec-patch/PYTHON-3712.patch b/.evergreen/spec-patch/PYTHON-3712.patch new file mode 100644 index 0000000000..b48c05124c --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-3712.patch @@ -0,0 +1,14 @@ +diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +index e44fad1b..4b492f7d 100644 +--- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json ++++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +@@ -5,7 +5,8 @@ + { + "topologies": [ + "single", +- "sharded" ++ "sharded", ++ "sharded-replicaset" + ], + "serverless": "forbid" + } diff --git a/.evergreen/spec-patch/PYTHON-4261.patch b/.evergreen/spec-patch/PYTHON-4261.patch new file mode 100644 index 0000000000..e4ffc5ce9f --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4261.patch @@ -0,0 +1,61 @@ +diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/replica-set.json ++++ b/test/server_selection_logging/replica-set.json +@@ -184,7 +184,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/standalone.json ++++ b/test/server_selection_logging/standalone.json +@@ -191,7 +191,7 
@@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/sharded.json ++++ b/test/server_selection_logging/sharded.json +@@ -193,7 +193,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/operation-id.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/operation-id.json ++++ b/test/server_selection_logging/operation-id.json +@@ -197,7 +197,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +@@ -383,7 +383,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", diff --git a/.evergreen/spec-patch/PYTHON-4918.patch b/.evergreen/spec-patch/PYTHON-4918.patch new file mode 100644 index 0000000000..5f409c5870 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4918.patch @@ -0,0 +1,24 @@ +diff --git a/test/connection_monitoring/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json +index 1c744b85..509b2a23 100644 +--- a/test/connection_monitoring/pool-create-min-size-error.json ++++ b/test/connection_monitoring/pool-create-min-size-error.json +@@ -49,15 +49,15 @@ + "type": "ConnectionCreated", + "address": 42 + }, ++ { ++ "type": "ConnectionPoolCleared", ++ "address": 42 ++ }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" +- }, +- { +- "type": "ConnectionPoolCleared", +- "address": 42 + } + ], + "ignore": [ diff --git a/.evergreen/spec-patch/PYTHON-5052.patch b/.evergreen/spec-patch/PYTHON-5052.patch new file mode 100644 index 0000000000..01cbc00116 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5052.patch @@ -0,0 +1,440 @@ +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +new file mode 100644 +index 00000000..aa8046d2 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "foo": "bar" ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must not have additional properties'", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +new file mode 100644 +index 00000000..0b3a65f5 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalPropertyType", ++ "schemaVersion": "1.26", ++ 
"createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "enableCommandPayload": 0 ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages enableCommandPayload must be boolean", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +new file mode 100644 +index 00000000..de3ef39a +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +@@ -0,0 +1,18 @@ ++{ ++ "description": "entity-client-observeTracingMessages-type", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": "foo" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must be an object", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +new file mode 100644 +index 00000000..5947a286 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +@@ -0,0 +1,30 @@ ++{ ++ "description": "expectedTracingSpans-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "additional property foo not allowed in expectTracingMessages", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "ignoreExtraSpans": false, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ], ++ "foo": 0 ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-clientType.json b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +new file mode 100644 +index 00000000..2fe7faea +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-clientType", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "client type must be string", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": 0, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +new file mode 100644 +index 00000000..8a98d5ba +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +@@ -0,0 +1,29 @@ ++{ ++ "description": "expectedTracingSpans-emptyNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must not have fewer than 1 items'", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +new file mode 100644 +index 00000000..79a86744 +--- /dev/null ++++ 
b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +@@ -0,0 +1,31 @@ ++{ ++ "description": "expectedTracingSpans-invalidNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested span must have required property name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [ ++ {} ++ ] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +new file mode 100644 +index 00000000..2fb1cd5b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertyClient", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property client", ++ "operations": [], ++ "expectTracingMessages": { ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +new file mode 100644 +index 00000000..acd10307 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertySpans", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property spans", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0" ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +new file mode 100644 +index 00000000..17299f86 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedAdditionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "Span must not have additional properties", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": [], ++ "foo": "bar" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +new file mode 100644 +index 00000000..0257cd9b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingName", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "tags": { 
++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +new file mode 100644 +index 00000000..a09ca31c +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +@@ -0,0 +1,25 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingTags", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span tags", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +new file mode 100644 +index 00000000..ccff0410 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeArray", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must be an array", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": {} ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +new file mode 100644 +index 00000000..72af1c29 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +@@ -0,0 +1,26 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeObject", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "span tags must be an object", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": [] ++ } ++ ] ++ } ++ } ++ ] ++} diff --git a/.evergreen/spec-patch/PYTHON-5493.patch b/.evergreen/spec-patch/PYTHON-5493.patch new file mode 100644 index 0000000000..99c105dcef --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5493.patch @@ -0,0 +1,50 @@ +diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json +index 5799e834..72103b3c 100644 +--- a/test/connection_logging/connection-logging.json ++++ b/test/connection_logging/connection-logging.json +@@ -446,6 +446,22 @@ + } + } + }, ++ { ++ "level": "debug", ++ "component": "connection", ++ "data": { ++ "message": "Connection pool cleared", ++ "serverHost": { ++ "$$type": "string" ++ }, ++ "serverPort": { ++ "$$type": [ ++ "int", ++ "long" ++ ] ++ } ++ } ++ }, + { + "level": "debug", + "component": "connection", +@@ -498,22 +514,6 @@ + ] + } + } +- }, +- { +- "level": "debug", +- "component": "connection", +- "data": { +- "message": "Connection pool cleared", +- "serverHost": { +- "$$type": "string" +- }, +- "serverPort": { +- "$$type": [ +- "int", +- "long" +- ] +- } +- } + } + ] + } diff --git a/.evergreen/spec-patch/PYTHON-5529.patch b/.evergreen/spec-patch/PYTHON-5529.patch new 
file mode 100644 index 0000000000..a97602e055 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5529.patch @@ -0,0 +1,587 @@ +diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json +index aa9c3eb2..212cd410 100644 +--- a/test/csot/command-execution.json ++++ b/test/csot/command-execution.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly during command execution", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", +@@ -69,8 +69,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -185,8 +187,10 @@ + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -316,8 +320,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, +- "heartbeatFrequencyMS": 100000 ++ "heartbeatFrequencyMS": 100000, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json +index 3868b302..f9d03429 100644 +--- a/test/csot/convenient-transactions.json ++++ b/test/csot/convenient-transactions.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for the withTransaction API", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json +index 4889e395..89be49f0 100644 +--- a/test/csot/error-transformations.json ++++ b/test/csot/error-transformations.json +@@ -1,6 +1,6 @@ + { + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json +index f1edbe68..9d8046d1 100644 +--- a/test/csot/global-timeoutMS.json ++++ b/test/csot/global-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS can be configured on a MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -38,8 +38,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -217,8 +219,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -390,8 
+394,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -569,8 +575,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -762,8 +770,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -941,8 +951,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1120,8 +1132,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1305,8 +1319,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1484,8 +1500,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1663,8 +1681,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1842,8 +1862,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2021,8 +2043,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2194,8 +2218,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2375,8 +2401,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2554,8 +2582,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2733,8 +2763,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2906,8 +2938,10 @@ + "client": { + "id": "client", + "uriOptions": { +- 
"timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3079,8 +3113,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3258,8 +3294,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3441,8 +3479,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3628,8 +3668,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3807,8 +3849,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3986,8 +4030,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4171,8 +4217,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4360,8 +4408,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4549,8 +4599,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4728,8 +4780,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4913,8 +4967,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5102,8 +5158,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5297,8 +5355,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5482,8 +5542,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ 
"awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5677,8 +5739,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json +index 291c6e72..58c59cb3 100644 +--- a/test/csot/non-tailable-cursors.json ++++ b/test/csot/non-tailable-cursors.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for non-tailable cursors", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -17,8 +17,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 200 ++ "timeoutMS": 200, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json +index 9daad260..5a0c9f36 100644 +--- a/test/csot/retryability-timeoutMS.json ++++ b/test/csot/retryability-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for retryable operations", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 100 ++ "timeoutMS": 100, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json +index 36f774fb..e5182e33 100644 +--- a/test/csot/runCursorCommand.json ++++ b/test/csot/runCursorCommand.json +@@ -1,6 +1,6 @@ + { + "description": "runCursorCommand", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -16,6 +16,10 @@ + { + "client": { + "id": "commandClient", ++ "uriOptions": { ++ "minPoolSize": 1 ++ }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", +diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json +index 13ea91c7..dbf163e4 100644 +--- a/test/csot/sessions-inherit-timeoutMS.json ++++ b/test/csot/sessions-inherit-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "sessions inherit timeoutMS from their parent MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh new file mode 100755 index 0000000000..61dd84ec22 --- /dev/null +++ b/.evergreen/sync-spawn-host.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Synchronize local files to a remote Evergreen spawn host. +set -eu + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 +user=${target%@*} +remote_dir=/home/$user/mongo-python-driver + +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir +echo "Copying files to $target... done." 
+echo "Syncing files to $target..." +# shellcheck disable=SC2034 +fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver; done +echo "Syncing files to $target... done." diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh new file mode 100755 index 0000000000..dadb7db084 --- /dev/null +++ b/.evergreen/utils.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# Utility functions used by pymongo evergreen scripts. +set -eu + +find_python3() { + PYTHON="" + # Find a suitable toolchain version, if available. + if [ "$(uname -s)" = "Darwin" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" + elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin + PYTHON="C:/python/Python310/python.exe" + else + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.10+. + if [ -f "/opt/python/3.10/bin/python3" ]; then + PYTHON="/opt/python/Current/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v5/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v4/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v3/bin/python3" + fi + fi + # Add a fallback system python3 if it is available and Python 3.10+. + if [ -z "$PYTHON" ]; then + if is_python_310 "$(command -v python3)"; then + PYTHON="$(command -v python3)" + fi + fi + if [ -z "$PYTHON" ]; then + echo "Cannot test without python3.10+ installed!" + exit 1 + fi + echo "$PYTHON" +} + +# Usage: +# createvirtualenv /path/to/python /output/path/for/venv +# * param1: Python binary to use for the virtualenv +# * param2: Path to the virtualenv to create +createvirtualenv () { + PYTHON=$1 + VENVPATH=$2 + + # Prefer venv + VENV="$PYTHON -m venv" + if [ "$(uname -s)" = "Darwin" ]; then + VIRTUALENV="$PYTHON -m virtualenv" + else + VIRTUALENV=$(command -v virtualenv 2>/dev/null || echo "$PYTHON -m virtualenv") + VIRTUALENV="$VIRTUALENV -p $PYTHON" + fi + if ! $VENV $VENVPATH 2>/dev/null; then + # Workaround for bug in older versions of virtualenv. + $VIRTUALENV $VENVPATH 2>/dev/null || $VIRTUALENV $VENVPATH + fi + if [ "Windows_NT" = "${OS:-}" ]; then + # Workaround https://bugs.python.org/issue32451: + # mongovenv/Scripts/activate: line 3: $'\r': command not found + dos2unix $VENVPATH/Scripts/activate || true + . $VENVPATH/Scripts/activate + else + . $VENVPATH/bin/activate + fi + + export PIP_QUIET=1 + python -m pip install --upgrade pip +} + +# Usage: +# testinstall /path/to/python /path/to/.whl ["no-virtualenv"] +# * param1: Python binary to test +# * param2: Path to the wheel to install +# * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. +testinstall () { + PYTHON=$1 + RELEASE=$2 + NO_VIRTUALENV=$3 + PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") + + if [ -z "$NO_VIRTUALENV" ]; then + createvirtualenv $PYTHON venvtestinstall + PYTHON=python + fi + + $PYTHON -m pip install --upgrade $RELEASE + cd tools + + if [ "$PYTHON_IMPL" = "CPython" ]; then + $PYTHON fail_if_no_c.py + fi + + $PYTHON -m pip uninstall -y pymongo + cd .. 
+ + if [ -z "$NO_VIRTUALENV" ]; then + deactivate + rm -rf venvtestinstall + fi +} + +# Function that returns success if the provided Python binary is version 3.10 or later +# Usage: +# is_python_310 /path/to/python +# * param1: Python binary +is_python_310() { + if [ -z "$1" ]; then + return 1 + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 10))"; then + # runs when sys.version_info[:2] >= (3, 10) + return 0 + else + return 1 + fi +} + + +# Function that gets a python binary given a python version string. +# Versions can be of the form 3.xx or pypy3.xx. +get_python_binary() { + version=$1 + if [ "$(uname -s)" = "Darwin" ]; then + if [[ "$version" == *"t"* ]]; then + binary_name="python3t" + framework_dir="PythonT" + else + binary_name="python3" + framework_dir="Python" + fi + version=$(echo "$version" | sed 's/t//g') + PYTHON="/Library/Frameworks/$framework_dir.Framework/Versions/$version/bin/$binary_name" + elif [ "Windows_NT" = "${OS:-}" ]; then + version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g; s/t//g') + if [ -n "${IS_WIN32:-}" ]; then + PYTHON="C:/python/32/Python$version/python.exe" + else + PYTHON="C:/python/Python$version/python.exe" + fi + else + PYTHON="/opt/python/$version/bin/python3" + fi + if is_python_310 "$(command -v $PYTHON)"; then + echo "$PYTHON" + else + echo "Could not find suitable python binary for '$version'" >&2 + return 1 + fi +} diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..67ad992c75 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# Initial pre-commit reformat +5578999a90e439fbca06fc0ffc98f4d04e96f7b4 +# pyupgrade and ruff +0092b0af79378abf35b6db73a082ecb91af1d973 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..e21b87ddd3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @mongodb/dbx-python diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..5bf500ba12 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + groups: + actions: + patterns: + - "*" + # Python + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..8185a38836 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,38 @@ + + +[Issue Key](https://jira.mongodb.org/browse/%7BISSUE_KEY%7D) +## Summary + + +## Changes in this PR + + +## Testing Plan + + +### Screenshots (optional) + + +## Checklist + +### Checklist for Author +- [ ] Did you update the changelog (if necessary)? +- [ ] Is the intention of the code captured in relevant tests? +- [ ] If there are new TODOs, has a related JIRA ticket been created? + +### Checklist for Reviewer {@primary_reviewer} +- [ ] Does the title of the PR reference a JIRA Ticket? +- [ ] Do you fully understand the implementation? (Would you be comfortable explaining how this code works to someone else?) +- [ ] Have you checked for spelling & grammar errors? +- [ ] Is all relevant documentation (README or docstring) updated? + +## Focus Areas for Reviewer (optional) + diff --git a/.github/reviewers.txt b/.github/reviewers.txt new file mode 100644 index 0000000000..9e38ee71b5 --- /dev/null +++ b/.github/reviewers.txt @@ -0,0 +1,5 @@ +# List of reviewers for auto-assignment of reviews. 
+caseyclements +blink1073 +Jibola +NoahStapp diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..b138324bf4 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,68 @@ +name: "CodeQL" + +on: + push: + branches: [ "master", "v*"] + tags: ['*'] + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + schedule: + - cron: '17 10 * * 2' + +concurrency: + group: codeql-${{ github.ref }} + cancel-in-progress: true + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + runs-on: "ubuntu-latest" + timeout-minutes: 360 + permissions: + # required for all workflows + security-events: write + + strategy: + fail-fast: false + matrix: + include: + - language: c-cpp + build-mode: manual + - language: python + build-mode: none + - language: actions + build-mode: none + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + ref: ${{ inputs.ref }} + persist-credentials: false + - uses: actions/setup-python@v6 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + queries: security-extended + config: | + paths-ignore: + - 'doc/**' + - 'tools/**' + - 'test/**' + + - if: matrix.build-mode == 'manual' + run: | + pip install -e . + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml new file mode 100644 index 0000000000..95a5e65c88 --- /dev/null +++ b/.github/workflows/create-release-branch.yml @@ -0,0 +1,57 @@ +name: Create Release Branch + +on: + workflow_dispatch: + inputs: + branch_name: + description: The name of the new branch + required: true + version: + description: The version to set on the branch + required: true + base_ref: + description: The base reference for the branch + push_changes: + description: Whether to push the changes + default: "true" + +concurrency: + group: create-branch-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + create-branch: + environment: release + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - name: Get hatch + run: pip install hatch + - uses: mongodb-labs/drivers-github-tools/create-branch@v3 + id: create-branch + with: + branch_name: ${{ inputs.branch_name }} + version: ${{ inputs.version }} + base_ref: ${{ inputs.base_ref }} + push_changes: ${{ inputs.push_changes }} + version_bump_script: hatch version + evergreen_project: mongo-python-driver-release + 
release_workflow_path: ./.github/workflows/release-python.yml diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml new file mode 100644 index 0000000000..84bf1ba893 --- /dev/null +++ b/.github/workflows/dist.yml @@ -0,0 +1,147 @@ +name: Python Dist + +on: + push: + tags: + - "[0-9]+.[0-9]+.[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+.post[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+" + - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+" + workflow_dispatch: + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + +concurrency: + group: dist-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + build_wheels: + name: Build wheels for ${{ matrix.buildplat[1] }} + runs-on: ${{ matrix.buildplat[0] }} + strategy: + # Ensure that a wheel builder finishes even if another fails + fail-fast: false + matrix: + # Github Actions doesn't support pairing matrix values together, let's improvise + # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 + buildplat: + - [ubuntu-latest, "manylinux_x86_64", "cp3*-manylinux_x86_64"] + - [ubuntu-latest, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + - [ubuntu-latest, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + - [ubuntu-latest, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-latest, "manylinux_i686", "cp3*-manylinux_i686"] + - [windows-2022, "win_amd64", "cp3*-win_amd64"] + - [windows-2022, "win32", "cp3*-win32"] + - [windows-11-arm, "win_arm64", "cp3*-win_arm64"] + - [macos-14, "macos", "cp*-macosx_*"] + + steps: + - name: Checkout pymongo + uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + ref: ${{ inputs.ref }} + + - uses: actions/setup-python@v6 + with: + cache: 'pip' + python-version: 3.11 + cache-dependency-path: 'pyproject.toml' + allow-prereleases: true + + - name: Set up QEMU + if: runner.os == 'Linux' + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 + with: + # setup-qemu-action by default uses `tonistiigi/binfmt:latest` image, + # which is out of date. This causes seg faults during build. + # Here we manually fix the version. + image: tonistiigi/binfmt:qemu-v8.1.5 + platforms: all + + - name: Install cibuildwheel + # Note: the default manylinux is manylinux_2_28 + run: | + python -m pip install -U pip + python -m pip install "cibuildwheel>=3.2.0,<4" + + - name: Build wheels + env: + CIBW_BUILD: ${{ matrix.buildplat[2] }} + run: python -m cibuildwheel --output-dir wheelhouse + + - name: Assert all versions in wheelhouse + if: ${{ ! startsWith(matrix.buildplat[1], 'macos') }} + run: | + ls wheelhouse/*cp310*.whl + ls wheelhouse/*cp311*.whl + ls wheelhouse/*cp312*.whl + ls wheelhouse/*cp313*.whl + ls wheelhouse/*cp314*.whl + # Free-threading builds: + ls wheelhouse/*cp314t*.whl + + - uses: actions/upload-artifact@v4 + with: + name: wheel-${{ matrix.buildplat[1] }} + path: ./wheelhouse/*.whl + if-no-files-found: error + + make_sdist: + name: Make SDist + runs-on: macos-latest + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + ref: ${{ inputs.ref }} + + - uses: actions/setup-python@v6 + with: + # Build sdist on lowest supported Python + python-version: "3.10" + + - name: Build SDist + run: | + set -ex + python -m pip install -U pip build + python -m build --sdist . + + - name: Test SDist + run: | + python -m pip install dist/*.gz + cd ..
+ python -c "from pymongo import has_c; assert has_c()" + + - uses: actions/upload-artifact@v4 + with: + name: "sdist" + path: ./dist/*.tar.gz + + collect_dist: + runs-on: ubuntu-latest + needs: [build_wheels, make_sdist] + name: Download Wheels + steps: + - name: Download all workflow run artifacts + uses: actions/download-artifact@v5 + - name: Flatten directory + working-directory: . + run: | + find . -mindepth 2 -type f -exec mv {} . \; + find . -type d -empty -delete + - uses: actions/upload-artifact@v4 + with: + name: all-dist-${{ github.run_id }} + path: "./*" diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml new file mode 100644 index 0000000000..6abca9e528 --- /dev/null +++ b/.github/workflows/release-python.yml @@ -0,0 +1,117 @@ +name: Release + +on: + workflow_dispatch: + inputs: + following_version: + description: "The post (dev) version to set" + dry_run: + description: "Dry Run?" + default: false + type: boolean + schedule: + - cron: '30 5 * * *' + +env: + # Changes per repo + PRODUCT_NAME: PyMongo + # Changes per branch + EVERGREEN_PROJECT: mongo-python-driver + # Constant + # inputs will be empty on a scheduled run. so, we only set dry_run + # to 'false' when the input is set to 'false'. + DRY_RUN: ${{ ! contains(inputs.dry_run, 'false') }} + FOLLOWING_VERSION: ${{ inputs.following_version || '' }} + +defaults: + run: + shell: bash -eux {0} + +jobs: + pre-publish: + environment: release + runs-on: ubuntu-latest + if: github.repository_owner == 'mongodb' || github.event_name == 'workflow_dispatch' + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v3 + id: pre-publish + with: + dry_run: ${{ env.DRY_RUN }} + + build-dist: + needs: [pre-publish] + uses: ./.github/workflows/dist.yml + with: + ref: ${{ needs.pre-publish.outputs.version }} + + static-scan: + needs: [pre-publish] + uses: ./.github/workflows/codeql.yml + permissions: + security-events: write + with: + ref: ${{ needs.pre-publish.outputs.version }} + + publish: + needs: [build-dist, static-scan] + name: Upload release to PyPI + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + steps: + - name: Download all the dists + uses: actions/download-artifact@v5 + with: + name: all-dist-${{ github.run_id }} + path: dist/ + - name: Publish package distributions to TestPyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + skip-existing: true + attestations: ${{ env.DRY_RUN }} + - name: Publish package distributions to PyPI + if: startsWith(env.DRY_RUN, 'false') + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 + + post-publish: + needs: [publish] + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + contents: write + attestations: write + security-events: write + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: 
mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + - uses: mongodb-labs/drivers-github-tools/python/post-publish@v3 + with: + following_version: ${{ env.FOLLOWING_VERSION }} + product_name: ${{ env.PRODUCT_NAME }} + evergreen_project: ${{ env.EVERGREEN_PROJECT }} + token: ${{ github.token }} + dry_run: ${{ env.DRY_RUN }} diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml new file mode 100644 index 0000000000..a057570f3f --- /dev/null +++ b/.github/workflows/test-python.yml @@ -0,0 +1,280 @@ +name: Python Tests + +on: + push: + branches: ["master", "v**"] + pull_request: + workflow_dispatch: + +concurrency: + group: tests-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +permissions: + contents: read + +jobs: + + static: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install Python dependencies + run: | + just install + - name: Run linters + run: | + just lint-manual + - name: Run compilation + run: | + export PYMONGO_C_EXT_MUST_BUILD=1 + pip install -v -e . + python tools/fail_if_no_c.py + - name: Run typecheck + run: | + just typing + - run: | + sudo apt-get install -y cppcheck + - run: | + cppcheck --force bson + cppcheck pymongo + + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # Tests currently only pass on ubuntu on GitHub Actions. + os: [ubuntu-latest] + python-version: ["3.10", "pypy-3.10", "3.13t"] + mongodb-version: ["8.0"] + + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: ${{ matrix.python-version }} + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + with: + version: "${{ matrix.mongodb-version }}" + - name: Run tests + run: uv run --extra test pytest -v + + doctest: + runs-on: ubuntu-latest + name: DocTest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + with: + version: "8.0" + - name: Install dependencies + run: just install + - name: Run tests + run: | + just setup-tests doctest + just run-tests + + docs: + name: Docs Checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: just install + - name: Build docs + run: just docs + + linkcheck: + name: Link Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: 
astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: just install + - name: Build docs + run: just docs-linkcheck + + typing: + name: Typing Tests + runs-on: ubuntu-latest + strategy: + matrix: + python: ["3.10", "3.11"] + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "${{matrix.python}}" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: | + just install + - name: Run typecheck + run: | + just typing + + integration_tests: + runs-on: ubuntu-latest + name: Integration Tests + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: | + just install + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + - name: Run tests + run: | + just integration-tests + - id: setup-mongodb-ssl + uses: mongodb-labs/drivers-evergreen-tools@master + with: + ssl: true + - name: Run tests + run: | + just integration-tests + + make_sdist: + runs-on: ubuntu-latest + name: "Make an sdist" + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: actions/setup-python@v6 + with: + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + # Build sdist on lowest supported Python + python-version: "3.10" + - name: Build SDist + shell: bash + run: | + pip install build + python -m build --sdist + - uses: actions/upload-artifact@v4 + with: + name: "sdist" + path: dist/*.tar.gz + + test_sdist: + runs-on: ubuntu-latest + needs: [make_sdist] + name: Install from SDist and Test + timeout-minutes: 20 + steps: + - name: Download sdist + uses: actions/download-artifact@v5 + with: + path: sdist/ + - name: Unpack SDist + shell: bash + run: | + cd sdist + ls + mkdir test + tar --strip-components=1 -zxf *.tar.gz -C ./test + ls test + - uses: actions/setup-python@v6 + with: + cache: 'pip' + cache-dependency-path: 'sdist/test/pyproject.toml' + # Test sdist on lowest supported Python + python-version: "3.10" + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + - name: Run connect test from sdist + shell: bash + run: | + cd sdist/test + ls + which python + pip install -e ".[test]" + PYMONGO_MUST_CONNECT=1 pytest -v -k client_context + + test_minimum: + permissions: + contents: read + runs-on: ubuntu-latest + name: Test minimum dependencies and Python + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + python-version: "3.10" + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + with: + version: "8.0" + - name: Run tests + shell: bash + run: | + uv venv + source .venv/bin/activate + uv pip install -e ".[test]" --resolution=lowest-direct --force-reinstall + pytest -v test/test_srv_polling.py test/test_dns.py test/asynchronous/test_srv_polling.py test/asynchronous/test_dns.py diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 0000000000..c991de2e6d --- 
/dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,21 @@ +name: GitHub Actions Security Analysis with zizmor 🌈 + +on: + push: + branches: ["master"] + pull_request: + branches: ["**"] + +jobs: + zizmor: + name: zizmor latest via Cargo + runs-on: ubuntu-latest + permissions: + security-events: write + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run zizmor 🌈 + uses: zizmorcore/zizmor-action@da5ac40c5419dcf7f21630fb2f95e725ae8fb9d5 diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 0000000000..10fd4cdfcf --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,7 @@ +rules: + unpinned-uses: + config: + policies: + actions/*: ref-pin + mongodb-labs/drivers-github-tools/*: ref-pin + mongodb-labs/drivers-evergreen-tools: ref-pin diff --git a/.gitignore b/.gitignore index 1608ca9ddd..74ed0bbb70 100644 --- a/.gitignore +++ b/.gitignore @@ -9,9 +9,36 @@ build/ doc/_build/ dist/ tools/settings.py +drivers-evergreen-tools pymongo.egg-info/ *.so -nosetests.xml -setup.cfg -*.egg +*.egg* .tox +mongocryptd.pid +.idea/ +.vscode/ +.nova/ +.temp/ +venv/ +secrets-export.sh +libmongocrypt.tar.gz +libmongocrypt/ +.venv +expansion.yml +*expansions.yml +.evergreen/scripts/env.sh +.evergreen/scripts/test-env.sh +specifications/ +results.json +.evergreen/atlas_x509_dev_client_certificate.pem + +# Lambda temp files +test/lambda/.aws-sam +test/lambda/mongodb/pymongo/* +test/lambda/mongodb/gridfs/* +test/lambda/mongodb/bson/* +test/lambda/*.json + +# test results and logs +xunit-results/ +server.log diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..d2b9d9a17a --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,135 @@ + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-json + - id: check-yaml + exclude: template.yaml + - id: debug-statements + - id: end-of-file-fixer + exclude: WHEEL + exclude_types: [json] + - id: forbid-new-submodules + - id: trailing-whitespace + exclude: .patch + exclude_types: [json] + +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.1.3 + hooks: + - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format + +- repo: local + hooks: + - id: synchro + name: synchro + entry: bash ./tools/synchro.sh + language: python + require_serial: true + fail_fast: true + additional_dependencies: + - ruff==0.1.3 + - unasync + +- repo: https://github.com/adamchainz/blacken-docs + rev: "1.16.0" + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.3.0 + +- repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + +- repo: https://github.com/rstcheck/rstcheck + rev: v6.2.0 + hooks: + - id: rstcheck + additional_dependencies: [sphinx] + args: ["--ignore-directives=doctest,testsetup,todo,automodule","--ignore-substitutions=release", "--report-level=error"] + +# We use the Python version instead of the original version which seems to require Docker +# https://github.com/koalaman/shellcheck-precommit +- repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.9.0.6 + hooks: + - id: shellcheck + name: shellcheck + args: ["--severity=warning"] + stages: [manual] + +- repo: https://github.com/PyCQA/doc8 + rev: v1.1.1 + hooks: + - id: doc8 + args: ["--ignore=D001"] # ignore line length + stages: [manual] + +- repo: https://github.com/sirosen/check-jsonschema + rev: 0.29.0 + hooks: + - id: check-github-workflows + - id: check-github-actions + - id: check-dependabot + +- repo: https://github.com/ariebovenberg/slotscheck + rev: v0.19.0 + hooks: + - id: slotscheck + files: \.py$ + exclude: "^(test|tools)/" + stages: [manual] + args: ["--no-strict-imports"] + +- repo: https://github.com/codespell-project/codespell + rev: "v2.2.6" + hooks: + - id: codespell + # Examples of errors or updates to justify the exceptions: + # - test/test_on_demand_csfle.py:44: FLE ==> FILE + # - test/test_bson.py:1043: fo ==> of, for, to, do, go + # - test/bson_corpus/decimal128-4.json:98: Infinit ==> Infinite + # - test/test_bson.py:267: isnt ==> isn't + # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine + # - test/test_client.py:188: te ==> the, be, we, to + args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] + +- repo: local + hooks: + - id: executable-shell + name: executable-shell + entry: chmod +x + language: system + types: [shell] + exclude: | + (?x)( + .evergreen/retry-with-backoff.sh + ) + - id: generate-config + name: generate-config + entry: .evergreen/scripts/generate-config.sh + language: python + require_serial: true + additional_dependencies: ["shrub.py>=3.10.0", "pyyaml>=6.0.2"] + + - id: uv-lock + name: uv-lock + entry: uv lock + language: python + require_serial: true + files: ^(uv\.lock|pyproject\.toml|requirements.txt|requirements/.*\.txt)$ + pass_filenames: false + fail_fast: true + additional_dependencies: + - "uv>=0.8.4" diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..a3693074f6 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,24 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the doc/ directory with Sphinx +sphinx: + configuration: doc/conf.py + fail_on_warning: true + +# Set the version of Python and requirements required to build the docs. 
+python: + install: + # Install pymongo itself. + - method: pip + path: . + - requirements: requirements/docs.txt + +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 20b9dc4212..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: python - -python: - - 2.5 - - 2.6 - - 2.7 - - 3.2 - - 3.3 - - pypy - -services: - - mongodb - -script: python setup.py test - -install: - #Temporary solution for Travis CI mutiprocessing issue #155 - - sudo rm -rf /dev/shm && sudo ln -s /run/shm /dev/shm diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..a8881db9cb --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,530 @@ +# Contributing to PyMongo + +PyMongo has a large +[community](https://pymongo.readthedocs.io/en/stable/contributors.html) +and contributions are always encouraged. Contributions can be as simple +as minor tweaks to the documentation. Please read these guidelines +before sending a pull request. + +## Bugfixes and New Features + +Before starting to write code, look for existing +[tickets](https://jira.mongodb.org/browse/PYTHON) or [create +one](https://jira.mongodb.org/browse/PYTHON) for your specific issue or +feature request. That way you avoid working on something that might not +be of interest or that has already been addressed. + +## Supported Interpreters + +PyMongo supports CPython 3.10+ and PyPy3.10+. Language features not +supported by all interpreters cannot be used. + +## Style Guide + +PyMongo follows [PEP8](http://www.python.org/dev/peps/pep-0008/) +including 4 space indents and 79 character line limits. + +## General Guidelines + +- Avoid backward breaking changes if at all possible. +- Write inline documentation for new classes and methods. +- We use [uv](https://docs.astral.sh/uv/) for python environment management and packaging. +- We use [just](https://just.systems/man/en/) as our task runner. +- Write tests and make sure they pass (make sure you have a mongod + running on the default port, then execute `just test` from the cmd + line to run the test suite). +- Add yourself to doc/contributors.rst `:)` + +## Authoring a Pull Request + +**Our Pull Request Policy is based on this** [Code Review Developer +Guide](https://google.github.io/eng-practices/review) + +The expectation for any code author is to provide all the context needed +in the space of a pull request for any engineer to feel equipped to +review the code. Depending on the type of change, do your best to +highlight important new functions or objects you've introduced in the +code; think complex functions or new abstractions. Whilst it may seem +like more work for you to adjust your pull request, the reality is that your +likelihood of getting a review sooner shoots up. + +**Self Review Guidelines to follow** + +- If the PR is too large, split it if possible. + + - Use 250 LoC (excluding test data and config changes) as a + rule-of-thumb. + + - Moving and changing code should be in separate PRs or commits. + + - Moving: Taking large code blobs and transplanting + them to another file. There's generally no (or very + little) actual code changed other than a cut and + paste. It can even be extended to large deletions. + - Changing: Adding code changes (be that refactors or + functionality additions/subtractions). + - These two, when mixed, can muddy understanding and + sometimes make it harder for reviewers to keep track + of things. + +- Prefer explaining with code comments instead of PR comments.
+ +**Provide background** + +- The PR description and linked tickets should answer the "what" and + "why" of the change. The code change explains the "how". + +**Follow the Template** + +- Please do not deviate from the template we make; it is there for a + lot of reasons. If it is a one line fix, we still need to have + context on what and why it is needed. + +- If making a versioning change, please let that be known. See examples below: + + - `versionadded:: 3.11` + - `versionchanged:: 3.5` + +**Pull Request Template Breakdown** + +- **Github PR Title** + + - The PR Title format should always be + `[JIRA-ID] : Jira Title or Blurb Summary`. + +- **JIRA LINK** + +- Convenient link to the associated JIRA ticket. + +- **Summary** + + - Small blurb on why this is needed. The JIRA task should have + the more in-depth description, but this should still, at a + high level, give anyone looking an understanding of why the + PR has been checked in. + +- **Changes in this PR** + + - The explicit code changes that this PR is introducing. This + should be more specific than just the task name. (Unless the + task name is very clear). + +- **Test Plan** + + - Everything needs a test description. Describe what you did + to validate your changes actually worked; if you did + nothing, then document you did not test it. Aim to make + these steps reproducible by other engineers, specifically + with your primary reviewer in mind. + +- **Screenshots** + + - Any images that provide more context to the PR. Usually, + these just coincide with the test plan. + +- **Callouts or follow-up items** + + - This is a good place for identifying "to-dos" that you've + placed in the code (Must have an accompanying JIRA Ticket). + - Potential bugs that you are unsure how to test in the code. + - Opinions you want to receive about your code. + +## Running Linters + +PyMongo uses [pre-commit](https://pypi.org/project/pre-commit/) for +managing linting of the codebase. `pre-commit` performs various checks +on all files in PyMongo and uses tools that help follow a consistent +code style within the codebase. + +To set up `pre-commit` locally, run: + +```bash +brew install pre-commit +pre-commit install +``` + +To run `pre-commit` manually, run: + +```bash +pre-commit run --all-files +``` + +To run a manual hook like `ruff` manually, run: + +```bash +pre-commit run --all-files --hook-stage manual ruff +``` + +Typically we use `just` to run the linters, e.g. + +```bash +just install # this will install a venv with pre-commit installed, and install the pre-commit hook. +just typing-mypy +just run lint-manual +``` + +## Documentation + +To contribute to the [API documentation](https://pymongo.readthedocs.io/en/stable/) just make your +changes to the inline documentation of the appropriate [source code](https://github.com/mongodb/mongo-python-driver) or +[rst file](https://github.com/mongodb/mongo-python-driver/tree/master/doc) in +a branch and submit a [pull request](https://help.github.com/articles/using-pull-requests). You +might also use the GitHub +[Edit](https://github.com/blog/844-forking-with-the-edit-button) button. + +We use [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html) for all +documentation including narrative docs, and the [Sphinx docstring format](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html). 
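+
+For illustration, here is a minimal docstring sketch in that style (the class, method,
+parameters, and version number below are hypothetical, not part of the PyMongo API):
+
+```python
+class Frobnicator:
+    """A hypothetical class, shown only to illustrate the docstring format."""
+
+    def frobnicate(self, name: str, timeout: float | None = None) -> int:
+        """Frobnicate the item with the given name and return a count.
+
+        :param name: The name of the item to frobnicate.
+        :param timeout: How long to wait, in seconds, before giving up.
+
+        .. versionadded:: 4.0
+        """
+        return 0
+```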
+ +You can build the documentation locally by running: + +```bash +just docs +``` + +When updating docs, it can be helpful to run the live docs server as: + +```bash +just docs-serve +``` + +Browse to the link provided, and then as you make changes to docstrings or narrative docs, +the pages will re-render and the browser will automatically refresh. + +## Running Tests Locally + +- Run `just install` to set up a local virtual environment, or you can manually + create a virtual environment and run `pytest` directly. If you want to use a specific + version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. +- Ensure you have started the appropriate Mongo Server(s). You can run `just run-server` with optional args + to set up the server. All given options will be passed to + [`run-orchestration.sh`](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-orchestration.sh). Run `$DRIVERS_TOOLS/.evergreen/run-orchestration.sh -h` + for a full list of options. +- Run `just test` or `pytest` to run all of the tests. +- Append `test/<mod_name>.py::<class_name>::<test_name>` to run + specific tests. You can omit the `<test_name>` to test a full class + and the `<class_name>` to test a full module. For example: + `just test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`. +- Use the `-k` argument to select tests by pattern. + + +## Running tests that require secrets, services, or other configuration + +### Prerequisites + +- Clone `drivers-evergreen-tools`: + `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. +- Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools`. This can be put into a `.bashrc` file + for convenience. +- Some tests require access to [Drivers test secrets](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/secrets_handling#secrets-handling). + +### Usage + +- Run `just run-server` with optional args to set up the server. +- Run `just setup-tests` with optional args to set up the test environment, secrets, etc. + See `just setup-tests -h` for a full list of available options. +- Run `just run-tests` to run the tests in an appropriate Python environment. +- When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. + +### SSL tests + +- Run `just run-server --ssl` to start the server with TLS enabled. +- Run `just setup-tests --ssl`. +- Run `just run-tests`. + +Note: for general testing purposes with a TLS-enabled server, you can use the following (this should ONLY be used +for local testing): + +```python +from pymongo import MongoClient + +client = MongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" +) +``` + +If you want to use the actual certificate file then set `tlsCertificateKeyFile` to the local path +to `/test/certificates/client.pem` and `tlsCAFile` to the local path to `/test/certificates/ca.pem`. + +### Encryption tests + +- Run `just run-server` to start the server. +- Run `just setup-tests encryption`. +- Run the tests with `just run-tests`. + +To test with `encryption` and `PyOpenSSL`, use `just setup-tests encryption pyopenssl`. + +### PyOpenSSL tests + +- Run `just run-server` to start the server. +- Run `just setup-tests default_sync pyopenssl`. +- Run the tests with `just run-tests`.
+ +Note: `PyOpenSSL` is not used in async tests, but you can use `just setup-tests default_async pyopenssl` +to verify that PyMongo falls back to the standard library `ssl` module. + +### Load balancer tests + +- Install `haproxy` (available as `brew install haproxy` on macOS). +- Start the server with `just run-server load_balancer`. +- Set up the test with `just setup-tests load_balancer`. +- Run the tests with `just run-tests`. + +### AWS auth tests + +- Run `just run-server auth_aws` to start the server. +- Run `just setup-tests auth_aws <test-type>` to set up the AWS test. +- Run the tests with `just run-tests`. + +### OIDC auth tests + +- Run `just setup-tests auth_oidc <test-type>` to set up the OIDC test. +- Run the tests with `just run-tests`. + +The supported types are [`default`, `azure`, `gcp`, `eks`, `aks`, and `gke`]. +For the `eks` test, you will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). + +### KMS tests + +For KMS tests that are run locally and are expected to fail, in this case using `azure`: + +- Run `just run-server`. +- Run `just setup-tests kms azure-fail`. +- Run `just run-tests`. + +For KMS tests that run remotely and are expected to pass, in this case using `gcp`: + +- Run `just setup-tests kms gcp`. +- Run `just run-tests`. + +### Enterprise Auth tests + +Note: these tests can only be run from an Evergreen host. + +- Run `just run-server enterprise_auth`. +- Run `just setup-tests enterprise_auth`. +- Run `just run-tests`. + +### Atlas Connect tests + +- Run `just setup-tests atlas_connect`. +- Run `just run-tests`. + +### Search Index tests + +- Run `just run-server search_index`. +- Run `just setup-tests search_index`. +- Run `just run-tests`. + +### MockupDB tests + +- Run `just setup-tests mockupdb`. +- Run `just run-tests`. + +### Doc tests + +The doc tests require a running server. + +- Run `just run-server`. +- Run `just setup-tests doctest`. +- Run `just run-tests`. + +### Free-threaded Python Tests + +In the evergreen builds, the tests are configured to use the free-threaded python from the toolchain. +Locally you can run: + +- Run `just run-server`. +- Run `just setup-tests`. +- Run `UV_PYTHON=3.13t just run-tests`. + +### AWS Lambda tests + +You will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). + +- Run `just setup-tests aws_lambda`. +- Run `just run-tests`. + +### mod_wsgi tests + +Note: these tests can only be run from an Evergreen Linux host that has the Python toolchain. + +- Run `just run-server`. +- Run `just setup-tests mod_wsgi <mode>`. +- Run `just run-tests`. + +The `mode` can be `standalone` or `embedded`. For the `replica_set` version of the tests, use +`TOPOLOGY=replica_set just run-server`. + +### OCSP tests + +- Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. +This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. +MongoDB servers on MacOS and Windows do not staple OCSP responses and only support RSA. +NOTE: because the mock ocsp responder MUST be started prior to the server starting, the ocsp tests start the server +as part of `setup-tests`. + +- Run `just setup-tests ocsp <sub-test>` (options are "valid", "revoked", "valid-delegate", "revoked-delegate").
+- Run `just run-tests`. + +If you are running one of the `no-responder` tests, omit the `run-server` step. + +### Perf Tests + +- Start the appropriate server, e.g. `just run-server --version=v8.0-perf --ssl`. +- Set up the tests with `sync` or `async`: `just setup-tests perf sync`. +- Run the tests: `just run-tests`. + +## Enable Debug Logs + +- Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest` to output all debug logs to the terminal. **Warning**: This will output a huge amount of logs. +- Add `log_cli=1` and `log_cli_level="DEBUG"` to the `tool.pytest.ini_options` section in `pyproject.toml` to enable debug logs in this manner by default on your machine. +- Set `DEBUG_LOG=1` and run `just setup-tests`, `just test`, or `pytest` to enable debug logs only for failed tests. +- Finally, you can use `just setup-tests --debug-log`. +- For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for failed tests in the patch. + +## Testing minimum dependencies + +To run any of the test suites with minimum supported dependencies, pass `--test-min-deps` to +`just setup-tests`. + +## Adding a new test suite + +- If adding new test files that should only be run for that test suite, add a pytest marker to the file and add + to the list of pytest markers in `pyproject.toml`. Then add the test suite to the `TEST_SUITE_MAP` in `.evergreen/scripts/utils.py`. If for some reason it is not a pytest-runnable test, add it to the list of `EXTRA_TESTS` instead. +- If the test uses Atlas or otherwise doesn't use `run-orchestration.sh`, add it to the `NO_RUN_ORCHESTRATION` list in + `.evergreen/scripts/utils.py`. +- If there is something special required to run the local server or there is an extra flag that should always be set + like `AUTH`, add that logic to `.evergreen/scripts/run_server.py`. +- The bulk of the logic will typically be in `.evergreen/scripts/setup_tests.py`. This is where you should fetch secrets and make them available using `write_env`, start services, and write other env vars needed using `write_env`. +- If there are any special test considerations, including not running `pytest` at all, handle it in `.evergreen/scripts/run_tests.py`. +- If there are any services or atlas clusters to tear down, handle them in `.evergreen/scripts/teardown_tests.py`. +- Add functions to generate the test variant(s) and task(s) to `.evergreen/scripts/generate_config.py`. +- Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. +- Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. + +## Handling flaky tests + +We have a custom `flaky` decorator in [test/asynchronous/utils.py](test/asynchronous/utils.py) that can be used for +tests that are `flaky`. By default the decorator only applies when not running on CPython on Linux, since other +runtimes tend to have more variation. When using the `flaky` decorator, open a corresponding ticket and +use the ticket number as the "reason" parameter to the decorator, e.g. `@flaky(reason="PYTHON-1234")`. +When running tests locally (not in CI), the `flaky` decorator will be disabled unless `ENABLE_FLAKY` is set. +To disable the `flaky` decorator in CI, you can use `evergreen patch --param DISABLE_FLAKY=1`. + +## Integration Tests + +The `integration_tests` directory has a set of scripts that verify the usage of PyMongo with downstream packages or frameworks. See the [README](./integration_tests/README.md) for more information.
+
+The tests should be able to run with and without SSL enabled.
+
+## Specification Tests
+
+The MongoDB [specifications repository](https://github.com/mongodb/specifications)
+holds in-progress and completed specifications for features of MongoDB, drivers,
+and associated products. PyMongo supports the [Unified Test Format](https://jira.mongodb.org/browse/DRIVERS-709)
+for running specification tests to confirm PyMongo behaves as expected.
+
+### Resynchronizing the Specification Tests
+
+If you would like to re-sync the copy of the specification tests in the
+PyMongo repository with the one inside the [specifications
+repo](https://github.com/mongodb/specifications), please use the script
+provided in `.evergreen/resync-specs.sh`:
+
+```bash
+git clone git@github.com:mongodb/specifications.git
+export MDB_SPECS=~/specifications
+cd ~/mongo-python-driver/.evergreen
+./resync-specs.sh -b "" spec1 spec2 ...
+./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files that match the regex "connection-string*"
+cd ..
+```
+
+The `-b` flag takes a regex pattern used to block files you do not wish to
+update in PyMongo. This is primarily helpful if you are implementing a
+new feature in PyMongo that has spec tests already implemented, or if
+you are attempting to validate new spec tests in PyMongo.
+
+### Automated Specification Test Resyncing
+
+The `.evergreen/scripts/resync-all-specs.sh` script
+automatically runs once a week to resync all the specs with the [specifications repo](https://github.com/mongodb/specifications).
+A PR will be generated by mongodb-drivers-pr-bot containing any changes picked up by this resync.
+The PR description will display the name(s) of the updated specs along
+with any errors that occurred.
+
+Spec test changes associated with a behavioral change or bugfix that has yet to be implemented in PyMongo
+must be added to a patch file in `.evergreen/spec-patch`. Each patch
+file must be named after the associated PYTHON ticket and contain the
+test differences between PyMongo's current tests and the specification.
+All changes listed in these patch files will be *undone* by the script and won't
+be applied to PyMongo's tests.
+
+When a new test file or folder is added to the spec repo before the associated code changes are implemented, that test's path must be added to `.evergreen/remove-unimplemented-tests.sh` along with a comment indicating the associated PYTHON ticket for those changes.
+
+Any PR that implements a PYTHON ticket documented in a patch file or within `.evergreen/remove-unimplemented-tests.sh` must also remove the associated patch file or entry in `remove-unimplemented-tests.sh`.
+
+#### Adding to a patch file
+
+To add to or create a patch file, run `git diff` to show the desired changes to undo and copy the
+results into the patch file.
+
+For example: the imaginary, unimplemented PYTHON-1234 ticket has associated spec test changes. To add those changes to `PYTHON-1234.patch`, do the following:
+
+```bash
+git diff HEAD~1 path/to/file >> .evergreen/spec-patch/PYTHON-1234.patch
+```
+
+#### Running Locally
+
+Both `resync-all-specs.sh` and `resync-all-specs.py` can be run locally (and won't generate a PR):
+```bash
+./.evergreen/scripts/resync-all-specs.sh
+python3 ./.evergreen/scripts/resync-all-specs.py
+```
+
+## Making a Release
+
+Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process).
+
+## Asyncio considerations
+
+PyMongo's synchronous API is generated from its asynchronous source: the files in `*/asynchronous` are converted
+to their `*/synchronous` counterparts using [unasync](https://github.com/python-trio/unasync/) and some custom transforms.
+
+Where possible, edit the code in `*/asynchronous/*.py` and not the synchronous files.
+You can run `pre-commit run --all-files synchro` before running tests if you are testing synchronous code.
+
+To prevent the `synchro` hook from accidentally overwriting code, it first checks whether a sync version
+of a file has changed without its async counterpart changing, and fails if so.
+In the unlikely scenario that you want to override this behavior, first export `OVERRIDE_SYNCHRO_CHECK=1`.
+
+Sometimes, the `synchro` hook will fail and introduce changes to many previously unmodified files. This is due to static
+Python errors, such as missing imports, incorrect syntax, or other fatal typos. To resolve these issues,
+run `pre-commit run --all-files --hook-stage manual ruff` and fix all reported errors before running the `synchro`
+hook again.
+
+## Converting a test to async
+
+The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a
+partially-converted asynchronous version of the same name to the `test/asynchronous` directory.
+Use this generated file as a starting point for the completed conversion.
+
+The script is used like so: `python tools/convert_test_to_async.py [test_file.py]`
+
+## Generating a flame graph using py-spy
+
+To profile a test script and generate a flame graph, follow these steps:
+1. Install `py-spy` if you haven't already:
+   ```bash
+   pip install py-spy
+   ```
+2. Inside your test script, perform any required setup and then loop over the code you want to profile for improved sampling.
+3. Run `py-spy record -o <output.svg> -r <rate> -- python <path/to/test/script.py>` to generate a `.svg` file containing the flame graph.
+   (Note: on macOS you will need to run this command using `sudo` to allow `py-spy` to attach to the Python process.)
+4. If you need to include native code (for example the C extensions), profiling should be done on a Linux system, as macOS and Windows do not support the `--native` option of `py-spy`.
+   Creating an Ubuntu Evergreen spawn host and using `scp` to copy the flame graph `.svg` file back to your local machine is the best way to do this.
+
+## Dependabot updates
+
+Dependabot will raise PRs at most once per week, grouped by GitHub Actions updates and Python requirement
+file updates. We have a pre-commit hook that will update the `uv.lock` file when requirements change.
+To update the lock file on a failing PR, you can use a method like `gh pr checkout <pr-number>`, then run
+`just lint uv-lock` to update the lock file, and then push the changes. If a typing dependency has changed,
+also run `just typing` and handle any new findings.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index a1ad9c89ef..0000000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-Contributing to PyMongo
-=======================
-
-PyMongo has a large `community
-`_ and
-contributions are always encouraged. Contributions can be as simple as
-minor tweaks to the documentation. Please read these guidelines before
-sending a pull request.
- -Bugfixes and New Features -------------------------- - -Before starting to write code, look for existing `tickets -`_ or `create one -`_ for your specific -issue or feature request. That way you avoid working on something -that might not be of interest or that has already been addressed. - -Supported Interpreters ----------------------- - -PyMongo supports CPython 2.4 and newer, PyPy, and Jython. Language -features not supported by all interpreters can not be used (e.g. -the `with statement -`_ -is not supported in Python 2.4). Please also ensure that your code is -properly converted by `2to3 `_ for -Python 3 support. - -Style Guide ------------ - -PyMongo follows `PEP8 `_ -including 4 space indents and 79 character line limits. - -General Guidelines ------------------- - -- Avoid backward breaking changes if at all possible. -- Write inline documentation for new classes and methods. -- Write tests and make sure they pass (make sure you have a mongod - running on the default port, then execute ``python setup.py test`` - from the cmd line to run the test suite). -- Add yourself to doc/contributors.rst :) - -Documentation -------------- - -To contribute to the `API documentation `_ -just make your changes to the inline documentation of the appropriate -`source code `_ or `rst file -`_ in a -branch and submit a `pull request `_. -You might also use the github `Edit `_ -button. - diff --git a/LICENSE b/LICENSE index 57bc88a15a..261eeb9e9f 100644 --- a/LICENSE +++ b/LICENSE @@ -199,4 +199,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index db73d5f476..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,10 +0,0 @@ -include README.rst -include LICENSE -include ez_setup.py -recursive-include doc *.rst -recursive-include doc *.py -recursive-include tools *.py -include tools/README.rst -recursive-include test *.pem -recursive-include test *.py -recursive-include bson *.h diff --git a/README.md b/README.md new file mode 100644 index 0000000000..ba1688cb70 --- /dev/null +++ b/README.md @@ -0,0 +1,217 @@ +# PyMongo + +[![PyPI Version](https://img.shields.io/pypi/v/pymongo)](https://pypi.org/project/pymongo) +[![Python Versions](https://img.shields.io/pypi/pyversions/pymongo)](https://pypi.org/project/pymongo) +[![Monthly Downloads](https://static.pepy.tech/badge/pymongo/month)](https://pepy.tech/project/pymongo) +[![API Documentation Status](https://readthedocs.org/projects/pymongo/badge/?version=stable)](http://pymongo.readthedocs.io/en/stable/api?badge=stable) + +## About + +The PyMongo distribution contains tools for interacting with MongoDB +database from Python. The `bson` package is an implementation of the +[BSON format](http://bsonspec.org) for Python. The `pymongo` package is +a native Python driver for MongoDB, offering both synchronous and asynchronous APIs. The `gridfs` package is a +[gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/) +implementation on top of `pymongo`. + +PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases. + +## Documentation + +Documentation is available at +[mongodb.com](https://www.mongodb.com/docs/languages/python/pymongo-driver/current/). 
+ +[API documentation](https://pymongo.readthedocs.io/en/stable/api/) and the [full changelog](https://pymongo.readthedocs.io/en/stable/changelog.html) for each release is available at [readthedocs.io](https://pymongo.readthedocs.io/en/stable/index.html). + +## Support / Feedback + +For issues with, questions about, or feedback for PyMongo, please look +into our [support channels](https://support.mongodb.com/welcome). Please +do not email any of the PyMongo developers directly with issues or +questions - you're more likely to get an answer on +[StackOverflow](https://stackoverflow.com/questions/tagged/mongodb) +(using a "mongodb" tag). + +## Bugs / Feature Requests + +Think you've found a bug? Want to see a new feature in PyMongo? Please +open a case in our issue management tool, JIRA: + +- [Create an account and login](https://jira.mongodb.org). +- Navigate to [the PYTHON + project](https://jira.mongodb.org/browse/PYTHON). +- Click **Create Issue** - Please provide as much information as + possible about the issue type and how to reproduce it. + +Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) +and the Core Server (i.e. SERVER) project are **public**. + +### How To Ask For Help + +Please include all of the following information when opening an issue: + +- Detailed steps to reproduce the problem, including full traceback, + if possible. + +- The exact python version used, with patch level: + +```bash +python -c "import sys; print(sys.version)" +``` + +- The exact version of PyMongo used, with patch level: + +```bash +python -c "import pymongo; print(pymongo.version); print(pymongo.has_c())" +``` + +- The operating system and version (e.g. Windows 7, OSX 10.8, ...) + +- Web framework or asynchronous network library used, if any, with + version (e.g. Django 1.7, mod_wsgi 4.3.0, gevent 1.0.1, Tornado + 4.0.2, ...) + +### Security Vulnerabilities + +If you've identified a security vulnerability in a driver or any other +MongoDB project, please report it according to the [instructions +here](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report/). + +## Installation + +PyMongo can be installed with [pip](http://pypi.python.org/pypi/pip): + +```bash +python -m pip install pymongo +``` + +You can also download the project source and do: + +```bash +pip install . +``` + +Do **not** install the "bson" package from pypi. PyMongo comes with +its own bson package; running "pip install bson" installs a third-party +package that is incompatible with PyMongo. + +## Dependencies + +PyMongo supports CPython 3.10+ and PyPy3.10+. + +Required dependencies: + +Support for `mongodb+srv://` URIs requires [dnspython](https://pypi.python.org/pypi/dnspython) + +Optional dependencies: + +GSSAPI authentication requires +[pykerberos](https://pypi.python.org/pypi/pykerberos) on Unix or +[WinKerberos](https://pypi.python.org/pypi/winkerberos) on Windows. 
The +correct dependency can be installed automatically along with PyMongo: + +```bash +python -m pip install "pymongo[gssapi]" +``` + +MONGODB-AWS authentication requires +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[aws]" +``` + +OCSP (Online Certificate Status Protocol) requires +[PyOpenSSL](https://pypi.org/project/pyOpenSSL/), +[requests](https://pypi.org/project/requests/), +[service_identity](https://pypi.org/project/service_identity/) and may +require [certifi](https://pypi.python.org/pypi/certifi): + +```bash +python -m pip install "pymongo[ocsp]" +``` + +Wire protocol compression with snappy requires +[python-snappy](https://pypi.org/project/python-snappy): + +```bash +python -m pip install "pymongo[snappy]" +``` + +Wire protocol compression with zstandard requires +[zstandard](https://pypi.org/project/zstandard): + +```bash +python -m pip install "pymongo[zstd]" +``` + +Client-Side Field Level Encryption requires +[pymongocrypt](https://pypi.org/project/pymongocrypt/) and +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[encryption]" +``` +You can install all dependencies automatically with the following +command: + +```bash +python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" +``` + +## Examples + +Here's a basic example (for more see the *examples* section of the +docs): + +```pycon +>>> import pymongo +>>> client = pymongo.MongoClient("localhost", 27017) +>>> db = client.test +>>> db.name +'test' +>>> db.my_collection +Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') +>>> db.my_collection.insert_one({"x": 10}).inserted_id +ObjectId('4aba15ebe23f6b53b0000000') +>>> db.my_collection.insert_one({"x": 8}).inserted_id +ObjectId('4aba160ee23f6b543e000000') +>>> db.my_collection.insert_one({"x": 11}).inserted_id +ObjectId('4aba160ee23f6b543e000002') +>>> db.my_collection.find_one() +{'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} +>>> for item in db.my_collection.find(): +... print(item["x"]) +... +10 +8 +11 +>>> db.my_collection.create_index("x") +'x_1' +>>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): +... print(item["x"]) +... +8 +10 +11 +>>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] +[8, 11] +``` + +## Learning Resources + +- MongoDB Learn - [Python +courses](https://learn.mongodb.com/catalog?labels=%5B%22Language%22%5D&values=%5B%22Python%22%5D). +- [Python Articles on Developer +Center](https://www.mongodb.com/developer/languages/python/). + +## Testing + +The easiest way to run the tests is to run the following from the repository root. + +```bash +pip install -e ".[test]" +pytest +``` + +For more advanced testing scenarios, see the [contributing guide](./CONTRIBUTING.md#running-tests-locally). diff --git a/README.rst b/README.rst deleted file mode 100644 index 08bdfd4dc6..0000000000 --- a/README.rst +++ /dev/null @@ -1,123 +0,0 @@ -======= -PyMongo -======= -:Info: See `the mongo site `_ for more information. See `github `_ for the latest source. -:Author: Mike Dirolf -:Maintainer: Bernie Hackett - -About -===== - -The PyMongo distribution contains tools for interacting with MongoDB -database from Python. The ``bson`` package is an implementation of -the `BSON format `_ for Python. The ``pymongo`` -package is a native Python driver for MongoDB. The ``gridfs`` package -is a `gridfs -`_ -implementation on top of ``pymongo``. 
- -Support / Feedback -================== - -For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please -do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `mongodb-user -`_ list on Google Groups. - -Bugs / Feature Requests -======================= - -Think you’ve found a bug? Want to see a new feature in PyMongo? Please open a -case in our issue management tool, JIRA: - -- `Create an account and login `_. -- Navigate to `the PYTHON project `_. -- Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it. - -Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) and the -Core Server (i.e. SERVER) project are **public**. - -Security Vulnerabilities ------------------------- - -If you’ve identified a security vulnerability in a driver or any other -MongoDB project, please report it according to the `instructions here -`_. - -Installation -============ - -If you have `setuptools -`_ installed you -should be able to do **easy_install pymongo** to install -PyMongo. Otherwise you can download the project source and do **python -setup.py install** to install. - -Dependencies -============ - -The PyMongo distribution is supported and tested on Python 2.x (where -x >= 4) and Python 3.x (where x >= 1). PyMongo versions <= 1.3 also -supported Python 2.3, but that is no longer supported. - -Additional dependencies are: - -- (to generate documentation) sphinx_ -- (to auto-discover tests) `nose `_ - -Examples -======== -Here's a basic example (for more see the *examples* section of the docs): - -.. code-block:: pycon - - >>> import pymongo - >>> client = pymongo.MongoClient("localhost", 27017) - >>> db = client.test - >>> db.name - u'test' - >>> db.my_collection - Collection(Database(MongoClient('localhost', 27017), u'test'), u'my_collection') - >>> db.my_collection.save({"x": 10}) - ObjectId('4aba15ebe23f6b53b0000000') - >>> db.my_collection.save({"x": 8}) - ObjectId('4aba160ee23f6b543e000000') - >>> db.my_collection.save({"x": 11}) - ObjectId('4aba160ee23f6b543e000002') - >>> db.my_collection.find_one() - {u'x': 10, u'_id': ObjectId('4aba15ebe23f6b53b0000000')} - >>> for item in db.my_collection.find(): - ... print item["x"] - ... - 10 - 8 - 11 - >>> db.my_collection.create_index("x") - u'x_1' - >>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): - ... print item["x"] - ... - 8 - 10 - 11 - >>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] - [8, 11] - -Documentation -============= - -You will need sphinx_ installed to generate the -documentation. Documentation can be generated by running **python -setup.py doc**. Generated documentation can be found in the -*doc/build/html/* directory. - -Testing -======= - -The easiest way to run the tests is to install `nose -`_ (**easy_install -nose**) and run **nosetests** or **python setup.py test** in the root -of the distribution. Tests are located in the *test/* directory. - -.. _sphinx: http://sphinx.pocoo.org/ diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index f9abea65e4..0000000000 --- a/RELEASE.rst +++ /dev/null @@ -1,70 +0,0 @@ -Some notes on PyMongo releases -============================== - -Versioning ----------- - -We shoot for a release every few months - that will generally just -increment the middle version number (e.g. 2.1.1 -> 2.2). 
- -Minor releases are reserved for bug fixes (in general no new features -or deprecations) - they only happen in cases where there is a critical -bug in a recently released version, or when a release has no new -features or API changes. - -In between releases we use a "+" version number to denote the version -under development. So if we just released 2.1, then the current dev -version would be 2.1+. When we make the next release (2.1.1 or 2.2) we -replace all instances of 2.1+ in the docs with the new version number. - -Deprecation ------------ - -Changes should be backwards compatible unless absolutely necessary. When making -API changes the approach is generally to add a deprecation warning but keeping -the existing API functional. Eventually (after at least ~4 releases) we can -remove the old API. - -Doing a Release ---------------- - -1. Test release on Python 2.4-2.7 and 3.1-3.3 on Windows, Linux and OSX, - with and without the C extension. Generally enough to just run the tests on - 2.4, 2.7 and 3.3 with and without the extension on a single platform, - and then just test any version on the other platforms as a sanity check. - `python setup.py test` will build the extension and test. - `python tools/clean.py` will remove the extension, and then `nosetests` will - run the tests without it. Run the replica set and mongos high-availability - tests with `PYTHONPATH=. python test/high_availability/test_ha.py` and the slow - tests with `nosetests -d test/slow`. Can also run the doctests: `python - setup.py doc -t`. For building extensions on Windows check section below. - -2. Add release notes to doc/changelog.rst. Generally just summarize/clarify - the git log, but might add some more long form notes for big changes. - -3. Search and replace the "+" version number w/ the new version number (see - note above). - -4. Make sure version number is updated in setup.py and pymongo/__init__.py - -5. Commit with a BUMP version_number message. - -6. Tag w/ version_number - -7. Push commit / tag. - -8. Push source to PyPI: `python setup.py sdist upload` - -9. Push binaries to PyPI; for each version of python and platform do:`python - setup.py bdist_egg upload`. Probably best to do `python setup.py bdist_egg` - first, to make sure the egg builds properly. Notably on the Windows machine, - for Python 2.4 and 2.5, you will have to run `python setup.py build -c mingw32 - bdist_egg upload` or the C extension build will fail with an error about Visual - Studio 2003. On Windows we also push a binary installer. The setup.py target - for that is `bdist_wininst`. - -10. Make sure the docs have properly updated (driver buildbot does this). - -11. Add a "+" to the version number in setup.py/__init__.py, commit, push. - -12. Announce! diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES new file mode 100644 index 0000000000..ad00831a2a --- /dev/null +++ b/THIRD-PARTY-NOTICES @@ -0,0 +1,98 @@ +PyMongo uses third-party libraries or other resources that may +be distributed under licenses different than the PyMongo software. + +In the event that we accidentally failed to list a required notice, +please bring it to our attention through any of the ways detailed here: + + https://jira.mongodb.org/projects/PYTHON + +The attached notices are provided for information only. + +For any licenses that require disclosure of source, sources are available at +https://github.com/mongodb/mongo-python-driver. 
+ + +1) License Notice for time64.c +------------------------------ + +Copyright (c) 2007-2010 Michael G Schwern + +This software originally derived from Paul Sheer's pivotal_gmtime_r.c. + +The MIT License: + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +2) License Notice for _asyncio_lock.py +----------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/_setup.py b/_setup.py new file mode 100644 index 0000000000..f99e9e7dc8 --- /dev/null +++ b/_setup.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import os +import sys +import warnings + +# Hack to silence atexit traceback in some Python versions +try: + import multiprocessing # noqa: F401 +except ImportError: + pass + +from setuptools import setup +from setuptools.command.build_ext import build_ext +from setuptools.extension import Extension + + +class custom_build_ext(build_ext): + """Allow C extension building to fail. + + The C extension speeds up BSON encoding, but is not essential. + """ + + warning_message = """ +******************************************************************** +WARNING: %s could not +be compiled. No C extensions are essential for PyMongo to run, +although they do result in significant speed improvements. +%s + +Please see the installation docs for solutions to build issues: + +https://pymongo.readthedocs.io/en/stable/installation.html + +Here are some hints for popular operating systems: + +If you are seeing this message on Linux you probably need to +install GCC and/or the Python development package for your +version of Python. + +Debian and Ubuntu users should issue the following command: + + $ sudo apt-get install build-essential python-dev + +Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, +Oracle Linux, Fedora, etc.) should issue the following command: + + $ sudo yum install gcc python-devel + +If you are seeing this message on Microsoft Windows please install +PyMongo using pip. Modern versions of pip will install PyMongo +from binary wheels available on pypi. If you must install from +source read the documentation here: + +https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows + +If you are seeing this message on macOS / OSX please install PyMongo +using pip. Modern versions of pip will install PyMongo from binary +wheels available on pypi. If wheels are not available for your version +of macOS / OSX, or you must install from source read the documentation +here: + +https://pymongo.readthedocs.io/en/stable/installation.html#osx +******************************************************************** +""" + + def run(self): + try: + build_ext.run(self) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "Extension modules", + "There was an issue with your platform configuration - see above.", + ), + stacklevel=2, + ) + + def build_extension(self, ext): + # "ProgramFiles(x86)" is not a valid environment variable in Cygwin but is needed for + # the MSVCCompiler in distutils. 
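+        # Derive a "ProgramFiles(x86)" value from "ProgramFiles" so the MSVC compiler lookup succeeds.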
+ if os.name == "nt": + if "ProgramFiles" in os.environ and "ProgramFiles(x86)" not in os.environ: + os.environ["ProgramFiles(x86)"] = os.environ["ProgramFiles"] + " (x86)" + name = ext.name + try: + build_ext.build_extension(self, ext) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension module" % (name,), # noqa: UP031 + "The output above this warning shows how the compilation failed.", + ), + stacklevel=2, + ) + + +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], + ), +] + + +if "--no_ext" in sys.argv or os.environ.get("NO_EXT"): + try: + sys.argv.remove("--no_ext") + except ValueError: + pass + ext_modules = [] +elif ( + sys.platform.startswith("java") + or sys.platform == "cli" + or sys.implementation.name in ("pypy", "graalpy") +): + sys.stdout.write( + """ +*****************************************************\n +The optional C extensions are currently not supported\n +by this python implementation.\n +*****************************************************\n +""" + ) + ext_modules = [] + +setup( + cmdclass={"build_ext": custom_build_ext}, + ext_modules=ext_modules, + packages=["bson", "pymongo", "gridfs"], +) # type:ignore diff --git a/bson/__init__.py b/bson/__init__.py index adafadbf42..d260fb876f 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,632 +13,1472 @@ # limitations under the License. """BSON (Binary JSON) encoding and decoding. + +The mapping from Python types to BSON types is as follows: + +======================================= ============= =================== +Python Type BSON Type Supported Direction +======================================= ============= =================== +None null both +bool boolean both +int [#int]_ int32 / int64 py -> bson +:class:`bson.int64.Int64` int64 both +float number (real) both +str string both +list array both +dict object both +:class:`~bson.son.SON` object both +:py:class:`~collections.abc.Mapping` object py -> bson +:class:`~bson.raw_bson.RawBSONDocument` object both [#raw]_ +datetime.datetime [#dt]_ [#dt2]_ UTC datetime both +:class:`~bson.datetime_ms.DatetimeMS` UTC datetime both [#dt3]_ +:class:`~bson.regex.Regex` regex both +compiled re [#re]_ regex py -> bson +:class:`~bson.binary.Binary` binary both +:py:class:`uuid.UUID` [#uuid]_ binary both +:class:`~bson.objectid.ObjectId` oid both +:class:`~bson.dbref.DBRef` dbref both +:class:`~bson.dbref.DBRef` dbpointer bson -> py +None undefined bson -> py +:class:`~bson.code.Code` code both +str symbol bson -> py +bytes [#bytes]_ binary both +:class:`~bson.timestamp.Timestamp` timestamp both +:class:`~bson.decimal128.Decimal128` decimal128 both +:class:`~bson.min_key.MinKey` min key both +:class:`~bson.max_key.MaxKey` max key both +======================================= ============= =================== + +.. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending + on its size. 
A BSON int32 will always decode to a Python int. A BSON + int64 will always decode to a :class:`~bson.int64.Int64`. +.. [#raw] Decoding a bson object to :class:`~bson.raw_bson.RawBSONDocument` can be + optionally configured via :attr:`~bson.codec_options.CodecOptions.document_class`. +.. [#dt] datetime.datetime instances are encoded with millisecond precision so + the microsecond field is truncated. +.. [#dt2] all datetime.datetime instances are encoded as UTC. By default, they + are decoded as *naive* but timezone aware datetimes are also supported. + See `Dates and Times `_ for examples. +.. [#dt3] To enable decoding a bson UTC datetime to a :class:`~bson.datetime_ms.DatetimeMS` + instance see `handling out of range datetimes `_. +.. [#uuid] For :py:class:`uuid.UUID` encoding and decoding behavior see ``_. +.. [#re] :class:`~bson.regex.Regex` instances and regular expression + objects from ``re.compile()`` are both saved as BSON regular expressions. + BSON regular expressions are decoded as :class:`~bson.regex.Regex` + instances. +.. [#bytes] The bytes type is encoded as BSON binary with + subtype 0. It will be decoded back to bytes. """ +from __future__ import annotations -import calendar import datetime +import itertools +import os import re import struct import sys - -from bson.binary import (Binary, OLD_UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY) +import uuid +from codecs import utf_8_decode as _utf_8_decode +from codecs import utf_8_encode as _utf_8_encode +from collections import abc as _abc +from typing import ( + IO, + TYPE_CHECKING, + Any, + BinaryIO, + Callable, + Generator, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.binary import ( + ALL_UUID_SUBTYPES, + CSHARP_LEGACY, + JAVA_LEGACY, + OLD_UUID_SUBTYPE, + STANDARD, + UUID_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code +from bson.codec_options import ( + DEFAULT_CODEC_OPTIONS, + CodecOptions, + DatetimeConversion, + _raw_document_class, +) +from bson.datetime_ms import ( + EPOCH_AWARE, + EPOCH_NAIVE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, +) from bson.dbref import DBRef -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) +from bson.decimal128 import Decimal128 +from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData +from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.py3compat import b, binary_type from bson.regex import Regex -from bson.son import SON, RE_TYPE +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc +# Import some modules for type-checking only. +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument + from bson.typings import _DocumentType, _ReadableBuffer try: - from bson import _cbson - _use_c = True -except ImportError: - _use_c = False + from bson import _cbson # type: ignore[attr-defined] -try: - import uuid - _use_uuid = True + _USE_C = True except ImportError: - _use_uuid = False - -PY3 = sys.version_info[0] == 3 - - -MAX_INT32 = 2147483647 -MIN_INT32 = -2147483648 -MAX_INT64 = 9223372036854775807 -MIN_INT64 = -9223372036854775808 - -EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) - -# Create constants compatible with all versions of -# python from 2.4 forward. In 2.x b("foo") is just -# "foo". 
In 3.x it becomes b"foo". -EMPTY = b("") -ZERO = b("\x00") -ONE = b("\x01") - -BSONNUM = b("\x01") # Floating point -BSONSTR = b("\x02") # UTF-8 string -BSONOBJ = b("\x03") # Embedded document -BSONARR = b("\x04") # Array -BSONBIN = b("\x05") # Binary -BSONUND = b("\x06") # Undefined -BSONOID = b("\x07") # ObjectId -BSONBOO = b("\x08") # Boolean -BSONDAT = b("\x09") # UTC Datetime -BSONNUL = b("\x0A") # Null -BSONRGX = b("\x0B") # Regex -BSONREF = b("\x0C") # DBRef -BSONCOD = b("\x0D") # Javascript code -BSONSYM = b("\x0E") # Symbol -BSONCWS = b("\x0F") # Javascript code with scope -BSONINT = b("\x10") # 32bit int -BSONTIM = b("\x11") # Timestamp -BSONLON = b("\x12") # 64bit int -BSONMIN = b("\xFF") # Min key -BSONMAX = b("\x7F") # Max key - - -def _get_int(data, position, as_class=None, - tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, - compile_re=True, unsigned=False): - format = unsigned and "I" or "i" - try: - value = struct.unpack("<%s" % format, data[position:position + 4])[0] - except struct.error: - raise InvalidBSON() + _USE_C = False + +__all__ = [ + "ALL_UUID_SUBTYPES", + "CSHARP_LEGACY", + "JAVA_LEGACY", + "OLD_UUID_SUBTYPE", + "STANDARD", + "UUID_SUBTYPE", + "Binary", + "UuidRepresentation", + "Code", + "DEFAULT_CODEC_OPTIONS", + "CodecOptions", + "DBRef", + "Decimal128", + "InvalidBSON", + "InvalidDocument", + "InvalidStringData", + "Int64", + "MaxKey", + "MinKey", + "ObjectId", + "Regex", + "RE_TYPE", + "SON", + "Timestamp", + "utc", + "EPOCH_AWARE", + "EPOCH_NAIVE", + "BSONNUM", + "BSONSTR", + "BSONOBJ", + "BSONARR", + "BSONBIN", + "BSONUND", + "BSONOID", + "BSONBOO", + "BSONDAT", + "BSONNUL", + "BSONRGX", + "BSONREF", + "BSONCOD", + "BSONSYM", + "BSONCWS", + "BSONINT", + "BSONTIM", + "BSONLON", + "BSONDEC", + "BSONMIN", + "BSONMAX", + "get_data_and_view", + "gen_list_name", + "encode", + "decode", + "decode_all", + "decode_iter", + "decode_file_iter", + "is_valid", + "BSON", + "has_c", + "DatetimeConversion", + "DatetimeMS", +] + +BSONNUM = b"\x01" # Floating point +BSONSTR = b"\x02" # UTF-8 string +BSONOBJ = b"\x03" # Embedded document +BSONARR = b"\x04" # Array +BSONBIN = b"\x05" # Binary +BSONUND = b"\x06" # Undefined +BSONOID = b"\x07" # ObjectId +BSONBOO = b"\x08" # Boolean +BSONDAT = b"\x09" # UTC Datetime +BSONNUL = b"\x0A" # Null +BSONRGX = b"\x0B" # Regex +BSONREF = b"\x0C" # DBRef +BSONCOD = b"\x0D" # Javascript code +BSONSYM = b"\x0E" # Symbol +BSONCWS = b"\x0F" # Javascript code with scope +BSONINT = b"\x10" # 32bit int +BSONTIM = b"\x11" # Timestamp +BSONLON = b"\x12" # 64bit int +BSONDEC = b"\x13" # Decimal128 +BSONMIN = b"\xFF" # Min key +BSONMAX = b"\x7F" # Max key + + +_UNPACK_FLOAT_FROM = struct.Struct(" Tuple[Any, memoryview]: + if isinstance(data, (bytes, bytearray)): + return data, memoryview(data) + view = memoryview(data) + return view.tobytes(), view + + +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: + """Unknown type helper.""" + raise InvalidBSON( + "Detected unknown BSON type {!r} for fieldname '{}'. 
Are " + "you using the latest driver version?".format(chr(element_type).encode(), element_name) + ) + + +def _get_int( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[int, int]: + """Decode a BSON int32 to python int.""" + return _UNPACK_INT_FROM(data, position)[0], position + 4 + + +def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions[Any]) -> Tuple[str, int]: + """Decode a BSON 'C' string to python str.""" + end = data.index(b"\x00", position) + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 + + +def _get_float( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[float, int]: + """Decode a BSON double to python float.""" + return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 + + +def _get_string( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any +) -> Tuple[str, int]: + """Decode a BSON string to python str.""" + length = _UNPACK_INT_FROM(data, position)[0] position += 4 - return value, position - - -def _get_c_string(data, position, length=None): - if length is None: - try: - end = data.index(ZERO, position) - except ValueError: - raise InvalidBSON() - else: - end = position + length - value = data[position:end].decode("utf-8") - position = end + 1 + if length < 1 or obj_end - position < length: + raise InvalidBSON("invalid string length") + end = position + length - 1 + if data[end] != 0: + raise InvalidBSON("invalid end of string") + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 - return value, position +def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: + """Validate and return a BSON document's size.""" + try: + obj_size = _UNPACK_INT_FROM(data, position)[0] + except struct.error as exc: + raise InvalidBSON(str(exc)) from None + end = position + obj_size - 1 + if end >= obj_end: + raise InvalidBSON("invalid object length") + if data[end] != 0: + raise InvalidBSON("bad eoo") + # If this is the top-level document, validate the total size too. 
+ if position == 0 and obj_size != obj_end: + raise InvalidBSON("invalid object length") + return obj_size, end -def _make_c_string(string, check_null=False): - if isinstance(string, unicode): - if check_null and "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NULL character") - return string.encode("utf-8") + ZERO - else: - if check_null and ZERO in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NULL character") - try: - string.decode("utf-8") - return string + ZERO - except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) +def _get_object( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any +) -> Tuple[Any, int]: + """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" + obj_size, end = _get_object_size(data, position, obj_end) + if _raw_document_class(opts.document_class): + return (opts.document_class(data[position : end + 1], opts), position + obj_size) -def _get_number(data, position, as_class, tz_aware, uuid_subtype, compile_re): - num = struct.unpack(" Tuple[Any, int]: + """Decode a BSON array to python list.""" + size = _UNPACK_INT_FROM(data, position)[0] + end = position + size - 1 + if data[end] != 0: + raise InvalidBSON("bad eoo") -def _get_string(data, position, as_class, tz_aware, uuid_subtype, compile_re): - length = struct.unpack(" Tuple[Union[Binary, uuid.UUID], int]: + """Decode a BSON binary to bson.binary.Binary or python UUID.""" + length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) + position += 5 if subtype == 2: - length2, position = _get_int(data, position) + length2 = _UNPACK_INT_FROM(data, position)[0] + position += 4 if length2 != length - 4: raise InvalidBSON("invalid binary (st 2) - lengths don't match!") length = length2 - if subtype in (3, 4) and _use_uuid: - # Java Legacy - if uuid_subtype == JAVA_LEGACY: - java = data[position:position + length] - value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) - # C# legacy - elif uuid_subtype == CSHARP_LEGACY: - value = uuid.UUID(bytes_le=data[position:position + length]) - # Python - else: - value = uuid.UUID(bytes=data[position:position + length]) - position += length - return (value, position) - # Python3 special case. Decode subtype 0 to 'bytes'. - if PY3 and subtype == 0: - value = data[position:position + length] + end = position + length + if length < 0 or end > obj_end: + raise InvalidBSON("bad binary object length") + + # Convert UUID subtypes to native UUIDs. + if subtype in ALL_UUID_SUBTYPES: + uuid_rep = opts.uuid_representation + binary_value = Binary(data[position:end], subtype) + if ( + (uuid_rep == UuidRepresentation.UNSPECIFIED) + or (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) + or (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD) + ): + return binary_value, end + return binary_value.as_uuid(uuid_rep), end + + # Decode subtype 0 to 'bytes'. 
+ if subtype == 0: + value = data[position:end] else: - value = Binary(data[position:position + length], subtype) - position += length - return value, position + value = Binary(data[position:end], subtype) + return value, end -def _get_oid(data, position, as_class=None, - tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True): - value = ObjectId(data[position:position + 12]) - position += 12 - return value, position +def _get_oid( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[ObjectId, int]: + """Decode a BSON ObjectId to bson.objectid.ObjectId.""" + end = position + 12 + return ObjectId(data[position:end]), end -def _get_boolean(data, position, as_class, tz_aware, uuid_subtype, compile_re): - value = data[position:position + 1] == ONE - position += 1 - return value, position +def _get_boolean( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[bool, int]: + """Decode a BSON true/false to python True/False.""" + end = position + 1 + boolean_byte = data[position:end] + if boolean_byte == b"\x00": + return False, end + elif boolean_byte == b"\x01": + return True, end + raise InvalidBSON("invalid boolean value: %r" % boolean_byte) -def _get_date(data, position, as_class, tz_aware, uuid_subtype, compile_re): - millis = struct.unpack(" Tuple[Union[datetime.datetime, DatetimeMS], int]: + """Decode a BSON datetime to python datetime.datetime.""" + return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data, position, as_class, tz_aware, uuid_subtype, compile_re): - code, position = _get_string(data, position, - as_class, tz_aware, uuid_subtype, compile_re) +def _get_code( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[Code, int]: + """Decode a BSON code to bson.code.Code.""" + code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position def _get_code_w_scope( - data, position, as_class, tz_aware, uuid_subtype, compile_re): - _, position = _get_int(data, position) - code, position = _get_string(data, position, - as_class, tz_aware, uuid_subtype, compile_re) - scope, position = _get_object(data, position, - as_class, tz_aware, uuid_subtype, compile_re) + data: Any, view: Any, position: int, _obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[Code, int]: + """Decode a BSON code_w_scope to bson.code.Code.""" + code_end = position + _UNPACK_INT_FROM(data, position)[0] + code, position = _get_string(data, view, position + 4, code_end, opts, element_name) + scope, position = _get_object(data, view, position, code_end, opts, element_name) + if position != code_end: + raise InvalidBSON("scope outside of javascript code boundaries") return Code(code, scope), position -def _get_null(data, position, as_class, tz_aware, uuid_subtype, compile_re): - return None, position - - -def _get_regex(data, position, as_class, tz_aware, uuid_subtype, compile_re): - pattern, position = _get_c_string(data, position) - bson_flags, position = _get_c_string(data, position) +def _get_regex( + data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Regex[Any], int]: + """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" + pattern, position = _get_c_string(data, view, position, opts) + bson_flags, position = _get_c_string(data, view, position, opts) bson_re = Regex(pattern, bson_flags) - if compile_re: - 
return bson_re.try_compile(), position - else: - return bson_re, position + return bson_re, position -def _get_ref(data, position, as_class, tz_aware, uuid_subtype, compile_re): - collection, position = _get_string(data, position, as_class, tz_aware, - uuid_subtype, compile_re) - oid, position = _get_oid(data, position) +def _get_ref( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str +) -> Tuple[DBRef, int]: + """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" + collection, position = _get_string(data, view, position, obj_end, opts, element_name) + oid, position = _get_oid(data, view, position, obj_end, opts, element_name) return DBRef(collection, oid), position def _get_timestamp( - data, position, as_class, tz_aware, uuid_subtype, compile_re): - inc, position = _get_int(data, position, unsigned=True) - timestamp, position = _get_int(data, position, unsigned=True) - return Timestamp(timestamp, inc), position - - -def _get_long(data, position, as_class, tz_aware, uuid_subtype, compile_re): - # Have to cast to long; on 32-bit unpack may return an int. - # 2to3 will change long to int. That's fine since long doesn't - # exist in python3. - value = long(struct.unpack(" Tuple[Timestamp, int]: + """Decode a BSON timestamp to bson.timestamp.Timestamp.""" + inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) + return Timestamp(timestamp, inc), position + 8 + + +def _get_int64( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Int64, int]: + """Decode a BSON int64 to bson.int64.Int64.""" + return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 + + +def _get_decimal128( + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Decimal128, int]: + """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" + end = position + 16 + return Decimal128.from_bid(data[position:end]), end + + +# Each decoder function's signature is: +# - data: bytes +# - view: memoryview that references `data` +# - position: int, beginning of object in 'data' to decode +# - obj_end: int, end of object to decode in 'data' if variable-length type +# - opts: a CodecOptions +_ELEMENT_GETTER: dict[int, Callable[..., Tuple[Any, int]]] = { + ord(BSONNUM): _get_float, + ord(BSONSTR): _get_string, + ord(BSONOBJ): _get_object, + ord(BSONARR): _get_array, + ord(BSONBIN): _get_binary, + ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 # Deprecated undefined + ord(BSONOID): _get_oid, + ord(BSONBOO): _get_boolean, + ord(BSONDAT): _get_date, + ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 + ord(BSONRGX): _get_regex, + ord(BSONREF): _get_ref, # Deprecated DBPointer + ord(BSONCOD): _get_code, + ord(BSONSYM): _get_string, # Deprecated symbol + ord(BSONCWS): _get_code_w_scope, + ord(BSONINT): _get_int, + ord(BSONTIM): _get_timestamp, + ord(BSONLON): _get_int64, + ord(BSONDEC): _get_decimal128, + ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), # noqa: ARG005 + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), # noqa: ARG005 +} + + +if _USE_C: + + def _element_to_dict( + data: Any, + view: Any, # noqa: ARG001 + position: int, + obj_end: int, + opts: CodecOptions[Any], + raw_array: bool = False, + ) -> Tuple[str, Any, int]: + return cast( + "Tuple[str, Any, int]", + _cbson._element_to_dict(data, position, obj_end, opts, raw_array), + ) + +else: + + def _element_to_dict( + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions[Any], + 
raw_array: bool = False, + ) -> Tuple[str, Any, int]: + """Decode a single key, value pair.""" + element_type = data[position] + position += 1 + element_name, position = _get_c_string(data, view, position, opts) + if raw_array and element_type == ord(BSONARR): + _, end = _get_object_size(data, position, len(data)) + return element_name, view[position : end + 1], end + 1 + try: + value, position = _ELEMENT_GETTER[element_type]( + data, view, position, obj_end, opts, element_name + ) + except KeyError: + _raise_unknown_type(element_type, element_name) + + if opts.type_registry._decoder_map: + custom_decoder = opts.type_registry._decoder_map.get(type(value)) + if custom_decoder is not None: + value = custom_decoder(value) + + return element_name, value, position + + +_T = TypeVar("_T", bound=MutableMapping[str, Any]) + + +def _raw_to_dict( + data: Any, + position: int, + obj_end: int, + opts: CodecOptions[RawBSONDocument], + result: _T, + raw_array: bool = False, +) -> _T: + data, view = get_data_and_view(data) + return cast( + _T, _elements_to_dict(data, view, position, obj_end, opts, result, raw_array=raw_array) + ) + + +def _elements_to_dict( + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions[Any], + result: Any = None, + raw_array: bool = False, +) -> Any: + """Decode a BSON document into result.""" + if result is None: + result = opts.document_class() + end = obj_end - 1 while position < end: - (key, value, position) = _element_to_dict( - data, position, as_class, tz_aware, uuid_subtype, compile_re) + key, value, position = _element_to_dict( + data, view, position, obj_end, opts, raw_array=raw_array + ) result[key] = value + if position != obj_end: + raise InvalidBSON("bad object or element length") return result -def _bson_to_dict(data, as_class, tz_aware, uuid_subtype, compile_re): - obj_size = struct.unpack(" _DocumentType: + """Decode a BSON string to document_class.""" + data, view = get_data_and_view(data) + try: + if _raw_document_class(opts.document_class): + return opts.document_class(data, opts) # type:ignore[call-arg] + _, end = _get_object_size(data, 0, len(data)) + return cast("_DocumentType", _elements_to_dict(data, view, 4, end, opts)) + except InvalidBSON: + raise + except Exception: + # Change exception type to InvalidBSON but preserve traceback. + _, exc_value, exc_tb = sys.exc_info() + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None + + +if _USE_C: _bson_to_dict = _cbson._bson_to_dict -def _element_to_bson(key, value, check_keys, uuid_subtype): - if not isinstance(key, basestring): - raise InvalidDocument("documents must have only string keys, " - "key was %r" % key) +_PACK_FLOAT = struct.Struct(" Generator[bytes, None, None]: + """Generate "keys" for encoded lists in the sequence + b"0\x00", b"1\x00", b"2\x00", ... + The first 1000 keys are returned from a pre-built cache. All + subsequent keys are generated on the fly. 
+ """ + yield from _LIST_NAMES + + counter = itertools.count(1000) + while True: + yield (str(next(counter)) + "\x00").encode("utf8") + + +def _make_c_string_check(string: Union[str, bytes]) -> bytes: + """Make a 'C' string, checking for embedded NUL characters.""" + if isinstance(string, bytes): + if b"\x00" in string: + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") + try: + _utf_8_decode(string, None, True) + return string + b"\x00" + except UnicodeError: + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None + else: + if "\x00" in string: + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") + return _utf_8_encode(string)[0] + b"\x00" + + +def _make_c_string(string: Union[str, bytes]) -> bytes: + """Make a 'C' string.""" + if isinstance(string, bytes): + try: + _utf_8_decode(string, None, True) + return string + b"\x00" + except UnicodeError: + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None + else: + return _utf_8_encode(string)[0] + b"\x00" + + +def _make_name(string: str) -> bytes: + """Make a 'C' string suitable for a BSON key.""" + if "\x00" in string: + raise InvalidDocument("BSON keys must not contain a NUL character") + return _utf_8_encode(string)[0] + b"\x00" + + +def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: + """Encode a float.""" + return b"\x01" + name + _PACK_FLOAT(value) + + +def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: + """Encode a python bytes.""" + # Python3 special case. Store 'bytes' as BSON binary subtype 0. + return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value + + +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes: + """Encode a mapping type.""" + if _raw_document_class(value): + return b"\x03" + name + cast(bytes, value.raw) + data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in value.items()]) + return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" + + +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions[Any]) -> bytes: + """Encode bson.dbref.DBRef.""" + buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") + begin = len(buf) - 4 + + buf += _name_value_to_bson(b"$ref\x00", value.collection, check_keys, opts) + buf += _name_value_to_bson(b"$id\x00", value.id, check_keys, opts) + if value.database is not None: + buf += _name_value_to_bson(b"$db\x00", value.database, check_keys, opts) + for key, val in value._DBRef__kwargs.items(): + buf += _element_to_bson(key, val, check_keys, opts) + + buf += b"\x00" + buf[begin : begin + 4] = _PACK_INT(len(buf) - begin) + return bytes(buf) + + +def _encode_list( + name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions[Any] +) -> bytes: + """Encode a list/tuple.""" + lname = gen_list_name() + data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) + return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" + + +def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: + """Encode a python str.""" + bvalue = _utf_8_encode(value)[0] + return b"\x02" + name + _PACK_INT(len(bvalue) + 1) + bvalue + b"\x00" + + +def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: + """Encode bson.binary.Binary.""" + subtype = value.subtype + if subtype == 2: + value = 
_PACK_INT(len(value)) + value  # type: ignore
+    return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value
+
+
+def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions[Any]) -> bytes:
+    """Encode uuid.UUID."""
+    uuid_representation = opts.uuid_representation
+    binval = Binary.from_uuid(value, uuid_representation=uuid_representation)
+    return _encode_binary(name, binval, dummy, opts)
+
+
+def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes:
+    """Encode bson.objectid.ObjectId."""
+    return b"\x07" + name + value.binary
+
+
+def _encode_bool(name: bytes, value: bool, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode a python boolean (True/False)."""
+    return b"\x08" + name + (value and b"\x01" or b"\x00")
+
+
+def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode datetime.datetime."""
+    millis = _datetime_to_millis(value)
+    return b"\x09" + name + _PACK_LONG(millis)
+
+
+def _encode_datetime_ms(name: bytes, value: DatetimeMS, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode a bson.datetime_ms.DatetimeMS."""
+    millis = int(value)
+    return b"\x09" + name + _PACK_LONG(millis)
+
+
+def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes:
+    """Encode python None."""
+    return b"\x0A" + name
+
+
+def _encode_regex(name: bytes, value: Regex[Any], dummy0: Any, dummy1: Any) -> bytes:
+    """Encode a python regex or bson.regex.Regex."""
+    flags = value.flags
+    # Python 3 common case
+    if flags == re.UNICODE:
+        return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00"
+    elif flags == 0:
+        return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00"
+    else:
+        sflags = b""
+        if flags & re.IGNORECASE:
+            sflags += b"i"
+        if flags & re.LOCALE:
+            sflags += b"l"
+        if flags & re.MULTILINE:
+            sflags += b"m"
+        if flags & re.DOTALL:
+            sflags += b"s"
+        if flags & re.UNICODE:
+            sflags += b"u"
+        if flags & re.VERBOSE:
+            sflags += b"x"
+        sflags += b"\x00"
+        return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags
+
+
+def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions[Any]) -> bytes:
+    """Encode bson.code.Code."""
+    cstring = _make_c_string(value)
+    cstrlen = len(cstring)
+    if value.scope is None:
+        return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
+    scope = _dict_to_bson(value.scope, False, opts, False)
+    full_length = _PACK_INT(8 + cstrlen + len(scope))
+    return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
+
+
+def _encode_int(name: bytes, value: int, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode a python int."""
+    if -2147483648 <= value <= 2147483647:
+        return b"\x10" + name + _PACK_INT(value)
+    else:
+        try:
+            return b"\x12" + name + _PACK_LONG(value)
+        except struct.error:
+            raise OverflowError("BSON can only handle up to 8-byte ints") from None
+
+
+def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode bson.timestamp.Timestamp."""
+    return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
+
+
+def _encode_long(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode a bson.int64.Int64."""
+    try:
+        return b"\x12" + name + _PACK_LONG(value)
+    except struct.error:
+        raise OverflowError("BSON can only handle up to 8-byte ints") from None
+
+
+def _encode_decimal128(name: bytes, value: Decimal128, dummy0: Any, dummy1: Any) -> bytes:
+    """Encode bson.decimal128.Decimal128."""
+    return b"\x13" + name + value.bid
+
+
+def _encode_minkey(name: bytes,
dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: + """Encode bson.min_key.MinKey.""" + return b"\xFF" + name + + +def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: + """Encode bson.max_key.MaxKey.""" + return b"\x7F" + name + + +# Each encoder function's signature is: +# - name: utf-8 bytes +# - value: a Python data type, e.g. a Python int for _encode_int +# - check_keys: bool, whether to check for invalid names +# - opts: a CodecOptions +_ENCODERS = { + bool: _encode_bool, + bytes: _encode_bytes, + datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetime_ms, + dict: _encode_mapping, + float: _encode_float, + int: _encode_int, + list: _encode_list, + str: _encode_text, + tuple: _encode_list, + type(None): _encode_none, + uuid.UUID: _encode_uuid, + Binary: _encode_binary, + Int64: _encode_long, + Code: _encode_code, + DBRef: _encode_dbref, + MaxKey: _encode_maxkey, + MinKey: _encode_minkey, + ObjectId: _encode_objectid, + Regex: _encode_regex, + RE_TYPE: _encode_regex, + SON: _encode_mapping, + Timestamp: _encode_timestamp, + Decimal128: _encode_decimal128, + # Special case. This will never be looked up directly. + _abc.Mapping: _encode_mapping, +} + +# Map each _type_marker to its encoder for faster lookup. +_MARKERS = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] + + +_BUILT_IN_TYPES = tuple(t for t in _ENCODERS) + + +def _name_value_to_bson( + name: bytes, + value: Any, + check_keys: bool, + opts: CodecOptions[Any], + in_custom_call: bool = False, + in_fallback_call: bool = False, +) -> bytes: + """Encode a single name, value pair.""" + + was_integer_overflow = False + + # First see if the type is already cached. KeyError will only ever + # happen once per subtype. + try: + return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore + except KeyError: + pass + except OverflowError: + if not isinstance(value, int): + raise + + # Give the fallback_encoder a chance + was_integer_overflow = True + + # Second, fall back to trying _type_marker. This has to be done + # before the loop below since users could subclass one of our + # custom types that subclasses a python built-in (e.g. Binary) + marker = getattr(value, "_type_marker", None) + if isinstance(marker, int) and marker in _MARKERS: + func = _MARKERS[marker] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(value)] = func + return func(name, value, check_keys, opts) # type: ignore + + # Third, check if a type encoder is registered for this type. + # Note that subtypes of registered custom types are not auto-encoded. + if not in_custom_call and opts.type_registry._encoder_map: + custom_encoder = opts.type_registry._encoder_map.get(type(value)) + if custom_encoder is not None: + return _name_value_to_bson( + name, custom_encoder(value), check_keys, opts, in_custom_call=True + ) + + # Fourth, test each base type. This will only happen once for + # a subtype of a supported base type. Unlike in the C-extensions, this + # is done after trying the custom type encoder because checking for each + # subtype is expensive. + for base in _BUILT_IN_TYPES: + if not was_integer_overflow and isinstance(value, base): + func = _ENCODERS[base] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(value)] = func + return func(name, value, check_keys, opts) # type: ignore + + # As a last resort, try using the fallback encoder, if the user has + # provided one. 
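+    # (The in_fallback_call flag below means the fallback gets exactly one
+    # chance per value: if it returns another object that cannot be encoded,
+    # the next pass raises InvalidDocument instead of recursing forever.)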
+
+    fallback_encoder = opts.type_registry._fallback_encoder
+    if not in_fallback_call and fallback_encoder is not None:
+        return _name_value_to_bson(
+            name, fallback_encoder(value), check_keys, opts, in_fallback_call=True
+        )
+
+    if was_integer_overflow:
+        raise OverflowError("BSON can only handle up to 8-byte ints")
+    raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}")
+
+
+def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes:
+    """Encode a single key, value pair."""
+    if not isinstance(key, str):
+        raise InvalidDocument(f"documents must have only string keys, key was {key!r}")
     if check_keys:
         if key.startswith("$"):
-            raise InvalidDocument("key %r must not start with '$'" % key)
+            raise InvalidDocument(f"key {key!r} must not start with '$'")
         if "." in key:
-            raise InvalidDocument("key %r must not contain '.'" % key)
-
-    name = _make_c_string(key, True)
-    if isinstance(value, float):
-        return BSONNUM + name + struct.pack("<d", value)
-    if isinstance(value, int):
-        if value > MAX_INT64 or value < MIN_INT64:
-            raise OverflowError("BSON can only handle up to 8-byte ints")
-        if value > MAX_INT32 or value < MIN_INT32:
-            return BSONLON + name + struct.pack("<q", value)
-        return BSONINT + name + struct.pack("<i", value)
-    if isinstance(value, long):
-        if value > MAX_INT64 or value < MIN_INT64:
-            raise OverflowError("BSON can only handle up to 8-byte ints")
-        return BSONLON + name + struct.pack("<q", value)
+            raise InvalidDocument(f"key {key!r} must not contain '.'")
+
+    name = _make_name(key)
+    return _name_value_to_bson(name, value, check_keys, opts)
+
+
+def _dict_to_bson(
+    doc: Any, check_keys: bool, opts: CodecOptions[Any], top_level: bool = True
+) -> bytes:
+    """Encode a document to BSON."""
+    if _raw_document_class(doc):
+        return cast(bytes, doc.raw)
     try:
         elements = []
-        if top_level and "_id" in dict:
-            elements.append(_element_to_bson("_id", dict["_id"],
-                                             check_keys, uuid_subtype))
-        for (key, value) in dict.iteritems():
+        if top_level and "_id" in doc:
+            elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts))
+        for key, value in doc.items():
             if not top_level or key != "_id":
-                elements.append(_element_to_bson(key, value,
-                                                 check_keys, uuid_subtype))
+                try:
+                    elements.append(_element_to_bson(key, value, check_keys, opts))
+                except InvalidDocument as err:
+                    raise InvalidDocument(f"Invalid document: {err}", doc) from err
     except AttributeError:
-        raise TypeError("encoder expected a mapping type but got: %r" % dict)
+        raise TypeError(f"encoder expected a mapping type but got: {doc!r}") from None
+
+    encoded = b"".join(elements)
+    return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"

-    encoded = EMPTY.join(elements)
-    length = len(encoded) + 5
-    return struct.pack("<i", length) + encoded + ZERO
+
+
+if _USE_C:
+    _dict_to_bson = _cbson._dict_to_bson
+
+
+def encode(
+    document: Mapping[str, Any],
+    check_keys: bool = False,
+    codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS,
+) -> bytes:
+    """Encode a document to BSON.
+
+    A document can be any mapping type (like :class:`dict`).
+
+    Raises :class:`TypeError` if `document` is not a mapping type,
+    or contains keys that are not instances of :class:`str`. Raises
+    :class:`~bson.errors.InvalidDocument` if `document` cannot be
+    converted to :class:`BSON`.
+
+    :param document: mapping type representing a document
+    :param check_keys: check if keys start with '$' or
+        contain '.', raising :class:`~bson.errors.InvalidDocument` in
+        either case
+    :param codec_options: An instance of
+        :class:`~bson.codec_options.CodecOptions`.
+
+    .. versionadded:: 3.9
+    """
+    if not isinstance(codec_options, CodecOptions):
+        raise _CODEC_OPTIONS_TYPE_ERROR
+
+    return _dict_to_bson(document, check_keys, codec_options)
+
+
+@overload
+def decode(data: _ReadableBuffer, codec_options: None = None) -> dict[str, Any]:
+    ...
+
+
+@overload
+def decode(data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType]) -> _DocumentType:
+    ...
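A minimal end-to-end sketch of the fallback-encoder path above (illustrative only; it assumes PyMongo is installed, and the Decimal-to-Decimal128 fallback is a hypothetical choice, not something this module defines):

    from decimal import Decimal

    from bson import decode, encode
    from bson.codec_options import CodecOptions, TypeRegistry
    from bson.decimal128 import Decimal128

    def decimal_fallback(value):
        # Hypothetical fallback encoder: represent stdlib Decimal as Decimal128.
        return Decimal128(str(value))

    opts = CodecOptions(type_registry=TypeRegistry(fallback_encoder=decimal_fallback))
    data = encode({"price": Decimal("9.99")}, codec_options=opts)
    print(decode(data))  # {'price': Decimal128('9.99')}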
+
+
+def decode(
+    data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None
+) -> Union[dict[str, Any], _DocumentType]:
+    """Decode BSON to a document.
+
+    By default, returns a BSON document represented as a Python
+    :class:`dict`. To use a different :class:`MutableMapping` class,
+    configure a :class:`~bson.codec_options.CodecOptions`::
+
+        >>> import collections  # From Python standard library.
+        >>> import bson
+        >>> from bson.codec_options import CodecOptions
+        >>> data = bson.encode({'a': 1})
+        >>> decoded_doc = bson.decode(data)
+
+        >>> options = CodecOptions(document_class=collections.OrderedDict)
+        >>> decoded_doc = bson.decode(data, codec_options=options)
+        >>> type(decoded_doc)
+        <class 'collections.OrderedDict'>
+
+    :param data: the BSON to decode. Any bytes-like object that implements
+        the buffer protocol.
+    :param codec_options: An instance of
+        :class:`~bson.codec_options.CodecOptions`.

-    :Parameters:
-      - `data`: BSON data
-      - `as_class` (optional): the class to use for the resulting
-        documents
-      - `tz_aware` (optional): if ``True``, return timezone-aware
-        :class:`~datetime.datetime` instances
-      - `compile_re` (optional): if ``False``, don't attempt to compile
-        BSON regular expressions into Python regular expressions. Return
-        instances of :class:`~bson.regex.Regex` instead. Can avoid
-        :exc:`~bson.errors.InvalidBSON` errors when receiving
-        Python-incompatible regular expressions, for example from ``currentOp``
-
-    .. versionchanged:: 2.7
-      Added `compile_re` option.
-    .. versionadded:: 1.9
+    .. versionadded:: 3.9
     """
-    docs = []
+    opts: CodecOptions[Any] = codec_options or DEFAULT_CODEC_OPTIONS
+    if not isinstance(opts, CodecOptions):
+        raise _CODEC_OPTIONS_TYPE_ERROR
+
+    return cast("Union[dict[str, Any], _DocumentType]", _bson_to_dict(data, opts))
+
+
+def _decode_all(data: _ReadableBuffer, opts: CodecOptions[_DocumentType]) -> list[_DocumentType]:
+    """Decode a BSON data to multiple documents."""
+    data, view = get_data_and_view(data)
+    data_len = len(data)
+    docs: list[_DocumentType] = []
     position = 0
-    end = len(data) - 1
+    end = data_len - 1
+    use_raw = _raw_document_class(opts.document_class)
     try:
         while position < end:
-            obj_size = struct.unpack("<i", data[position : position + 4])[0]
+            obj_size = _UNPACK_INT_FROM(data, position)[0]
+            if data_len - position < obj_size:
+                raise InvalidBSON("invalid object size")
+            obj_end = position + obj_size - 1
+            if data[obj_end] != 0:
+                raise InvalidBSON("bad eoo")
+            if use_raw:
+                docs.append(opts.document_class(data[position : obj_end + 1], opts))
+            else:
+                docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts))
+            position += obj_size
+        return docs
+    except InvalidBSON:
+        raise
+    except Exception:
+        # Change exception type to InvalidBSON but preserve traceback.
+        _, exc_value, exc_tb = sys.exc_info()
+        raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None
+
+
+if _USE_C:
+    _decode_all = _cbson._decode_all
+
+
+@overload
+def decode_all(data: _ReadableBuffer, codec_options: None = None) -> list[dict[str, Any]]:
+    ...
+
+
+@overload
+def decode_all(
+    data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType]
+) -> list[_DocumentType]:
+    ...
+
+
+def decode_all(
+    data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None
+) -> Union[list[dict[str, Any]], list[_DocumentType]]:
+    """Decode BSON data to multiple documents.
+
+    `data` must be a bytes-like object implementing the buffer protocol that
+    provides concatenated, valid, BSON-encoded documents.
+
+    :param data: BSON data
+    :param codec_options: An instance of
+        :class:`~bson.codec_options.CodecOptions`.
+
+    .. versionchanged:: 3.9
+       Supports bytes-like objects that implement the buffer protocol.
+
+    .. versionchanged:: 3.0
+       Removed `compile_re` option: PyMongo now always represents BSON regular
+       expressions as :class:`~bson.regex.Regex` objects. Use
+       :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
+       BSON regular expression to a Python regular expression object.
+
+       Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
+       `codec_options`.
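+
+    A short doctest-style sketch (``decode_all`` accepts any number of
+    concatenated documents)::
+
+        >>> import bson
+        >>> buf = bson.encode({'a': 1}) + bson.encode({'b': 2})
+        >>> bson.decode_all(buf)
+        [{'a': 1}, {'b': 2}]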
+ """ + if codec_options is None: + return _decode_all(data, DEFAULT_CODEC_OPTIONS) + + if not isinstance(codec_options, CodecOptions): + raise _CODEC_OPTIONS_TYPE_ERROR + + return _decode_all(data, codec_options) + + +def _decode_selective( + rawdoc: Any, fields: Any, codec_options: CodecOptions[_DocumentType] +) -> _DocumentType: + if _raw_document_class(codec_options.document_class): + # If document_class is RawBSONDocument, use vanilla dictionary for + # decoding command response. + doc: _DocumentType = {} # type:ignore[assignment] + else: + # Else, use the specified document_class. + doc = codec_options.document_class() + for key, value in rawdoc.items(): + if key in fields: + if fields[key] == 1: + doc[key] = _bson_to_dict(rawdoc.raw, codec_options)[key] # type:ignore[index] + else: + doc[key] = _decode_selective( # type:ignore[index] + value, fields[key], codec_options + ) + else: + doc[key] = value # type:ignore[index] + return doc + + +def _array_of_documents_to_buffer(data: Union[memoryview, bytes]) -> bytes: + # Extract the raw bytes of each document. + position = 0 + view = memoryview(data) + _, end = _get_object_size(view, position, len(view)) + position += 4 + buffers: list[memoryview] = [] + append = buffers.append + while position < end - 1: + # Just skip the keys. + while view[position] != 0: + position += 1 + position += 1 + obj_size, _ = _get_object_size(view, position, end) + append(view[position : position + obj_size]) + position += obj_size + if position != end: + raise InvalidBSON("bad object or element length") + return b"".join(buffers) + + +if _USE_C: + _array_of_documents_to_buffer = _cbson._array_of_documents_to_buffer + + +def _convert_raw_document_lists_to_streams(document: Any) -> None: + """Convert raw array of documents to a stream of BSON documents.""" + cursor = document.get("cursor") + if not cursor: + return + for key in ("firstBatch", "nextBatch"): + batch = cursor.get(key) + if not batch: + continue + data = _array_of_documents_to_buffer(batch) + if data: + cursor[key] = [data] + else: + cursor[key] = [] + + +def _decode_all_selective( + data: Any, codec_options: CodecOptions[_DocumentType], fields: Any +) -> list[_DocumentType]: + """Decode BSON data to a single document while using user-provided + custom decoding logic. + + `data` must be a string representing a valid, BSON-encoded document. + + :param data: BSON data + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` with user-specified type + decoders. If no decoders are found, this method is the same as + ``decode_all``. + :param fields: Map of document namespaces where data that needs + to be custom decoded lives or None. For example, to custom decode a + list of objects in 'field1.subfield1', the specified value should be + ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or + None, this method is the same as ``decode_all``. + + :return: Single-member list containing the decoded document. + + .. versionadded:: 3.8 + """ + if not codec_options.type_registry._decoder_map: + return decode_all(data, codec_options) + + if not fields: + return decode_all(data, codec_options.with_options(type_registry=None)) + + # Decode documents for internal use. 
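+    # (Decoding to RawBSONDocument first keeps every top-level value as raw
+    # bytes, so _decode_selective only re-decodes the fields named in
+    # `fields` with the user's type registry applied.)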
+ from bson.raw_bson import RawBSONDocument + + internal_codec_options: CodecOptions[RawBSONDocument] = codec_options.with_options( + document_class=RawBSONDocument, type_registry=None + ) + _doc = _bson_to_dict(data, internal_codec_options) + return [ + _decode_selective( + _doc, + fields, + codec_options, + ) + ] + + +@overload +def decode_iter(data: bytes, codec_options: None = None) -> Iterator[dict[str, Any]]: + ... + +@overload +def decode_iter(data: bytes, codec_options: CodecOptions[_DocumentType]) -> Iterator[_DocumentType]: + ... + + +def decode_iter( + data: bytes, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: + """Decode BSON data to multiple documents as a generator. + + Works similarly to the decode_all function, but yields one document at a + time. + + `data` must be a string of concatenated, valid, BSON-encoded + documents. -def is_valid(bson): + :param data: BSON data + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. + + .. versionchanged:: 3.0 + Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with + `codec_options`. + + .. versionadded:: 2.8 + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): + raise _CODEC_OPTIONS_TYPE_ERROR + + position = 0 + end = len(data) - 1 + while position < end: + obj_size = _UNPACK_INT_FROM(data, position)[0] + elements = data[position : position + obj_size] + position += obj_size + + yield _bson_to_dict(elements, opts) + + +@overload +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], codec_options: None = None +) -> Iterator[dict[str, Any]]: + ... + + +@overload +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], codec_options: CodecOptions[_DocumentType] +) -> Iterator[_DocumentType]: + ... + + +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], + codec_options: Optional[CodecOptions[_DocumentType]] = None, +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: + """Decode bson data from a file to multiple documents as a generator. + + Works similarly to the decode_all function, but reads from the file object + in chunks and parses bson in chunks, yielding one document at a time. + + :param file_obj: A file object containing BSON data. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. + + .. versionchanged:: 3.0 + Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with + `codec_options`. + + .. versionadded:: 2.8 + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + while True: + # Read size of next object. + size_data: Any = file_obj.read(4) + if not size_data: + break # Finished with file normally. + elif len(size_data) != 4: + raise InvalidBSON("cut off in middle of objsize") + obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 + elements = size_data + file_obj.read(max(0, obj_size)) + yield _bson_to_dict(elements, opts) # type:ignore[misc] + + +def is_valid(bson: bytes) -> bool: """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of - :class:`str` (:class:`bytes` in python 3). Returns ``True`` + :class:`bytes`. Returns ``True`` if `bson` is valid :class:`BSON`, ``False`` otherwise. 
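+
+    A short doctest-style sketch::
+
+        >>> import bson
+        >>> bson.is_valid(bson.encode({'a': 1}))
+        True
+        >>> bson.is_valid(b'\x00\x00\x00\x00\x00')
+        False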
-    :Parameters:
-      - `bson`: the data to be validated
+    :param bson: the data to be validated
     """
-    if not isinstance(bson, binary_type):
-        raise TypeError("BSON data must be an instance "
-                        "of a subclass of %s" % (binary_type.__name__,))
+    if not isinstance(bson, bytes):
+        raise TypeError(f"BSON data must be an instance of a subclass of bytes, not {type(bson)}")
     try:
-        (_, remainder) = _bson_to_dict(bson, dict, True, OLD_UUID_SUBTYPE, True)
-        return remainder == EMPTY
-    except:
+        _bson_to_dict(bson, DEFAULT_CODEC_OPTIONS)
+        return True
+    except Exception:
         return False


-class BSON(binary_type):
+class BSON(bytes):
     """BSON (Binary JSON) data.
+
+    .. warning:: Using this class to encode and decode BSON adds a performance
+       cost. For better performance use the module level functions
+       :func:`encode` and :func:`decode` instead.
     """

     @classmethod
-    def encode(cls, document, check_keys=False, uuid_subtype=OLD_UUID_SUBTYPE):
+    def encode(
+        cls: Type[BSON],
+        document: Mapping[str, Any],
+        check_keys: bool = False,
+        codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS,
+    ) -> BSON:
         """Encode a document to a new :class:`BSON` instance.

         A document can be any mapping type (like :class:`dict`).

         Raises :class:`TypeError` if `document` is not a mapping type,
         or contains keys that are not instances of
-        :class:`basestring` (:class:`str` in python 3). Raises
-        :class:`~bson.errors.InvalidDocument` if `document` cannot be
-        converted to :class:`BSON`.
+        :class:`str`. Raises :class:`~bson.errors.InvalidDocument`
+        if `document` cannot be converted to :class:`BSON`.

-        :Parameters:
-          - `document`: mapping type representing a document
-          - `check_keys` (optional): check if keys start with '$' or
+        :param document: mapping type representing a document
+        :param check_keys: check if keys start with '$' or
             contain '.', raising :class:`~bson.errors.InvalidDocument` in
             either case
+        :param codec_options: An instance of
+            :class:`~bson.codec_options.CodecOptions`.

-        .. versionadded:: 1.9
+        .. versionchanged:: 3.0
+           Replaced `uuid_subtype` option with `codec_options`.
         """
-        return cls(_dict_to_bson(document, check_keys, uuid_subtype))
+        return cls(encode(document, check_keys, codec_options))

-    def decode(self, as_class=dict,
-               tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True):
+    def decode(  # type:ignore[override]
+        self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS
+    ) -> dict[str, Any]:
         """Decode this BSON data.

-        The default type to use for the resultant document is
-        :class:`dict`. Any other class that supports
-        :meth:`__setitem__` can be used instead by passing it as the
-        `as_class` parameter.
-
-        If `tz_aware` is ``True`` (recommended), any
-        :class:`~datetime.datetime` instances returned will be
-        timezone-aware, with their timezone set to
-        :attr:`bson.tz_util.utc`. Otherwise (default), all
-        :class:`~datetime.datetime` instances will be naive (but
-        contain UTC).
-
-        :Parameters:
-          - `as_class` (optional): the class to use for the resulting
-            document
-          - `tz_aware` (optional): if ``True``, return timezone-aware
-            :class:`~datetime.datetime` instances
-          - `compile_re` (optional): if ``False``, don't attempt to compile
-            BSON regular expressions into Python regular expressions. Return
-            instances of
-            :class:`~bson.regex.Regex` instead. Can avoid
-            :exc:`~bson.errors.InvalidBSON` errors when receiving
-            Python-incompatible regular expressions, for example from
-            ``currentOp``
-
-        .. versionchanged:: 2.7
-           Added ``compile_re`` option.
-        ..
versionadded:: 1.9
+        By default, returns a BSON document represented as a Python
+        :class:`dict`. To use a different :class:`MutableMapping` class,
+        configure a :class:`~bson.codec_options.CodecOptions`::
+
+            >>> import collections  # From Python standard library.
+            >>> import bson
+            >>> from bson.codec_options import CodecOptions
+            >>> data = bson.BSON.encode({'a': 1})
+            >>> decoded_doc = bson.BSON(data).decode()
+
+            >>> options = CodecOptions(document_class=collections.OrderedDict)
+            >>> decoded_doc = bson.BSON(data).decode(codec_options=options)
+            >>> type(decoded_doc)
+            <class 'collections.OrderedDict'>
+
+        :param codec_options: An instance of
+            :class:`~bson.codec_options.CodecOptions`.
+
+        .. versionchanged:: 3.0
+           Removed `compile_re` option: PyMongo now always represents BSON
+           regular expressions as :class:`~bson.regex.Regex` objects. Use
+           :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
+           BSON regular expression to a Python regular expression object.
+
+           Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
+           `codec_options`.
         """
-        (document, _) = _bson_to_dict(
-            self, as_class, tz_aware, uuid_subtype, compile_re)
+        return decode(self, codec_options)

-        return document

+def has_c() -> bool:
+    """Is the C extension installed?"""
+    return _USE_C

-def has_c():
-    """Is the C extension installed?
-
-    .. versionadded:: 1.9
-    """
-    return _use_c

+def _after_fork() -> None:
+    """Release the ObjectId lock in the child process after a fork."""
+    if ObjectId._inc_lock.locked():
+        ObjectId._inc_lock.release()

-def has_uuid():
-    """Is the uuid module available?
-    .. versionadded:: 2.3
-    """
-    return _use_uuid

+if hasattr(os, "register_at_fork"):
+    # The handler runs in the child, on the same thread that called fork.
+    # It could misbehave if fork were called from inside a critical region on
+    # that thread, but that is fine: we never call fork directly from a
+    # critical region.
+    os.register_at_fork(after_in_child=_after_fork)
diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c
index 733ce3de43..7d184641c5 100644
--- a/bson/_cbsonmodule.c
+++ b/bson/_cbsonmodule.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009-2014 MongoDB, Inc.
+ * Copyright 2009-present MongoDB, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,12 +20,12 @@
 * should be used to speed up BSON encoding and decoding.
*/ +#define PY_SSIZE_T_CLEAN #include "Python.h" #include "datetime.h" #include "buffer.h" #include "time64.h" -#include "encoding_helpers.h" #define _CBSON_MODULE #include "_cbsonmodule.h" @@ -42,7 +42,6 @@ struct module_state { PyObject* Code; PyObject* ObjectId; PyObject* DBRef; - PyObject* RECompile; PyObject* Regex; PyObject* UUID; PyObject* Timestamp; @@ -50,34 +49,165 @@ struct module_state { PyObject* MaxKey; PyObject* UTC; PyTypeObject* REType; + PyObject* BSONInt64; + PyObject* Decimal128; + PyObject* Mapping; + PyObject* DatetimeMS; + PyObject* min_datetime; + PyObject* max_datetime; + PyObject* replace_args; + PyObject* replace_kwargs; + PyObject* _type_marker_str; + PyObject* _flags_str; + PyObject* _pattern_str; + PyObject* _encoder_map_str; + PyObject* _decoder_map_str; + PyObject* _fallback_encoder_str; + PyObject* _raw_str; + PyObject* _subtype_str; + PyObject* _binary_str; + PyObject* _scope_str; + PyObject* _inc_str; + PyObject* _time_str; + PyObject* _bid_str; + PyObject* _replace_str; + PyObject* _astimezone_str; + PyObject* _id_str; + PyObject* _dollar_ref_str; + PyObject* _dollar_id_str; + PyObject* _dollar_db_str; + PyObject* _tzinfo_str; + PyObject* _as_doc_str; + PyObject* _utcoffset_str; + PyObject* _from_uuid_str; + PyObject* _as_uuid_str; + PyObject* _from_bid_str; + int64_t min_millis; + int64_t max_millis; }; -/* The Py_TYPE macro was introduced in CPython 2.6 */ -#ifndef Py_TYPE -#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) -#endif - -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif /* Maximum number of regex flags */ #define FLAGS_SIZE 7 +/* Default UUID representation type code. */ +#define PYTHON_LEGACY 3 + +/* Other UUID representations. */ +#define STANDARD 4 #define JAVA_LEGACY 5 #define CSHARP_LEGACY 6 +#define UNSPECIFIED 0 + #define BSON_MAX_SIZE 2147483647 /* The smallest possible BSON document, i.e. "{}" */ #define BSON_MIN_SIZE 5 +/* Datetime codec options */ +#define DATETIME 1 +#define DATETIME_CLAMP 2 +#define DATETIME_MS 3 +#define DATETIME_AUTO 4 + +/* Converts integer to its string representation in decimal notation. 
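+ * For example, -42 is rendered as "-42": digits are emitted least-significant
+ * first and the buffer is reversed in place at the end. The caller must
+ * supply at least 21 bytes (a 64-bit value plus sign and NUL terminator).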
*/ +extern int cbson_long_long_to_str(long long num, char* str, size_t size) { + // Buffer should fit 64-bit signed integer + if (size < 21) { + PyErr_Format( + PyExc_RuntimeError, + "Buffer too small to hold long long: %d < 21", size); + return -1; + } + int index = 0; + int sign = 1; + // Convert to unsigned to handle -LLONG_MIN overflow + unsigned long long absNum; + // Handle the case of 0 + if (num == 0) { + str[index++] = '0'; + str[index] = '\0'; + return 0; + } + // Handle negative numbers + if (num < 0) { + sign = -1; + absNum = 0ULL - (unsigned long long)num; + } else { + absNum = (unsigned long long)num; + } + // Convert the number to string + unsigned long long digit; + while (absNum > 0) { + digit = absNum % 10ULL; + str[index++] = (char)digit + '0'; // Convert digit to character + absNum /= 10; + } + // Add minus sign if negative + if (sign == -1) { + str[index++] = '-'; + } + str[index] = '\0'; // Null terminator + // Reverse the string + int start = 0; + int end = index - 1; + while (start < end) { + char temp = str[start]; + str[start++] = str[end]; + str[end--] = temp; + } + return 0; +} + +static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { + // Test extreme values + Py_ssize_t maxNum = PY_SSIZE_T_MAX; + Py_ssize_t minNum = PY_SSIZE_T_MIN; + Py_ssize_t num; + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + int res = LL2STR(str_1, (long long)minNum); + if (res == -1) { + return NULL; + } + INT2STRING(str_2, (long long)minNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + LL2STR(str_1, (long long)maxNum); + INT2STRING(str_2, (long long)maxNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + + // Test common values + for (num = 0; num < 10000; num++) { + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + LL2STR(str_1, (long long)num); + INT2STRING(str_2, (long long)num); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + } + + return args; +} + /* Get an error class from the bson.errors module. * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("bson.errors"); if (!errors) { return NULL; @@ -90,7 +220,7 @@ static PyObject* _error(char* name) { /* Safely downcast from Py_ssize_t to int, setting an * exception and returning -1 on error. */ static int -_downcast_and_check(Py_ssize_t size, int extra) { +_downcast_and_check(Py_ssize_t size, uint8_t extra) { if (size > BSON_MAX_SIZE || ((BSON_MAX_SIZE - extra) < size)) { PyObject* InvalidStringData = _error("InvalidStringData"); if (InvalidStringData) { @@ -104,15 +234,20 @@ _downcast_and_check(Py_ssize_t size, int extra) { } static PyObject* elements_to_dict(PyObject* self, const char* string, - unsigned max, PyObject* as_class, - unsigned char tz_aware, - unsigned char uuid_subtype, - unsigned char compile_re); + unsigned max, + const codec_options_t* options); static int _write_element_to_buffer(PyObject* self, buffer_t buffer, int type_byte, PyObject* value, unsigned char check_keys, - unsigned char uuid_subtype); + const codec_options_t* options, + unsigned char in_custom_call, + unsigned char in_fallback_call); + +/* Write a RawBSONDocument to the buffer. + * Returns the number of bytes written or 0 on failure. 
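+ * (Used for RawBSONDocument values, whose `raw` attribute already holds the
+ * fully encoded BSON bytes.)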
+ */ +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); /* Date stuff */ static PyObject* datetime_from_millis(long long millis) { @@ -122,7 +257,7 @@ static PyObject* datetime_from_millis(long long millis) { * 2. Multiply that by 1000: 253402300799000 * 3. Add in microseconds divided by 1000 253402300799999 * - * (Note: BSON doesn't support microsecond accuracy, hence the rounding.) + * (Note: BSON doesn't support microsecond accuracy, hence the truncation.) * * To decode we could do: * 1. Get seconds: timestamp / 1000: 253402300799 @@ -144,19 +279,51 @@ static PyObject* datetime_from_millis(long long millis) { * micros = diff * 1000 111000 * Resulting in datetime(1, 1, 1, 1, 1, 1, 111000) -- the expected result */ + PyObject* datetime = NULL; int diff = (int)(((millis % 1000) + 1000) % 1000); int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; struct TM timeinfo; - gmtime64_r(&seconds, &timeinfo); - - return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, - timeinfo.tm_mon + 1, - timeinfo.tm_mday, - timeinfo.tm_hour, - timeinfo.tm_min, - timeinfo.tm_sec, - microseconds); + cbson_gmtime64_r(&seconds, &timeinfo); + + datetime = PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, + timeinfo.tm_mon + 1, + timeinfo.tm_mday, + timeinfo.tm_hour, + timeinfo.tm_min, + timeinfo.tm_sec, + microseconds); + if(!datetime) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + + /* + * Calling _error clears the error state, so fetch it first. + */ + PyErr_Fetch(&etype, &evalue, &etrace); + + /* Only add addition error message on ValueError exceptions. */ + if (PyErr_GivenExceptionMatches(etype, PyExc_ValueError)) { + if (evalue) { + PyObject* err_msg = PyObject_Str(evalue); + if (err_msg) { + PyObject* appendage = PyUnicode_FromString(" (Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO) or MongoClient(datetime_conversion='DATETIME_AUTO')). See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes"); + if (appendage) { + PyObject* msg = PyUnicode_Concat(err_msg, appendage); + if (msg) { + Py_DECREF(evalue); + evalue = msg; + } + } + Py_XDECREF(appendage); + } + Py_XDECREF(err_msg); + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } + /* Steals references to args. */ + PyErr_Restore(etype, evalue, etrace); + } + return datetime; } static long long millis_from_datetime(PyObject* datetime) { @@ -170,20 +337,190 @@ static long long millis_from_datetime(PyObject* datetime) { timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - millis = timegm64(&timeinfo) * 1000; + millis = cbson_timegm64(&timeinfo) * 1000; millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; return millis; } +/* Extended-range datetime, returns a DatetimeMS object with millis */ +static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ + // Allocate a new DatetimeMS object. + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } + + PyObject* dt = NULL; + PyObject* ll_millis = NULL; + + if (!(ll_millis = PyLong_FromLongLong(millis))){ + return NULL; + } + dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); + Py_DECREF(ll_millis); + return dt; +} + +/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. 
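+ * Returns 1 on success, or 0 (with OverflowError set) when the value does
+ * not fit in a signed 64-bit integer.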
*/ +static int millis_from_datetime_ms(PyObject* dt, long long* out){ + PyObject* ll_millis; + long long millis; + + if (!(ll_millis = PyNumber_Long(dt))){ + return 0; + } + millis = PyLong_AsLongLong(ll_millis); + Py_DECREF(ll_millis); + if (millis == -1 && PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } + *out = millis; + return 1; +} + +static PyObject* decode_datetime(PyObject* self, long long millis, const codec_options_t* options){ + PyObject* naive = NULL; + PyObject* replace = NULL; + PyObject* value = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + goto invalid; + } + if (options->datetime_conversion == DATETIME_MS){ + return datetime_ms_from_millis(self, millis); + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + if (dt_clamp || dt_auto){ + int64_t min_millis = state->min_millis; + int64_t max_millis = state->max_millis; + int64_t min_millis_offset = 0; + int64_t max_millis_offset = 0; + if (options->tz_aware && options->tzinfo && options->tzinfo != Py_None) { + PyObject* utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->min_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + min_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->max_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + max_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + } + if (min_millis_offset < 0) { + min_millis -= min_millis_offset; + } + + if (max_millis_offset > 0) { + max_millis -= max_millis_offset; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else { // dt_auto + if (millis < min_millis || millis > max_millis){ + return datetime_ms_from_millis(self, millis); + } + } + } + + naive = datetime_from_millis(millis); + if (!naive) { + goto invalid; + } + + if (!options->tz_aware) { /* In the naive case, we're done here. 
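+     * No tzinfo replace()/astimezone() conversion is needed before returning.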
*/ + return naive; + } + replace = PyObject_GetAttr(naive, state->_replace_str); + if (!replace) { + goto invalid; + } + value = PyObject_Call(replace, state->replace_args, state->replace_kwargs); + if (!value) { + goto invalid; + } + + /* convert to local time */ + if (options->tzinfo != Py_None) { + PyObject* temp = PyObject_CallMethodObjArgs(value, state->_astimezone_str, options->tzinfo, NULL); + Py_DECREF(value); + value = temp; + } +invalid: + Py_XDECREF(naive); + Py_XDECREF(replace); + return value; +} + /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (buffer_write(buffer, data, size)) { - PyErr_NoMemory(); + if (pymongo_buffer_write(buffer, data, size)) { return 0; } return 1; } +int buffer_write_double(buffer_t buffer, double data) { + double data_le = BSON_DOUBLE_TO_LE(data); + return buffer_write_bytes(buffer, (const char*)&data_le, 8); +} + +int buffer_write_int32(buffer_t buffer, int32_t data) { + uint32_t data_le = BSON_UINT32_TO_LE(data); + return buffer_write_bytes(buffer, (const char*)&data_le, 4); +} + +int buffer_write_int64(buffer_t buffer, int64_t data) { + uint64_t data_le = BSON_UINT64_TO_LE(data); + return buffer_write_bytes(buffer, (const char*)&data_le, 8); +} + +void buffer_write_int32_at_position(buffer_t buffer, + int position, + int32_t data) { + uint32_t data_le = BSON_UINT32_TO_LE(data); + memcpy(pymongo_buffer_get_buffer(buffer) + position, &data_le, 4); +} + static int write_unicode(buffer_t buffer, PyObject* py_string) { int size; const char* data; @@ -191,22 +528,14 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { if (!encoded) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AS_STRING(encoded); -#else - data = PyString_AS_STRING(encoded); -#endif if (!data) goto unicodefail; -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) -#endif goto unicodefail; - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) + if (!buffer_write_int32(buffer, (int32_t)size)) goto unicodefail; if (!buffer_write_bytes(buffer, data, size)) @@ -224,26 +553,18 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { static int write_string(buffer_t buffer, PyObject* py_string) { int size; const char* data; -#if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(py_string)){ return write_unicode(buffer, py_string); } data = PyBytes_AsString(py_string); -#else - data = PyString_AsString(py_string); -#endif if (!data) { return 0; } -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_Size(py_string), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_Size(py_string), 1)) == -1) -#endif return 0; - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) { + if (!buffer_write_int32(buffer, (int32_t)size)) { return 0; } if (!buffer_write_bytes(buffer, data, size)) { @@ -252,49 +573,6 @@ static int write_string(buffer_t buffer, PyObject* py_string) { return 1; } -/* - * Are we in the main interpreter or a sub-interpreter? - * Useful for deciding if we can use cached pure python - * types in mod_wsgi. 
- */ -static int -_in_main_interpreter(void) { - static PyInterpreterState* main_interpreter = NULL; - PyInterpreterState* interpreter; - - if (main_interpreter == NULL) { - interpreter = PyInterpreterState_Head(); - - while (PyInterpreterState_Next(interpreter)) - interpreter = PyInterpreterState_Next(interpreter); - - main_interpreter = interpreter; - } - - return (main_interpreter == PyThreadState_Get()->interp); -} - -/* - * Get a reference to a pure python type. If we are in the - * main interpreter return the cached object, otherwise import - * the object we need and return it instead. - */ -static PyObject* -_get_object(PyObject* object, char* module_name, char* object_name) { - if (_in_main_interpreter()) { - Py_XINCREF(object); - return object; - } else { - PyObject* imported = NULL; - PyObject* module = PyImport_ImportModule(module_name); - if (!module) - return NULL; - imported = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - return imported; - } -} - /* Load a Python object to cache. * * Returns non-zero on failure. */ @@ -316,9 +594,44 @@ static int _load_object(PyObject** object, char* module_name, char* object_name) * * Returns non-zero on failure. */ static int _load_python_objects(PyObject* module) { - PyObject* empty_string; - PyObject* compiled; + PyObject* empty_string = NULL; + PyObject* re_compile = NULL; + PyObject* compiled = NULL; + PyObject* min_datetime_ms = NULL; + PyObject* max_datetime_ms = NULL; struct module_state *state = GETSTATE(module); + if (!state) { + return 1; + } + + /* Cache commonly used attribute names to improve performance. */ + if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && + (state->_flags_str = PyUnicode_FromString("flags")) && + (state->_pattern_str = PyUnicode_FromString("pattern")) && + (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && + (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && + (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && + (state->_raw_str = PyUnicode_FromString("raw")) && + (state->_subtype_str = PyUnicode_FromString("subtype")) && + (state->_binary_str = PyUnicode_FromString("binary")) && + (state->_scope_str = PyUnicode_FromString("scope")) && + (state->_inc_str = PyUnicode_FromString("inc")) && + (state->_time_str = PyUnicode_FromString("time")) && + (state->_bid_str = PyUnicode_FromString("bid")) && + (state->_replace_str = PyUnicode_FromString("replace")) && + (state->_astimezone_str = PyUnicode_FromString("astimezone")) && + (state->_id_str = PyUnicode_FromString("_id")) && + (state->_dollar_ref_str = PyUnicode_FromString("$ref")) && + (state->_dollar_id_str = PyUnicode_FromString("$id")) && + (state->_dollar_db_str = PyUnicode_FromString("$db")) && + (state->_tzinfo_str = PyUnicode_FromString("tzinfo")) && + (state->_as_doc_str = PyUnicode_FromString("as_doc")) && + (state->_utcoffset_str = PyUnicode_FromString("utcoffset")) && + (state->_from_uuid_str = PyUnicode_FromString("from_uuid")) && + (state->_as_uuid_str = PyUnicode_FromString("as_uuid")) && + (state->_from_bid_str = PyUnicode_FromString("from_bid")))) { + return 1; + } if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || @@ -328,26 +641,55 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->MinKey, "bson.min_key", "MinKey") || _load_object(&state->MaxKey, "bson.max_key", "MaxKey") || _load_object(&state->UTC, "bson.tz_util", "utc") || - 
_load_object(&state->RECompile, "re", "compile") || - _load_object(&state->Regex, "bson.regex", "Regex")) { + _load_object(&state->Regex, "bson.regex", "Regex") || + _load_object(&state->BSONInt64, "bson.int64", "Int64") || + _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || + _load_object(&state->UUID, "uuid", "UUID") || + _load_object(&state->Mapping, "collections.abc", "Mapping") || + _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || + _load_object(&min_datetime_ms, "bson.datetime_ms", "_MIN_UTC_MS") || + _load_object(&max_datetime_ms, "bson.datetime_ms", "_MAX_UTC_MS") || + _load_object(&state->min_datetime, "bson.datetime_ms", "_MIN_UTC") || + _load_object(&state->max_datetime, "bson.datetime_ms", "_MAX_UTC")) { + return 1; + } + + state->min_millis = PyLong_AsLongLong(min_datetime_ms); + state->max_millis = PyLong_AsLongLong(max_datetime_ms); + Py_DECREF(min_datetime_ms); + Py_DECREF(max_datetime_ms); + if ((state->min_millis == -1 || state->max_millis == -1) && PyErr_Occurred()) { + return 1; + } + + /* Speed up datetime.replace(tzinfo=utc) call */ + state->replace_args = PyTuple_New(0); + if (!state->replace_args) { + return 1; + } + state->replace_kwargs = PyDict_New(); + if (!state->replace_kwargs) { return 1; } - /* If we couldn't import uuid then we must be on 2.4. Just ignore. */ - if (_load_object(&state->UUID, "uuid", "UUID") == 1) { - state->UUID = NULL; - PyErr_Clear(); + if (PyDict_SetItem(state->replace_kwargs, state->_tzinfo_str, state->UTC) == -1) { + return 1; } + /* Reload our REType hack too. */ -#if PY_MAJOR_VERSION >= 3 empty_string = PyBytes_FromString(""); -#else - empty_string = PyString_FromString(""); -#endif if (empty_string == NULL) { state->REType = NULL; return 1; } - compiled = PyObject_CallFunction(state->RECompile, "O", empty_string); + + if (_load_object(&re_compile, "re", "compile")) { + state->REType = NULL; + Py_DECREF(empty_string); + return 1; + } + + compiled = PyObject_CallFunction(re_compile, "O", empty_string); + Py_DECREF(re_compile); if (compiled == NULL) { state->REType = NULL; Py_DECREF(empty_string); @@ -360,65 +702,178 @@ static int _load_python_objects(PyObject* module) { return 0; } +/* + * Get the _type_marker from an Object. + * + * Return the type marker, 0 if there is no marker, or -1 on failure. + */ +static long _type_marker(PyObject* object, PyObject* _type_marker_str) { + PyObject* type_marker = NULL; + long type = 0; + + if (PyObject_HasAttr(object, _type_marker_str)) { + type_marker = PyObject_GetAttr(object, _type_marker_str); + if (type_marker == NULL) { + return -1; + } + } + + /* + * Python objects with broken __getattr__ implementations could return + * arbitrary types for a call to PyObject_GetAttrString. For example + * pymongo.database.Database returns a new Collection instance for + * __getattr__ calls with names that don't match an existing attribute + * or method. In some cases "value" could be a subtype of something + * we know how to serialize. Make a best effort to encode these types. + */ + if (type_marker && PyLong_CheckExact(type_marker)) { + type = PyLong_AsLong(type_marker); + Py_DECREF(type_marker); + } else { + Py_XDECREF(type_marker); + } + + return type; +} + +/* Fill out a type_registry_t* from a TypeRegistry object. + * + * Return 1 on success. options->document_class is a new reference. + * Return 0 on failure. 
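+ * (On success the registry's encoder_map, decoder_map, fallback_encoder and
+ * registry_obj members each hold a new reference; destroy_codec_options
+ * releases them.)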
+ */ +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { + registry->encoder_map = NULL; + registry->decoder_map = NULL; + registry->fallback_encoder = NULL; + registry->registry_obj = NULL; + + registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); + if (registry->encoder_map == NULL) { + goto fail; + } + registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); + + registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); + if (registry->decoder_map == NULL) { + goto fail; + } + registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); + + registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); + if (registry->fallback_encoder == NULL) { + goto fail; + } + registry->has_fallback_encoder = (registry->fallback_encoder != Py_None); + + registry->registry_obj = registry_obj; + Py_INCREF(registry->registry_obj); + return 1; + +fail: + Py_XDECREF(registry->encoder_map); + Py_XDECREF(registry->decoder_map); + Py_XDECREF(registry->fallback_encoder); + return 0; +} + +/* Fill out a codec_options_t* from a CodecOptions object. + * + * Return 1 on success. options->document_class is a new reference. + * Return 0 on failure. + */ +int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { + PyObject* type_registry_obj = NULL; + struct module_state *state = GETSTATE(self); + long type_marker; + if (!state) { + return 0; + } + + options->unicode_decode_error_handler = NULL; + + if (!PyArg_ParseTuple(options_obj, "ObbzOOb", + &options->document_class, + &options->tz_aware, + &options->uuid_rep, + &options->unicode_decode_error_handler, + &options->tzinfo, + &type_registry_obj, + &options->datetime_conversion)) { + return 0; + } + + type_marker = _type_marker(options->document_class, + state->_type_marker_str); + if (type_marker < 0) { + return 0; + } + + if (!cbson_convert_type_registry(type_registry_obj, + &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { + return 0; + } + + options->is_raw_bson = (101 == type_marker); + options->options_obj = options_obj; + + Py_INCREF(options->options_obj); + Py_INCREF(options->document_class); + Py_INCREF(options->tzinfo); + + return 1; +} + +void destroy_codec_options(codec_options_t* options) { + Py_CLEAR(options->document_class); + Py_CLEAR(options->tzinfo); + Py_CLEAR(options->options_obj); + Py_CLEAR(options->type_registry.registry_obj); + Py_CLEAR(options->type_registry.encoder_map); + Py_CLEAR(options->type_registry.decoder_map); + Py_CLEAR(options->type_registry.fallback_encoder); +} + static int write_element_to_buffer(PyObject* self, buffer_t buffer, int type_byte, PyObject* value, unsigned char check_keys, - unsigned char uuid_subtype) { - int result; - if(Py_EnterRecursiveCall(" while encoding an object to BSON ")) + const codec_options_t* options, + unsigned char in_custom_call, + unsigned char in_fallback_call) { + int result = 0; + if(Py_EnterRecursiveCall(" while encoding an object to BSON ")) { return 0; + } result = _write_element_to_buffer(self, buffer, type_byte, - value, check_keys, uuid_subtype); + value, check_keys, options, + in_custom_call, in_fallback_call); Py_LeaveRecursiveCall(); return result; } static void -_fix_java(const char* in, char* out) { - int i, j; - for (i = 0, j = 7; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; - 
} - for (i = 8, j = 15; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; +_set_cannot_encode(PyObject* value) { + if (PyLong_Check(value)) { + if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) { + return PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + } } -} -static void -_set_cannot_encode(PyObject* value) { + PyObject* type = NULL; PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyObject* repr = PyObject_Repr(value); - if (repr) { -#if PY_MAJOR_VERSION >= 3 - PyObject* errmsg = PyUnicode_FromString("Cannot encode object: "); -#else - PyObject* errmsg = PyString_FromString("Cannot encode object: "); -#endif - if (errmsg) { -#if PY_MAJOR_VERSION >= 3 - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(InvalidDocument, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); -#else - PyString_ConcatAndDel(&errmsg, repr); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } -#endif - } else { - Py_DECREF(repr); - } - } - Py_DECREF(InvalidDocument); + if (InvalidDocument == NULL) { + goto error; } + + type = PyObject_Type(value); + if (type == NULL) { + goto error; + } + PyErr_Format(InvalidDocument, "cannot encode object: %R, of type: %R", + value, type); +error: + Py_XDECREF(type); + Py_XDECREF(InvalidDocument); } /* @@ -427,33 +882,32 @@ _set_cannot_encode(PyObject* value) { * Sets exception and returns 0 on failure. */ static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value) { + buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { PyObject* py_flags; PyObject* py_pattern; PyObject* encoded_pattern; + PyObject* decoded_pattern; long int_flags; char flags[FLAGS_SIZE]; char check_utf8 = 0; const char* pattern_data; int pattern_length, flags_length; - result_t status; /* * Both the builtin re type and our Regex class have attributes * "flags" and "pattern". 
*/ - py_flags = PyObject_GetAttrString(value, "flags"); + py_flags = PyObject_GetAttr(value, _flags_str); if (!py_flags) { return 0; } -#if PY_MAJOR_VERSION >= 3 int_flags = PyLong_AsLong(py_flags); -#else - int_flags = PyInt_AsLong(py_flags); -#endif Py_DECREF(py_flags); - py_pattern = PyObject_GetAttrString(value, "pattern"); + if (int_flags == -1 && PyErr_Occurred()) { + return 0; + } + py_pattern = PyObject_GetAttr(value, _pattern_str); if (!py_pattern) { return 0; } @@ -469,7 +923,6 @@ static int _write_regex_to_buffer( check_utf8 = 1; } -#if PY_MAJOR_VERSION >= 3 if (!(pattern_data = PyBytes_AsString(encoded_pattern))) { Py_DECREF(encoded_pattern); return 0; @@ -478,28 +931,8 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } -#else - if (!(pattern_data = PyString_AsString(encoded_pattern))) { - Py_DECREF(encoded_pattern); - return 0; - } - if ((pattern_length = _downcast_and_check(PyString_Size(encoded_pattern), 0)) == -1) { - Py_DECREF(encoded_pattern); - return 0; - } -#endif - status = check_string((const unsigned char*)pattern_data, - pattern_length, check_utf8, 1); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded_pattern); - return 0; - } else if (status == HAS_NULL) { + + if (strlen(pattern_data) != (size_t) pattern_length){ PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyErr_SetString(InvalidDocument, @@ -510,6 +943,22 @@ static int _write_regex_to_buffer( return 0; } + if (check_utf8) { + decoded_pattern = PyUnicode_DecodeUTF8(pattern_data, (Py_ssize_t) pattern_length, NULL); + if (decoded_pattern == NULL) { + PyErr_Clear(); + PyObject* InvalidStringData = _error("InvalidStringData"); + if (InvalidStringData) { + PyErr_SetString(InvalidStringData, + "regex patterns must be valid UTF-8"); + Py_DECREF(InvalidStringData); + } + Py_DECREF(encoded_pattern); + return 0; + } + Py_DECREF(decoded_pattern); + } + if (!buffer_write_bytes(buffer, pattern_data, pattern_length + 1)) { Py_DECREF(encoded_pattern); return 0; @@ -540,11 +989,10 @@ static int _write_regex_to_buffer( if (!buffer_write_bytes(buffer, flags, flags_length)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x0B; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0B; return 1; } -/* TODO our platform better be little-endian w/ 4-byte ints! */ /* Write a single value to the buffer (also write its type_byte, for which * space has already been reserved. * @@ -552,322 +1000,318 @@ static int _write_regex_to_buffer( static int _write_element_to_buffer(PyObject* self, buffer_t buffer, int type_byte, PyObject* value, unsigned char check_keys, - unsigned char uuid_subtype) { + const codec_options_t* options, + unsigned char in_custom_call, + unsigned char in_fallback_call) { + PyObject* new_value = NULL; + int retval; + int is_list; + long type; struct module_state *state = GETSTATE(self); - + if (!state) { + return 0; + } /* - * Don't use PyObject_IsInstance for our custom types. It causes - * problems with python sub interpreters. Our custom types should - * have a _type_marker attribute, which we can switch on instead. + * Use _type_marker attribute instead of PyObject_IsInstance for better perf. 
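+ * (_type_marker returns 0 for objects without a marker and trusts only exact
+ * ints, guarding against broken __getattr__ implementations.)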
*/ - if (PyObject_HasAttrString(value, "_type_marker")) { - long type; - PyObject* type_marker = PyObject_GetAttrString(value, "_type_marker"); - if (type_marker == NULL) - return 0; -#if PY_MAJOR_VERSION >= 3 - type = PyLong_AsLong(type_marker); -#else - type = PyInt_AsLong(type_marker); -#endif - Py_DECREF(type_marker); - /* - * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value - * so we call PyErr_Occurred to differentiate. - * - * One potential reason for an error is the user passing an invalid - * type that overrides __getattr__ (e.g. pymongo.collection.Collection) - */ - if (type == -1 && PyErr_Occurred()) { - PyErr_Clear(); - _set_cannot_encode(value); - return 0; - } - switch (type) { - case 5: - { - /* Binary */ - PyObject* subtype_object; - long subtype; - const char* data; - int size; - - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); - if (!subtype_object) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - subtype = PyLong_AsLong(subtype_object); -#else - subtype = PyInt_AsLong(subtype_object); -#endif - if (subtype == -1) { - Py_DECREF(subtype_object); - return 0; - } -#if PY_MAJOR_VERSION >= 3 - size = _downcast_and_check(PyBytes_Size(value), 0); -#else - size = _downcast_and_check(PyString_Size(value), 0); -#endif - if (size == -1) { - Py_DECREF(subtype_object); - return 0; - } + type = _type_marker(value, state->_type_marker_str); + if (type < 0) { + return 0; + } + + switch (type) { + case 5: + { + /* Binary */ + PyObject* subtype_object; + char subtype; + const char* data; + int size; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; + subtype_object = PyObject_GetAttr(value, state->_subtype_str); + if (!subtype_object) { + return 0; + } + subtype = (char)PyLong_AsLong(subtype_object); + if (subtype == -1) { Py_DECREF(subtype_object); - if (subtype == 2) { -#if PY_MAJOR_VERSION >= 3 - int other_size = _downcast_and_check(PyBytes_Size(value), 4); -#else - int other_size = _downcast_and_check(PyString_Size(value), 4); -#endif - if (other_size == -1) - return 0; - if (!buffer_write_bytes(buffer, (const char*)&other_size, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, (const char*)&subtype, 1)) { - return 0; - } - } - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) { + return 0; + } + size = _downcast_and_check(PyBytes_Size(value), 0); + if (size == -1) { + Py_DECREF(subtype_object); + return 0; + } + + Py_DECREF(subtype_object); + if (subtype == 2) { + int other_size = _downcast_and_check(PyBytes_Size(value), 4); + if (other_size == -1) return 0; - } - if (subtype != 2) { - if (!buffer_write_bytes(buffer, (const char*)&subtype, 1)) { - return 0; - } - } -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_AsString(value); -#else - data = PyString_AsString(value); -#endif - if (!data) { + if (!buffer_write_int32(buffer, other_size)) { return 0; } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; - } - case 7: - { - /* ObjectId */ - const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "_ObjectId__id"); - if (!pystring) { + if (!buffer_write_bytes(buffer, &subtype, 1)) { return 0; } -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_AsString(pystring); -#else - data = PyString_AsString(pystring); -#endif - if (!data) { - Py_DECREF(pystring); + } + if (!buffer_write_int32(buffer, size)) { + return 0; + } + if (subtype != 2) { + if (!buffer_write_bytes(buffer, &subtype, 1)) { return 0; } - if (!buffer_write_bytes(buffer, data, 12)) { - 
Py_DECREF(pystring); + } + data = PyBytes_AsString(value); + if (!data) { + return 0; + } + if (!buffer_write_bytes(buffer, data, size)) { return 0; - } + } + return 1; + } + case 7: + { + /* ObjectId */ + const char* data; + PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); + if (!pystring) { + return 0; + } + data = PyBytes_AsString(pystring); + if (!data) { Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x07; - return 1; - } - case 11: - { - /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value); - } - case 13: - { - /* Code */ - int start_position, - length_location, - length; - - PyObject* scope = PyObject_GetAttrString(value, "scope"); - if (!scope) { - return 0; - } + return 0; + } + if (!buffer_write_bytes(buffer, data, 12)) { + Py_DECREF(pystring); + return 0; + } + Py_DECREF(pystring); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; + return 1; + } + case 9: + { + /* DatetimeMS */ + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); + } + case 11: + { + /* Regex */ + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); + } + case 13: + { + /* Code */ + int start_position, + length_location, + length; - if (!PyDict_Size(scope)) { - Py_DECREF(scope); - *(buffer_get_buffer(buffer) + type_byte) = 0x0D; - return write_string(buffer, value); - } + PyObject* scope = PyObject_GetAttr(value, state->_scope_str); + if (!scope) { + return 0; + } - *(buffer_get_buffer(buffer) + type_byte) = 0x0F; + if (scope == Py_None) { + Py_DECREF(scope); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0D; + return write_string(buffer, value); + } - start_position = buffer_get_position(buffer); - /* save space for length */ - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyErr_NoMemory(); - Py_DECREF(scope); - return 0; - } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0F; - if (!write_string(buffer, value)) { - Py_DECREF(scope); - return 0; - } + start_position = pymongo_buffer_get_position(buffer); + /* save space for length */ + length_location = pymongo_buffer_save_space(buffer, 4); + if (length_location == -1) { + Py_DECREF(scope); + return 0; + } - if (!write_dict(self, buffer, scope, 0, uuid_subtype, 0)) { - Py_DECREF(scope); - return 0; - } + if (!write_string(buffer, value)) { Py_DECREF(scope); + return 0; + } - length = buffer_get_position(buffer) - start_position; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); - return 1; + if (!write_dict(self, buffer, scope, 0, options, 0)) { + Py_DECREF(scope); + return 0; } - case 17: - { - /* Timestamp */ - PyObject* obj; - long i; + Py_DECREF(scope); - obj = PyObject_GetAttrString(value, "inc"); - if (!obj) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - i = PyLong_AsLong(obj); -#else - i = PyInt_AsLong(obj); -#endif - Py_DECREF(obj); - if (!buffer_write_bytes(buffer, (const char*)&i, 4)) { - return 0; - } + length = pymongo_buffer_get_position(buffer) - start_position; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)length); + return 1; + } + case 17: + { + /* Timestamp */ + PyObject* obj; + unsigned long i; - obj = PyObject_GetAttrString(value, "time"); - if (!obj) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - i = PyLong_AsLong(obj); -#else - i = PyInt_AsLong(obj); -#endif - Py_DECREF(obj); - if 
(!buffer_write_bytes(buffer, (const char*)&i, 4)) { - return 0; - } + obj = PyObject_GetAttr(value, state->_inc_str); + if (!obj) { + return 0; + } + i = PyLong_AsUnsignedLong(obj); + Py_DECREF(obj); + if (i == (unsigned long)-1 && PyErr_Occurred()) { + return 0; + } + if (!buffer_write_int32(buffer, (int32_t)i)) { + return 0; + } - *(buffer_get_buffer(buffer) + type_byte) = 0x11; - return 1; + obj = PyObject_GetAttr(value, state->_time_str); + if (!obj) { + return 0; } - case 100: - { - /* DBRef */ - PyObject* as_doc = PyObject_CallMethod(value, "as_doc", NULL); - if (!as_doc) { - return 0; - } - if (!write_dict(self, buffer, as_doc, 0, uuid_subtype, 0)) { - Py_DECREF(as_doc); - return 0; - } - Py_DECREF(as_doc); - *(buffer_get_buffer(buffer) + type_byte) = 0x03; - return 1; + i = PyLong_AsUnsignedLong(obj); + Py_DECREF(obj); + if (i == (unsigned long)-1 && PyErr_Occurred()) { + return 0; + } + if (!buffer_write_int32(buffer, (int32_t)i)) { + return 0; } - case 255: - { - /* MinKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0xFF; - return 1; + + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x11; + return 1; + } + case 18: + { + /* Int64 */ + const long long ll = PyLong_AsLongLong(value); + if (PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + return 0; } - case 127: - { - /* MaxKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0x7F; - return 1; + if (!buffer_write_int64(buffer, (int64_t)ll)) { + return 0; } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; + return 1; + } + case 19: + { + /* Decimal128 */ + const char* data; + PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); + if (!pystring) { + return 0; + } + data = PyBytes_AsString(pystring); + if (!data) { + Py_DECREF(pystring); + return 0; + } + if (!buffer_write_bytes(buffer, data, 16)) { + Py_DECREF(pystring); + return 0; + } + Py_DECREF(pystring); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x13; + return 1; + } + case 100: + { + /* DBRef */ + PyObject* as_doc = PyObject_CallMethodObjArgs(value, state->_as_doc_str, NULL); + if (!as_doc) { + return 0; + } + if (!write_dict(self, buffer, as_doc, 0, options, 0)) { + Py_DECREF(as_doc); + return 0; + } + Py_DECREF(as_doc); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; + return 1; + } + case 101: + { + /* RawBSONDocument */ + if (!write_raw_doc(buffer, value, state->_raw_str)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; + return 1; + } + case 255: + { + /* MinKey */ + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0xFF; + return 1; + } + case 127: + { + /* MaxKey */ + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x7F; + return 1; } } - /* No _type_marker attibute or not one of our types. */ + /* No _type_marker attribute or not one of our types. */ if (PyBool_Check(value)) { -#if PY_MAJOR_VERSION >= 3 - const long bool = PyLong_AsLong(value); -#else - const long bool = PyInt_AsLong(value); -#endif - const char c = bool ? 0x01 : 0x00; - *(buffer_get_buffer(buffer) + type_byte) = 0x08; + const char c = (value == Py_True) ? 
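The Timestamp hunk above adds a CPython idiom the old code skipped: PyLong_AsUnsignedLong returns (unsigned long)-1 on failure, but -1 is also a legal conversion result, so only PyErr_Occurred distinguishes an error from a value. A compact sketch of that pattern:

#include <Python.h>

/* Sketch: read an integer attribute, disambiguating -1-as-error from
 * -1-as-value via PyErr_Occurred(). *out is written only on success. */
static int
get_ulong_attr(PyObject* obj, PyObject* name, unsigned long* out) {
    PyObject* attr = PyObject_GetAttr(obj, name);  /* new reference */
    if (attr == NULL) {
        return 0;
    }
    unsigned long v = PyLong_AsUnsignedLong(attr);
    Py_DECREF(attr);
    if (v == (unsigned long)-1 && PyErr_Occurred()) {
        return 0;  /* a real conversion failure */
    }
    *out = v;
    return 1;
}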
0x01 : 0x00; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x08; return buffer_write_bytes(buffer, &c, 1); } -#if PY_MAJOR_VERSION >= 3 else if (PyLong_Check(value)) { - const long long_value = PyLong_AsLong(value); -#else - else if (PyInt_Check(value)) { - const long long_value = PyInt_AsLong(value); -#endif - - const int int_value = (int)long_value; - if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ - long long long_long_value; - PyErr_Clear(); - long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow AGAIN */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_bytes(buffer, (const char*)&long_long_value, 8); - } - *(buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_bytes(buffer, (const char*)&int_value, 4); -#if PY_MAJOR_VERSION < 3 - } else if (PyLong_Check(value)) { const long long long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; + if (long_long_value == -1 && PyErr_Occurred()) { + /* Ignore error and give the fallback_encoder a chance. */ + PyErr_Clear(); + } else if (-2147483648LL <= long_long_value && long_long_value <= 2147483647LL) { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; + return buffer_write_int32(buffer, (int32_t)long_long_value); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; + return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_bytes(buffer, (const char*)&long_long_value, 8); -#endif } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); - *(buffer_get_buffer(buffer) + type_byte) = 0x01; - return buffer_write_bytes(buffer, (const char*)&d, 8); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; + return buffer_write_double(buffer, d); } else if (value == Py_None) { - *(buffer_get_buffer(buffer) + type_byte) = 0x0A; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0A; return 1; } else if (PyDict_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x03; - return write_dict(self, buffer, value, check_keys, uuid_subtype, 0); - } else if (PyList_Check(value) || PyTuple_Check(value)) { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; + return write_dict(self, buffer, value, check_keys, options, 0); + } else if ((is_list = PyList_Check(value)) || PyTuple_Check(value)) { Py_ssize_t items, i; int start_position, length_location, length; char zero = 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = buffer_get_position(buffer); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x04; + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } - - if ((items = PySequence_Size(value)) > BSON_MAX_SIZE) { + if (is_list) { + items = PyList_Size(value); + } else { + items = PyTuple_Size(value); + } + if (items > BSON_MAX_SIZE) { PyObject* BSONError = _error("BSONError"); if (BSONError) { PyErr_SetString(BSONError, @@ -877,115 +1321,75 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } for(i = 0; i < items; i++) { - int list_type_byte = 
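The PyLong branch above replaces the old two-stage overflow dance with a single range test: values representable in 32 bits become BSON int32 (0x10), everything else becomes int64 (0x12). The selection rule reduces to this sketch (not from the patch):

#include <stdint.h>

/* Sketch: BSON type byte for a Python int already narrowed to long long. */
static char
bson_int_type(long long v) {
    return (v >= INT32_MIN && v <= INT32_MAX) ? (char)0x10 : (char)0x12;
}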
buffer_save_space(buffer, 1); - char name[16]; + int list_type_byte = pymongo_buffer_save_space(buffer, 1); + char name[BUF_SIZE]; PyObject* item_value; if (list_type_byte == -1) { - PyErr_NoMemory(); return 0; } - INT2STRING(name, (int)i); + int res = LL2STR(name, (long long)i); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { return 0; } - - if (!(item_value = PySequence_GetItem(value, i))) + if (is_list) { + item_value = PyList_GET_ITEM(value, i); + } else { + item_value = PyTuple_GET_ITEM(value, i); + } + if (!item_value) { return 0; + } if (!write_element_to_buffer(self, buffer, list_type_byte, - item_value, check_keys, uuid_subtype)) { - Py_DECREF(item_value); + item_value, check_keys, options, + 0, 0)) { return 0; } - Py_DECREF(item_value); } /* write null byte and fill in length */ if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - start_position; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); + length = pymongo_buffer_get_position(buffer) - start_position; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)length); return 1; -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Store bytes as BSON binary subtype 0. */ } else if (PyBytes_Check(value)) { - int subtype = 0; + char subtype = 0; int size; const char* data = PyBytes_AS_STRING(value); if (!data) return 0; if ((size = _downcast_and_check(PyBytes_GET_SIZE(value), 0)) == -1) return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; + if (!buffer_write_int32(buffer, (int32_t)size)) { return 0; } - if (!buffer_write_bytes(buffer, (const char*)&subtype, 1)) { + if (!buffer_write_bytes(buffer, &subtype, 1)) { return 0; } if (!buffer_write_bytes(buffer, data, size)) { return 0; } return 1; -#else - /* PyString_Check only works in Python 2.x. */ - } else if (PyString_Check(value)) { - result_t status; - const char* data; - int size; - if (!(data = PyString_AS_STRING(value))) - return 0; - if ((size = _downcast_and_check(PyString_GET_SIZE(value), 1)) == -1) - return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x02; - status = check_string((const unsigned char*)data, size - 1, 1, 0); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyObject* repr = PyObject_Repr(value); - char* repr_as_cstr = repr ? PyString_AsString(repr) : NULL; - if (repr_as_cstr) { - PyObject *message = PyString_FromFormat( - "strings in documents must be valid UTF-8: %s", - repr_as_cstr); - - if (message) { - PyErr_SetObject(InvalidStringData, message); - Py_DECREF(message); - } - } else { - /* repr(value) failed, use a generic message. 
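BSON has no dedicated array encoding: the loop above serializes a list or tuple as a document whose keys are the decimal indices "0", "1", ..., which is what LL2STR formats into name. An equivalent sketch using snprintf (the 21-byte buffer is an assumption sized for any 64-bit index plus NUL):

#include <stdio.h>

/* Sketch: format a BSON array index key. Returns the key length
 * excluding the NUL terminator, or -1 on error. */
static int
array_index_key(char out[21], long long i) {
    int n = snprintf(out, 21, "%lld", i);
    return (n > 0 && n < 21) ? n : -1;
}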
*/ - PyErr_SetString( - InvalidStringData, - "strings in documents must be valid UTF-8"); - } - Py_XDECREF(repr); - Py_DECREF(InvalidStringData); - } - return 0; - } - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; -#endif } else if (PyUnicode_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x02; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x02; return write_unicode(buffer, value); } else if (PyDateTime_Check(value)) { long long millis; - PyObject* utcoffset = PyObject_CallMethod(value, "utcoffset", NULL); + PyObject* utcoffset = PyObject_CallMethodObjArgs(value, state->_utcoffset_str , NULL); if (utcoffset == NULL) return 0; if (utcoffset != Py_None) { PyObject* result = PyNumber_Subtract(value, utcoffset); - Py_DECREF(utcoffset); if (!result) { + Py_DECREF(utcoffset); return 0; } millis = millis_from_datetime(result); @@ -993,94 +1397,87 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x09; - return buffer_write_bytes(buffer, (const char*)&millis, 8); + Py_DECREF(utcoffset); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value); - } - - /* - * Try UUID last since we have to import - * it if we're in a sub-interpreter. - * - * If we're running under python 2.4 there likely - * isn't a uuid module. - */ - if (state->UUID) { - PyObject* uuid_type = _get_object(state->UUID, "uuid", "UUID"); - if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - /* Just a special case of Binary above, but - * simpler to do as a separate case. */ - PyObject* bytes; - /* Could be bytes, bytearray, str... 
*/ - const char* data; - /* UUID is always 16 bytes */ - int size = 16; - int subtype; + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); + } else if (PyObject_IsInstance(value, state->Mapping)) { + /* PyObject_IsInstance returns -1 on error */ + if (PyErr_Occurred()) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; + return write_dict(self, buffer, value, check_keys, options, 0); + } else if (PyObject_IsInstance(value, state->UUID)) { + PyObject* binary_value = NULL; + PyObject *uuid_rep_obj = NULL; + int result; - Py_DECREF(uuid_type); + /* PyObject_IsInstance returns -1 on error */ + if (PyErr_Occurred()) { + return 0; + } - if (uuid_subtype == JAVA_LEGACY || uuid_subtype == CSHARP_LEGACY) { - subtype = 3; - } - else { - subtype = uuid_subtype; - } + if (!(uuid_rep_obj = PyLong_FromLong(options->uuid_rep))) { + return 0; + } + binary_value = PyObject_CallMethodObjArgs(state->Binary, state->_from_uuid_str, value, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_bytes(buffer, (const char*)&size, 4)) { - return 0; - } - if (!buffer_write_bytes(buffer, (const char*)&subtype, 1)) { - return 0; - } + if (binary_value == NULL) { + return 0; + } - if (uuid_subtype == CSHARP_LEGACY) { - /* Legacy C# byte order */ - bytes = PyObject_GetAttrString(value, "bytes_le"); - } - else { - bytes = PyObject_GetAttrString(value, "bytes"); - } - if (!bytes) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - /* Work around http://bugs.python.org/issue7380 */ - if (PyByteArray_Check(bytes)) { - data = PyByteArray_AsString(bytes); - } - else { - data = PyBytes_AsString(bytes); - } -#else - data = PyString_AsString(bytes); -#endif - if (data == NULL) { - Py_DECREF(bytes); + result = _write_element_to_buffer(self, buffer, + type_byte, binary_value, + check_keys, options, + in_custom_call, + in_fallback_call); + Py_DECREF(binary_value); + return result; + } + + /* Try a custom encoder if one is provided and we have not already + * attempted to use a type encoder. */ + if (!in_custom_call && !options->type_registry.is_encoder_empty) { + PyObject* value_type = NULL; + PyObject* converter = NULL; + value_type = PyObject_Type(value); + if (value_type == NULL) { + return 0; + } + converter = PyDict_GetItem(options->type_registry.encoder_map, value_type); + Py_XDECREF(value_type); + if (converter != NULL) { + /* Transform types that have a registered converter. + * A new reference is created upon transformation. */ + new_value = PyObject_CallFunctionObjArgs(converter, value, NULL); + if (new_value == NULL) { return 0; } - if (uuid_subtype == JAVA_LEGACY) { - /* Store in legacy java byte order. */ - char as_legacy_java[16]; - _fix_java(data, as_legacy_java); - if (!buffer_write_bytes(buffer, as_legacy_java, size)) { - Py_DECREF(bytes); - return 0; - } - } - else { - if (!buffer_write_bytes(buffer, data, size)) { - Py_DECREF(bytes); - return 0; - } - } - Py_DECREF(bytes); - return 1; - } else { - Py_XDECREF(uuid_type); + retval = write_element_to_buffer(self, buffer, type_byte, new_value, + check_keys, options, 1, 0); + Py_XDECREF(new_value); + return retval; } } + + /* Try the fallback encoder if one is provided and we have not already + * attempted to use the fallback encoder. 
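The custom-encoder path above is just a dictionary lookup keyed by type(value). PyDict_GetItem returns a borrowed reference and does not set an exception on a miss, which is why only value_type needs releasing. The lookup in isolation (encoder_map stands in for options->type_registry.encoder_map):

#include <Python.h>

/* Sketch: find a registered converter for type(value). Returns a
 * borrowed reference, or NULL when none is registered or the type
 * lookup itself failed. */
static PyObject*
lookup_converter(PyObject* encoder_map, PyObject* value) {
    PyObject* value_type = PyObject_Type(value);  /* new reference */
    if (value_type == NULL) {
        return NULL;
    }
    PyObject* converter = PyDict_GetItem(encoder_map, value_type);  /* borrowed */
    Py_DECREF(value_type);
    return converter;
}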
*/ + if (!in_fallback_call && options->type_registry.has_fallback_encoder) { + new_value = PyObject_CallFunctionObjArgs( + options->type_registry.fallback_encoder, value, NULL); + if (new_value == NULL) { + // propagate any exception raised by the callback + return 0; + } + retval = write_element_to_buffer(self, buffer, type_byte, new_value, + check_keys, options, 0, 1); + Py_XDECREF(new_value); + return retval; + } + /* We can't determine value's type. Fail. */ _set_cannot_encode(value); return 0; @@ -1091,13 +1488,8 @@ static int check_key_name(const char* name, int name_length) { if (name_length > 0 && name[0] == '$') { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not start with '$'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not start with '$'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1109,13 +1501,8 @@ static int check_key_name(const char* name, int name_length) { if (strchr(name, '.')) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not contain '.'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not contain '.'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1132,7 +1519,7 @@ static int check_key_name(const char* name, int name_length) { * Returns 0 on failure */ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_length, PyObject* value, unsigned char check_keys, - unsigned char uuid_subtype, unsigned char allow_id) { + const codec_options_t* options, unsigned char allow_id) { int type_byte; /* Don't write any _id elements unless we're explicitly told to - @@ -1142,9 +1529,8 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt return 1; } - type_byte = buffer_save_space(buffer, 1); + type_byte = pymongo_buffer_save_space(buffer, 1); if (type_byte == -1) { - PyErr_NoMemory(); return 0; } if (check_keys && !check_key_name(name, name_length)) { @@ -1154,7 +1540,7 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt return 0; } if (!write_element_to_buffer(self, buffer, type_byte, - value, check_keys, uuid_subtype)) { + value, check_keys, options, 0, 0)) { return 0; } return 1; @@ -1163,34 +1549,24 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt int decode_and_write_pair(PyObject* self, buffer_t buffer, PyObject* key, PyObject* value, unsigned char check_keys, - unsigned char uuid_subtype, unsigned char top_level) { + const codec_options_t* options, + unsigned char top_level) { PyObject* encoded; const char* data; int size; if (PyUnicode_Check(key)) { encoded = PyUnicode_AsUTF8String(key); - if (!encoded) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - if (!(data = PyBytes_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); + if (!encoded) { return 0; } -#else - if (!(data = PyString_AS_STRING(encoded))) { + if (!(data = PyBytes_AS_STRING(encoded))) { Py_DECREF(encoded); return 0; } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { + if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) { Py_DECREF(encoded); return 0; } -#endif if 
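The check_key_name changes above keep the same two rules on checked keys: no leading '$' and no '.' anywhere (NUL bytes are rejected separately in decode_and_write_pair). The predicate reduces to this sketch:

#include <string.h>

/* Sketch: the key rules enforced when check_keys is set. */
static int
key_name_ok(const char* name, int name_length) {
    if (name_length > 0 && name[0] == '$') {
        return 0;
    }
    return strchr(name, '.') == NULL;
}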
(strlen(data) != (size_t)(size - 1)) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { @@ -1201,56 +1577,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, Py_DECREF(encoded); return 0; } -#if PY_MAJOR_VERSION < 3 - } else if (PyString_Check(key)) { - result_t status; - encoded = key; - Py_INCREF(encoded); - - if (!(data = PyString_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } - status = check_string((const unsigned char*)data, size - 1, 1, 1); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "strings in documents must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded); - return 0; - } else if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - } - Py_DECREF(encoded); - return 0; - } -#endif } else { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyObject* repr = PyObject_Repr(key); if (repr) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromString( "documents must have only string keys, key was "); -#else - PyObject* errmsg = PyString_FromString( - "documents must have only string keys, key was "); -#endif if (errmsg) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_Concat(errmsg, repr); if (error) { PyErr_SetObject(InvalidDocument, error); @@ -1258,13 +1592,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, } Py_DECREF(errmsg); Py_DECREF(repr); -#else - PyString_ConcatAndDel(&errmsg, repr); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } -#endif } else { Py_DECREF(repr); } @@ -1276,7 +1603,7 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, /* If top_level is True, don't allow writing _id here - it was already written. */ if (!write_pair(self, buffer, data, - size - 1, value, check_keys, uuid_subtype, !top_level)) { + size - 1, value, check_keys, options, !top_level)) { Py_DECREF(encoded); return 0; } @@ -1285,143 +1612,358 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, return 1; } -/* returns 0 on failure */ + +/* Write a RawBSONDocument to the buffer. + * Returns the number of bytes written or 0 on failure. + */ +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { + char* bytes; + Py_ssize_t len; + int len_int; + int bytes_written = 0; + PyObject* bytes_obj = NULL; + + bytes_obj = PyObject_GetAttr(raw, _raw_str); + if (!bytes_obj) { + goto fail; + } + + if (-1 == PyBytes_AsStringAndSize(bytes_obj, &bytes, &len)) { + goto fail; + } + len_int = _downcast_and_check(len, 0); + if (-1 == len_int) { + goto fail; + } + if (!buffer_write_bytes(buffer, bytes, len_int)) { + goto fail; + } + bytes_written = len_int; +fail: + Py_XDECREF(bytes_obj); + return bytes_written; +} + + +/* Update Invalid Document error to include doc as a property. 
+ */ +void handle_invalid_doc_error(PyObject* dict) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *msg = NULL, *new_msg = NULL, *new_evalue = NULL; + PyErr_Fetch(&etype, &evalue, &etrace); + PyObject *InvalidDocument = _error("InvalidDocument"); + if (InvalidDocument == NULL) { + goto cleanup; + } + + if (evalue && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { + msg = PyObject_Str(evalue); + if (msg) { + const char * msg_utf8 = PyUnicode_AsUTF8(msg); + if (msg_utf8 == NULL) { + goto cleanup; + } + new_msg = PyUnicode_FromFormat("Invalid document: %s", msg_utf8); + if (new_msg == NULL) { + goto cleanup; + } + // Add doc to the error instance as a property. + new_evalue = PyObject_CallFunctionObjArgs(InvalidDocument, new_msg, dict, NULL); + Py_DECREF(evalue); + Py_DECREF(etype); + etype = InvalidDocument; + InvalidDocument = NULL; + if (new_evalue) { + evalue = new_evalue; + new_evalue = NULL; + } else { + evalue = msg; + msg = NULL; + } + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } +cleanup: + PyErr_Restore(etype, evalue, etrace); + Py_XDECREF(msg); + Py_XDECREF(InvalidDocument); + Py_XDECREF(new_evalue); + Py_XDECREF(new_msg); +} + + +/* returns the number of bytes written or 0 on failure */ int write_dict(PyObject* self, buffer_t buffer, PyObject* dict, unsigned char check_keys, - unsigned char uuid_subtype, unsigned char top_level) { + const codec_options_t* options, unsigned char top_level) { PyObject* key; PyObject* iter; char zero = 0; int length; int length_location; + struct module_state *state = GETSTATE(self); + long type_marker; + int is_dict = PyDict_Check(dict); + if (!state) { + return 0; + } - if (!PyDict_Check(dict)) { - PyObject* repr = PyObject_Repr(dict); - if (repr) { -#if PY_MAJOR_VERSION >= 3 - PyObject* errmsg = PyUnicode_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(PyExc_TypeError, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); - } -#else - PyObject* errmsg = PyString_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyString_ConcatAndDel(&errmsg, repr); + if (!is_dict) { + /* check for RawBSONDocument */ + type_marker = _type_marker(dict, state->_type_marker_str); + if (type_marker < 0) { + return 0; + } + + if (101 == type_marker) { + return write_raw_doc(buffer, dict, state->_raw_str); + } + + if (!PyObject_IsInstance(dict, state->Mapping)) { + PyObject* repr; + if ((repr = PyObject_Repr(dict))) { + PyObject* errmsg = PyUnicode_FromString( + "encoder expected a mapping type but got: "); if (errmsg) { - PyErr_SetObject(PyExc_TypeError, errmsg); + PyObject* error = PyUnicode_Concat(errmsg, repr); + if (error) { + PyErr_SetObject(PyExc_TypeError, error); + Py_DECREF(error); + } Py_DECREF(errmsg); + Py_DECREF(repr); } + else { + Py_DECREF(repr); + } + } else { + PyErr_SetString(PyExc_TypeError, + "encoder expected a mapping type"); } -#endif - else { - Py_DECREF(repr); - } - } else { - PyErr_SetString(PyExc_TypeError, - "encoder expected a mapping type"); + + return 0; + } + /* PyObject_IsInstance returns -1 on error */ + if (PyErr_Occurred()) { + return 0; } - return 0; } - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } /* Write _id first if this is a top level doc. 
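handle_invalid_doc_error above follows the standard fetch/augment/restore shape: PyErr_Fetch takes ownership of the pending exception and clears the indicator, the body builds a richer replacement, and PyErr_Restore (which steals all three references) re-raises whatever survived, so a failure inside the handler still restores the original error. The skeleton without the InvalidDocument specifics:

#include <Python.h>

/* Sketch: rewrite a pending exception in place. If any step in the
 * middle fails, the original (etype, evalue, etrace) triple is restored
 * unchanged. PyErr_Restore steals all three references. */
static void
augment_pending_error(void) {
    PyObject *etype, *evalue, *etrace;
    PyErr_Fetch(&etype, &evalue, &etrace);  /* clears the error indicator */
    /* ... optionally swap evalue for a richer exception instance ... */
    PyErr_Restore(etype, evalue, etrace);
}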
*/ if (top_level) { - PyObject* _id = PyDict_GetItemString(dict, "_id"); - if (_id) { + /* + * If "dict" is a defaultdict we don't want to call + * PyObject_GetItem on it. That would **create** + * an _id where one didn't previously exist (PYTHON-871). + */ + if (is_dict) { + /* PyDict_GetItem returns a borrowed reference. */ + PyObject* _id = PyDict_GetItem(dict, state->_id_str); + if (_id) { + if (!write_pair(self, buffer, "_id", 3, + _id, check_keys, options, 1)) { + return 0; + } + } + } else if (PyMapping_HasKey(dict, state->_id_str)) { + PyObject* _id = PyObject_GetItem(dict, state->_id_str); + if (!_id) { + return 0; + } if (!write_pair(self, buffer, "_id", 3, - _id, check_keys, uuid_subtype, 1)) { + _id, check_keys, options, 1)) { + Py_DECREF(_id); return 0; } + /* PyObject_GetItem returns a new reference. */ + Py_DECREF(_id); } } - iter = PyObject_GetIter(dict); - if (iter == NULL) { - return 0; - } - while ((key = PyIter_Next(iter)) != NULL) { - PyObject* value = PyDict_GetItem(dict, key); - if (!value) { - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - Py_DECREF(iter); + if (is_dict) { + PyObject* value; + Py_ssize_t pos = 0; + while (PyDict_Next(dict, &pos, &key, &value)) { + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); + } + return 0; + } + } + } else { + iter = PyObject_GetIter(dict); + if (iter == NULL) { return 0; } - if (!decode_and_write_pair(self, buffer, key, value, - check_keys, uuid_subtype, top_level)) { + while ((key = PyIter_Next(iter)) != NULL) { + PyObject* value = PyObject_GetItem(dict, key); + if (!value) { + PyErr_SetObject(PyExc_KeyError, key); + Py_DECREF(key); + Py_DECREF(iter); + return 0; + } + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); + } + Py_DECREF(key); + Py_DECREF(value); + Py_DECREF(iter); + return 0; + } Py_DECREF(key); - Py_DECREF(iter); + Py_DECREF(value); + } + Py_DECREF(iter); + if (PyErr_Occurred()) { return 0; } - Py_DECREF(key); } - Py_DECREF(iter); /* write null byte and fill in length */ if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &length, 4); - return 1; + length = pymongo_buffer_get_position(buffer) - length_location; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)length); + return length; } static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* dict; PyObject* result; unsigned char check_keys; - unsigned char uuid_subtype; unsigned char top_level = 1; + PyObject* options_obj = NULL; + codec_options_t options; buffer_t buffer; + PyObject* raw_bson_document_bytes_obj; + long type_marker; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "Obb|b", &dict, - &check_keys, &uuid_subtype, &top_level)) { + if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, + &options_obj, &top_level) && + convert_codec_options(self, options_obj, &options))) { return NULL; } - buffer = buffer_new(); + /* check for RawBSONDocument */ + type_marker = _type_marker(dict, state->_type_marker_str); + if (type_marker < 0) { + destroy_codec_options(&options); + return NULL; + } else if (101 == type_marker) { + destroy_codec_options(&options); + raw_bson_document_bytes_obj = 
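The _id handling above is deliberate about defaultdict (PYTHON-871): for genuine dicts it probes with PyDict_GetItem, which never invokes __missing__, whereas PyObject_GetItem on a defaultdict would create the very _id being checked for. A sketch of the side-effect-free probe:

#include <Python.h>

/* Sketch: peek at doc["_id"] without mutating the document. The concrete
 * PyDict_GetItem API bypasses __missing__ on dict subclasses such as
 * defaultdict. Returns a borrowed reference, or NULL if absent. */
static PyObject*
peek_id(PyObject* doc, PyObject* id_key) {
    if (PyDict_Check(doc)) {
        return PyDict_GetItem(doc, id_key);
    }
    /* Non-dict mappings take the PyMapping_HasKey / PyObject_GetItem
     * path shown in the hunk above. */
    return NULL;
}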
PyObject_GetAttr(dict, state->_raw_str); + if (NULL == raw_bson_document_bytes_obj) { + return NULL; + } + return raw_bson_document_bytes_obj; + } + + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); + destroy_codec_options(&options); return NULL; } - if (!write_dict(self, buffer, dict, check_keys, uuid_subtype, top_level)) { - buffer_free(buffer); + if (!write_dict(self, buffer, dict, check_keys, &options, top_level)) { + destroy_codec_options(&options); + pymongo_buffer_free(buffer); return NULL; } /* objectify buffer */ -#if PY_MAJOR_VERSION >= 3 - result = Py_BuildValue("y#", buffer_get_buffer(buffer), - buffer_get_position(buffer)); -#else - result = Py_BuildValue("s#", buffer_get_buffer(buffer), - buffer_get_position(buffer)); -#endif - buffer_free(buffer); + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + destroy_codec_options(&options); + pymongo_buffer_free(buffer); return result; } -static PyObject* get_value(PyObject* self, const char* buffer, unsigned* position, - unsigned char type, unsigned max, PyObject* as_class, - unsigned char tz_aware, unsigned char uuid_subtype, - unsigned char compile_re) { +/* + * Hook for optional decoding BSON documents to DBRef. + */ +static PyObject *_dbref_hook(PyObject* self, PyObject* value) { struct module_state *state = GETSTATE(self); + PyObject* ref = NULL; + PyObject* id = NULL; + PyObject* database = NULL; + PyObject* ret = NULL; + int db_present = 0; + if (!state) { + return NULL; + } + + /* Decoding for DBRefs */ + if (PyMapping_HasKey(value, state->_dollar_ref_str) && PyMapping_HasKey(value, state->_dollar_id_str)) { /* DBRef */ + ref = PyObject_GetItem(value, state->_dollar_ref_str); + /* PyObject_GetItem returns NULL to indicate error. */ + if (!ref) { + goto invalid; + } + id = PyObject_GetItem(value, state->_dollar_id_str); + /* PyObject_GetItem returns NULL to indicate error. 
*/ + if (!id) { + goto invalid; + } + + if (PyMapping_HasKey(value, state->_dollar_db_str)) { + database = PyObject_GetItem(value, state->_dollar_db_str); + if (!database) { + goto invalid; + } + db_present = 1; + } else { + database = Py_None; + Py_INCREF(database); + } + + // check types + if (!(PyUnicode_Check(ref) && (database == Py_None || PyUnicode_Check(database)))) { + ret = value; + goto invalid; + } + + PyMapping_DelItem(value, state->_dollar_ref_str); + PyMapping_DelItem(value, state->_dollar_id_str); + if (db_present) { + PyMapping_DelItem(value, state->_dollar_db_str); + } + + ret = PyObject_CallFunctionObjArgs(state->DBRef, ref, id, database, value, NULL); + Py_DECREF(value); + } else { + ret = value; + } +invalid: + Py_XDECREF(ref); + Py_XDECREF(id); + Py_XDECREF(database); + return ret; +} +static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, + unsigned* position, unsigned char type, + unsigned max, const codec_options_t* options, int raw_array) { + struct module_state *state = GETSTATE(self); PyObject* value = NULL; + if (!state) { + return NULL; + } switch (type) { case 1: { @@ -1430,18 +1972,19 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio goto invalid; } memcpy(&d, buffer + *position, 8); - value = PyFloat_FromDouble(d); + value = PyFloat_FromDouble(BSON_DOUBLE_FROM_LE(d)); *position += 8; break; } case 2: case 14: { - unsigned value_length; + uint32_t value_length; if (max < 4) { goto invalid; } memcpy(&value_length, buffer + *position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); /* Encoded string length + string */ if (!value_length || max < value_length || max < 4 + value_length) { goto invalid; @@ -1451,7 +1994,9 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (buffer[*position + value_length - 1]) { goto invalid; } - value = PyUnicode_DecodeUTF8(buffer + *position, value_length - 1, "strict"); + value = PyUnicode_DecodeUTF8( + buffer + *position, value_length - 1, + options->unicode_decode_error_handler); if (!value) { goto invalid; } @@ -1460,12 +2005,13 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } case 3: { - PyObject* collection; - unsigned size; + uint32_t size; + if (max < 4) { goto invalid; } memcpy(&size, buffer + *position, 4); + size = BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE || max < size) { goto invalid; } @@ -1473,52 +2019,22 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (buffer[*position + size - 1]) { goto invalid; } - value = elements_to_dict(self, buffer + *position + 4, - size - 5, as_class, tz_aware, uuid_subtype, - compile_re); + + value = elements_to_dict(self, buffer + *position, + size, options); if (!value) { goto invalid; } - /* Decoding for DBRefs */ - collection = PyDict_GetItemString(value, "$ref"); - if (collection) { /* DBRef */ - PyObject* dbref = NULL; - PyObject* dbref_type; - PyObject* id; - PyObject* database; - - Py_INCREF(collection); - PyDict_DelItemString(value, "$ref"); - - id = PyDict_GetItemString(value, "$id"); - if (id == NULL) { - id = Py_None; - Py_INCREF(id); - } else { - Py_INCREF(id); - PyDict_DelItemString(value, "$id"); - } - - database = PyDict_GetItemString(value, "$db"); - if (database == NULL) { - database = Py_None; - Py_INCREF(database); - } else { - Py_INCREF(database); - PyDict_DelItemString(value, "$db"); - } - - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = 
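Every fixed-width read in the decoder above now funnels through the BSON_UINT32_FROM_LE / BSON_UINT64_FROM_LE / BSON_DOUBLE_FROM_LE macros after a memcpy: the copy avoids unaligned loads and the macro byte-swaps only on big-endian hosts. A self-contained sketch of such a reader (the bswap builtin is a GCC/Clang assumption):

#include <stdint.h>
#include <string.h>

/* Sketch: read a little-endian uint32 at an arbitrary buffer offset.
 * memcpy sidesteps alignment restrictions; the swap compiles away on
 * little-endian machines. */
static uint32_t
read_uint32_le(const char* p) {
    uint32_t v;
    memcpy(&v, p, 4);
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    v = __builtin_bswap32(v);  /* GCC/Clang builtin (assumption) */
#endif
    return v;
}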
PyObject_CallFunctionObjArgs(dbref_type, collection, id, database, value, NULL); - Py_DECREF(dbref_type); - } - Py_DECREF(value); - value = dbref; + if (options->is_raw_bson) { + *position += size; + break; + } - Py_DECREF(id); - Py_DECREF(collection); - Py_DECREF(database); + /* Hook for DBRefs */ + value = _dbref_hook(self, value); + if (!value) { + goto invalid; } *position += size; @@ -1526,20 +2042,30 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } case 4: { - unsigned size, end; + uint32_t size, end; if (max < 4) { goto invalid; } memcpy(&size, buffer + *position, 4); + size = BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE || max < size) { goto invalid; } + end = *position + size - 1; /* Check for bad eoo */ if (buffer[end]) { goto invalid; } + + if (raw_array != 0) { + // Treat it as a binary buffer. + value = PyBytes_FromStringAndSize(buffer + *position, size); + *position += size; + break; + } + *position += 4; value = PyList_New(0); @@ -1562,18 +2088,23 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio Py_DECREF(value); goto invalid; } - to_append = get_value(self, buffer, position, bson_type, - max - (unsigned)key_size, - as_class, tz_aware, uuid_subtype, - compile_re); + to_append = get_value(self, name, buffer, position, bson_type, + max - (unsigned)key_size, options, raw_array); Py_LeaveRecursiveCall(); if (!to_append) { Py_DECREF(value); goto invalid; } - PyList_Append(value, to_append); + if (PyList_Append(value, to_append) < 0) { + Py_DECREF(value); + Py_DECREF(to_append); + goto invalid; + } Py_DECREF(to_append); } + if (*position != end) { + goto invalid; + } (*position)++; break; } @@ -1581,24 +2112,30 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio { PyObject* data; PyObject* st; - PyObject* type_to_create; - unsigned length; + uint32_t length, length2; unsigned char subtype; if (max < 5) { goto invalid; } memcpy(&length, buffer + *position, 4); + length = BSON_UINT32_FROM_LE(length); if (max < length) { goto invalid; } subtype = (unsigned char)buffer[*position + 4]; *position += 5; - if (subtype == 2 && length < 4) { - goto invalid; + if (subtype == 2) { + if (length < 4) { + goto invalid; + } + memcpy(&length2, buffer + *position, 4); + length2 = BSON_UINT32_FROM_LE(length2); + if (length2 != length - 4) { + goto invalid; + } } -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Decode BSON binary subtype 0 to bytes. 
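Legacy binary subtype 2 carries a second, inner length prefix, and the new validation above requires it to equal the outer length minus 4; anything else is treated as corruption. The layout check in isolation:

#include <stdint.h>

/* Sketch: validate legacy binary subtype 2, whose payload is preceded
 * by a 4-byte inner length that must describe exactly the remainder of
 * the outer payload. */
static int
subtype2_lengths_ok(uint32_t outer_len, uint32_t inner_len) {
    return outer_len >= 4 && inner_len == outer_len - 4;
}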
*/ if (subtype == 0) { value = PyBytes_FromStringAndSize(buffer + *position, length); @@ -1610,95 +2147,54 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } else { data = PyBytes_FromStringAndSize(buffer + *position, length); } -#else - if (subtype == 2) { - data = PyString_FromStringAndSize(buffer + *position + 4, length - 4); - } else { - data = PyString_FromStringAndSize(buffer + *position, length); - } -#endif if (!data) { goto invalid; } - /* Encode as UUID, not Binary */ - if ((subtype == 3 || subtype == 4) && state->UUID) { - PyObject* kwargs; - PyObject* args = PyTuple_New(0); + /* Encode as UUID or Binary based on options->uuid_rep */ + if (subtype == 3 || subtype == 4) { + PyObject* binary_value = NULL; + char uuid_rep = options->uuid_rep; + /* UUID should always be 16 bytes */ - if (!args || length != 16) { - Py_DECREF(data); - goto invalid; + if (length != 16) { + goto uuiderror; } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(data); - Py_DECREF(args); - goto invalid; + + binary_value = PyObject_CallFunction(state->Binary, "(Oi)", data, subtype); + if (binary_value == NULL) { + goto uuiderror; } - /* - * From this point, we hold refs to args, kwargs, and data. - * If anything fails, goto uuiderror to clean them up. - */ - if (uuid_subtype == CSHARP_LEGACY) { - /* Legacy C# byte order */ - if ((PyDict_SetItemString(kwargs, "bytes_le", data)) == -1) + if ((uuid_rep == UNSPECIFIED) || + (subtype == 4 && uuid_rep != STANDARD) || + (subtype == 3 && uuid_rep == STANDARD)) { + value = binary_value; + Py_INCREF(value); + } else { + PyObject *uuid_rep_obj = PyLong_FromLong(uuid_rep); + if (!uuid_rep_obj) { goto uuiderror; - } - else { - if (uuid_subtype == JAVA_LEGACY) { - /* Convert from legacy java byte order */ - char big_endian[16]; - _fix_java(buffer + *position, big_endian); - /* Free the previously created PyString object */ - Py_DECREF(data); -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_FromStringAndSize(big_endian, length); -#else - data = PyString_FromStringAndSize(big_endian, length); -#endif - if (data == NULL) - goto uuiderror; } - if ((PyDict_SetItemString(kwargs, "bytes", data)) == -1) - goto uuiderror; - - } - if ((type_to_create = _get_object(state->UUID, "uuid", "UUID"))) { - value = PyObject_Call(type_to_create, args, kwargs); - Py_DECREF(type_to_create); + value = PyObject_CallMethodObjArgs(binary_value, state->_as_uuid_str, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); } - Py_DECREF(args); - Py_DECREF(kwargs); + uuiderror: + Py_XDECREF(binary_value); Py_DECREF(data); if (!value) { goto invalid; } - *position += length; break; - - uuiderror: - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(data); - goto invalid; } -#if PY_MAJOR_VERSION >= 3 st = PyLong_FromLong(subtype); -#else - st = PyInt_FromLong(subtype); -#endif if (!st) { Py_DECREF(data); goto invalid; } - if ((type_to_create = _get_object(state->Binary, "bson.binary", "Binary"))) { - value = PyObject_CallFunctionObjArgs(type_to_create, data, st, NULL); - Py_DECREF(type_to_create); - } + value = PyObject_CallFunctionObjArgs(state->Binary, data, st, NULL); Py_DECREF(st); Py_DECREF(data); if (!value) { @@ -1716,83 +2212,46 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } case 7: { - PyObject* objectid_type; if (max < 12) { goto invalid; } - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { -#if PY_MAJOR_VERSION >= 3 - value = PyObject_CallFunction(objectid_type, "y#", buffer + *position, 12); 
-#else - value = PyObject_CallFunction(objectid_type, "s#", buffer + *position, 12); -#endif - Py_DECREF(objectid_type); - } + value = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); *position += 12; break; } case 8: { - value = buffer[(*position)++] ? Py_True : Py_False; + char boolean_raw = buffer[(*position)++]; + if (0 == boolean_raw) { + value = Py_False; + } else if (1 == boolean_raw) { + value = Py_True; + } else { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_Format(InvalidBSON, "invalid boolean value: %x", boolean_raw); + Py_DECREF(InvalidBSON); + } + return NULL; + } Py_INCREF(value); break; } case 9: { - PyObject* utc_type; - PyObject* naive; - PyObject* replace; - PyObject* args; - PyObject* kwargs; - long long millis; + int64_t millis; if (max < 8) { goto invalid; } memcpy(&millis, buffer + *position, 8); - naive = datetime_from_millis(millis); + millis = (int64_t)BSON_UINT64_FROM_LE(millis); *position += 8; - if (!tz_aware) { /* In the naive case, we're done here. */ - value = naive; - break; - } - if (!naive) { - goto invalid; - } - replace = PyObject_GetAttrString(naive, "replace"); - Py_DECREF(naive); - if (!replace) { - goto invalid; - } - args = PyTuple_New(0); - if (!args) { - Py_DECREF(replace); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(replace); - Py_DECREF(args); - goto invalid; - } - utc_type = _get_object(state->UTC, "bson.tz_util", "UTC"); - if (!utc_type || PyDict_SetItemString(kwargs, "tzinfo", utc_type) == -1) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(utc_type); - goto invalid; - } - Py_XDECREF(utc_type); - value = PyObject_Call(replace, args, kwargs); - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); + value = decode_datetime(self, millis, options); break; } case 11: { - PyObject* compile_func; PyObject* pattern; int flags; size_t flags_length, i; @@ -1800,7 +2259,9 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (pattern_length > BSON_MAX_SIZE || max < pattern_length) { goto invalid; } - pattern = PyUnicode_DecodeUTF8(buffer + *position, pattern_length, "strict"); + pattern = PyUnicode_DecodeUTF8( + buffer + *position, pattern_length, + options->unicode_decode_error_handler); if (!pattern) { goto invalid; } @@ -1833,35 +2294,21 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } *position += (unsigned)flags_length + 1; - /* - * Use re.compile() if we're configured to compile regular - * expressions, else create an instance of our Regex class. 
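The boolean case above no longer treats any nonzero byte as true; only 0x00 and 0x01 are accepted, and anything else raises InvalidBSON. A sketch of the strict mapping (ValueError stands in for the module's InvalidBSON here):

#include <Python.h>

/* Sketch: decode a raw BSON boolean byte strictly. Returns a new
 * reference, or NULL with an error set for corrupt input. */
static PyObject*
decode_bool_strict(char raw) {
    if (raw == 0) {
        Py_RETURN_FALSE;
    }
    if (raw == 1) {
        Py_RETURN_TRUE;
    }
    PyErr_Format(PyExc_ValueError, "invalid boolean value: %x",
                 (unsigned char)raw);
    return NULL;
}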
- */ - if (compile_re) { - compile_func = _get_object(state->RECompile, "re", "compile"); - } else { - compile_func = _get_object(state->Regex, "bson.regex", "Regex"); - } - - if (compile_func) { - value = PyObject_CallFunction(compile_func, "Oi", pattern, flags); - Py_DECREF(compile_func); - } + value = PyObject_CallFunction(state->Regex, "Oi", pattern, flags); Py_DECREF(pattern); break; } case 12: { - unsigned coll_length; + uint32_t coll_length; PyObject* collection; PyObject* id = NULL; - PyObject* objectid_type; - PyObject* dbref_type; if (max < 4) { goto invalid; } memcpy(&coll_length, buffer + *position, 4); + coll_length = BSON_UINT32_FROM_LE(coll_length); /* Encoded string length + string + 12 byte ObjectId */ if (!coll_length || max < coll_length || max < 4 + coll_length + 12) { goto invalid; @@ -1872,30 +2319,21 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio goto invalid; } - collection = PyUnicode_DecodeUTF8(buffer + *position, - coll_length - 1, "strict"); + collection = PyUnicode_DecodeUTF8( + buffer + *position, coll_length - 1, + options->unicode_decode_error_handler); if (!collection) { goto invalid; } *position += coll_length; - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { -#if PY_MAJOR_VERSION >= 3 - id = PyObject_CallFunction(objectid_type, "y#", buffer + *position, 12); -#else - id = PyObject_CallFunction(objectid_type, "s#", buffer + *position, 12); -#endif - Py_DECREF(objectid_type); - } + id = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); if (!id) { Py_DECREF(collection); goto invalid; } *position += 12; - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - value = PyObject_CallFunctionObjArgs(dbref_type, collection, id, NULL); - Py_DECREF(dbref_type); - } + value = PyObject_CallFunctionObjArgs(state->DBRef, collection, id, NULL); Py_DECREF(collection); Py_DECREF(id); break; @@ -1903,12 +2341,12 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio case 13: { PyObject* code; - PyObject* code_type; - unsigned value_length; + uint32_t value_length; if (max < 4) { goto invalid; } memcpy(&value_length, buffer + *position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); /* Encoded string length + string */ if (!value_length || max < value_length || max < 4 + value_length) { goto invalid; @@ -1918,32 +2356,32 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (buffer[*position + value_length - 1]) { goto invalid; } - code = PyUnicode_DecodeUTF8(buffer + *position, value_length - 1, "strict"); + code = PyUnicode_DecodeUTF8( + buffer + *position, value_length - 1, + options->unicode_decode_error_handler); if (!code) { goto invalid; } *position += value_length; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, NULL, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, NULL, NULL); Py_DECREF(code); break; } case 15: { - unsigned c_w_s_size; - unsigned code_size; - unsigned scope_size; + uint32_t c_w_s_size; + uint32_t code_size; + uint32_t scope_size; + uint32_t len; PyObject* code; PyObject* scope; - PyObject* code_type; if (max < 8) { goto invalid; } memcpy(&c_w_s_size, buffer + *position, 4); + c_w_s_size = BSON_UINT32_FROM_LE(c_w_s_size); *position += 4; if (max < c_w_s_size) { @@ -1951,8 +2389,10 @@ static PyObject* get_value(PyObject* self, const char* 
buffer, unsigned* positio } memcpy(&code_size, buffer + *position, 4); + code_size = BSON_UINT32_FROM_LE(code_size); /* code_w_scope length + code length + code + scope length */ - if (!code_size || max < code_size || max < 4 + 4 + code_size + 4) { + len = 4 + 4 + code_size + 4; + if (!code_size || max < code_size || max < len || len < code_size) { goto invalid; } *position += 4; @@ -1960,19 +2400,19 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (buffer[*position + code_size - 1]) { goto invalid; } - code = PyUnicode_DecodeUTF8(buffer + *position, code_size - 1, "strict"); + code = PyUnicode_DecodeUTF8( + buffer + *position, code_size - 1, + options->unicode_decode_error_handler); if (!code) { goto invalid; } *position += code_size; memcpy(&scope_size, buffer + *position, 4); - if (scope_size < BSON_MIN_SIZE) { - Py_DECREF(code); - goto invalid; - } + scope_size = BSON_UINT32_FROM_LE(scope_size); /* code length + code + scope length + scope */ - if ((4 + code_size + 4 + scope_size) != c_w_s_size) { + len = 4 + 4 + code_size + scope_size; + if (scope_size < BSON_MIN_SIZE || len != c_w_s_size || len < scope_size) { Py_DECREF(code); goto invalid; } @@ -1981,35 +2421,28 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio if (buffer[*position + scope_size - 1]) { goto invalid; } - scope = elements_to_dict(self, buffer + *position + 4, - scope_size - 5, (PyObject*)&PyDict_Type, - tz_aware, uuid_subtype, compile_re); + scope = elements_to_dict(self, buffer + *position, + scope_size, options); if (!scope) { Py_DECREF(code); goto invalid; } *position += scope_size; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, scope, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, scope, NULL); Py_DECREF(code); Py_DECREF(scope); break; } case 16: { - int i; + int32_t i; if (max < 4) { goto invalid; } memcpy(&i, buffer + *position, 4); -#if PY_MAJOR_VERSION >= 3 + i = (int32_t)BSON_UINT32_FROM_LE(i); value = PyLong_FromLong(i); -#else - value = PyInt_FromLong(i); -#endif if (!value) { goto invalid; } @@ -2018,65 +2451,112 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio } case 17: { - unsigned int time, inc; - PyObject* timestamp_type; + uint32_t time, inc; if (max < 8) { goto invalid; } memcpy(&inc, buffer + *position, 4); memcpy(&time, buffer + *position + 4, 4); - if ((timestamp_type = _get_object(state->Timestamp, "bson.timestamp", "Timestamp"))) { - value = PyObject_CallFunction(timestamp_type, "II", time, inc); - Py_DECREF(timestamp_type); - } + inc = BSON_UINT32_FROM_LE(inc); + time = BSON_UINT32_FROM_LE(time); + value = PyObject_CallFunction(state->Timestamp, "II", time, inc); *position += 8; break; } case 18: { - long long ll; + int64_t ll; if (max < 8) { goto invalid; } memcpy(&ll, buffer + *position, 8); - value = PyLong_FromLongLong(ll); - if (!value) { + ll = (int64_t)BSON_UINT64_FROM_LE(ll); + value = PyObject_CallFunction(state->BSONInt64, "L", ll); + *position += 8; + break; + } + case 19: + { + if (max < 16) { goto invalid; } - *position += 8; + PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); + if (!_bytes_obj) { + goto invalid; + } + value = PyObject_CallMethodObjArgs(state->Decimal128, state->_from_bid_str, _bytes_obj, NULL); + Py_DECREF(_bytes_obj); + *position += 16; break; } case 255: { - PyObject* minkey_type = 
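The code-with-scope checks above guard against 32-bit wraparound: len = 4 + 4 + code_size + 4 can wrap past zero for a hostile code_size, so the extra len < code_size comparison catches sums that max < len alone would accept. The guard in isolation:

#include <stdint.h>

/* Sketch: overflow-safe check that length + header fits inside max.
 * If the uint32 addition wrapped, the sum is smaller than length. */
static int
length_with_header_ok(uint32_t length, uint32_t header, uint32_t max) {
    uint32_t total = length + header;
    return total >= length && total <= max;
}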
_get_object(state->MinKey, "bson.min_key", "MinKey"); - if (!minkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(minkey_type, NULL); - Py_DECREF(minkey_type); + value = PyObject_CallFunctionObjArgs(state->MinKey, NULL); break; } case 127: { - PyObject* maxkey_type = _get_object(state->MaxKey, "bson.max_key", "MaxKey"); - if (!maxkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(maxkey_type, NULL); - Py_DECREF(maxkey_type); + value = PyObject_CallFunctionObjArgs(state->MaxKey, NULL); break; } default: { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "no c decoder for this type yet"); - Py_DECREF(InvalidDocument); + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyObject* bobj = PyBytes_FromFormat("%c", type); + if (bobj) { + PyObject* repr = PyObject_Repr(bobj); + Py_DECREF(bobj); + /* + * See http://bugs.python.org/issue22023 for why we can't + * just use PyUnicode_FromFormat with %S or %R to do this + * work. + */ + if (repr) { + PyObject* left = PyUnicode_FromString( + "Detected unknown BSON type "); + if (left) { + PyObject* lmsg = PyUnicode_Concat(left, repr); + Py_DECREF(left); + if (lmsg) { + PyObject* errmsg = PyUnicode_FromFormat( + "%U for fieldname '%U'. Are you using the " + "latest driver version?", lmsg, name); + if (errmsg) { + PyErr_SetObject(InvalidBSON, errmsg); + Py_DECREF(errmsg); + } + Py_DECREF(lmsg); + } + } + Py_DECREF(repr); + } + } + Py_DECREF(InvalidBSON); } goto invalid; } } if (value) { + if (!options->type_registry.is_decoder_empty) { + PyObject* value_type = NULL; + PyObject* converter = NULL; + value_type = PyObject_Type(value); + if (value_type == NULL) { + goto invalid; + } + converter = PyDict_GetItem(options->type_registry.decoder_map, value_type); + if (converter != NULL) { + PyObject* new_value = PyObject_CallFunctionObjArgs(converter, value, NULL); + Py_DECREF(value_type); + Py_DECREF(value); + return new_value; + } else { + Py_DECREF(value_type); + return value; + } + } return value; } @@ -2086,34 +2566,38 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio * Wrap any non-InvalidBSON errors in InvalidBSON. */ if (PyErr_Occurred()) { - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; /* * Calling _error clears the error state, so fetch it first. */ PyErr_Fetch(&etype, &evalue, &etrace); - InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - if (!PyErr_GivenExceptionMatches(etype, InvalidBSON)) { - /* - * Raise InvalidBSON(str(e)). - */ - Py_DECREF(etype); - etype = InvalidBSON; - if (evalue) { - PyObject *msg = PyObject_Str(evalue); - Py_DECREF(evalue); - evalue = msg; + /* Dont reraise anything but PyExc_Exceptions as InvalidBSON. */ + if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { + InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + if (!PyErr_GivenExceptionMatches(etype, InvalidBSON)) { + /* + * Raise InvalidBSON(str(e)). + */ + Py_DECREF(etype); + etype = InvalidBSON; + + if (evalue) { + PyObject *msg = PyObject_Str(evalue); + Py_DECREF(evalue); + evalue = msg; + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } else { + /* + * The current exception matches InvalidBSON, so we don't + * need this reference after all. 
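A note on the error translation above: the C code now converts only exceptions that derive from Exception, so KeyboardInterrupt, SystemExit and other BaseExceptions escape unchanged. A rough pure-Python sketch of that policy (InvalidBSON is the real bson.errors class; _decode is a hypothetical stand-in for the C get_value routine):

```python
from bson.errors import InvalidBSON

def _wrap_decode_errors(_decode, *args):
    try:
        return _decode(*args)
    except InvalidBSON:
        raise  # already the right type; re-raise unchanged
    except Exception as exc:
        # Mirrors the C path: raise InvalidBSON(str(e)).
        raise InvalidBSON(str(exc)) from exc
    # BaseExceptions are deliberately not caught, matching the
    # PyErr_GivenExceptionMatches(etype, PyExc_Exception) guard above.
```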
+ */ + Py_DECREF(InvalidBSON); } - PyErr_NormalizeException(&etype, &evalue, &etrace); - } else { - /* - * The current exception matches InvalidBSON, so we don't need - * this reference after all. - */ - Py_DECREF(InvalidBSON); } } /* Steals references to args. */ @@ -2128,44 +2612,127 @@ static PyObject* get_value(PyObject* self, const char* buffer, unsigned* positio return NULL; } +/* + * Get the next 'name' and 'value' from a document in a string, whose position + * is provided. + * + * Returns the position of the next element in the document, or -1 on error. + */ +static int _element_to_dict(PyObject* self, const char* string, + unsigned position, unsigned max, + const codec_options_t* options, + int raw_array, + PyObject** name, PyObject** value) { + unsigned char type = (unsigned char)string[position++]; + size_t name_length = strlen(string + position); + if (name_length > BSON_MAX_SIZE || position + name_length >= max) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "field name too large"); + Py_DECREF(InvalidBSON); + } + return -1; + } + *name = PyUnicode_DecodeUTF8( + string + position, name_length, + options->unicode_decode_error_handler); + if (!*name) { + /* If NULL is returned then wrap the UnicodeDecodeError + in an InvalidBSON error */ + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; + + PyErr_Fetch(&etype, &evalue, &etrace); + if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { + InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + Py_DECREF(etype); + etype = InvalidBSON; + + if (evalue) { + PyObject *msg = PyObject_Str(evalue); + Py_DECREF(evalue); + evalue = msg; + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } + } + PyErr_Restore(etype, evalue, etrace); + return -1; + } + position += (unsigned)name_length + 1; + *value = get_value(self, *name, string, &position, type, + max - position, options, raw_array); + if (!*value) { + Py_DECREF(*name); + return -1; + } + return position; +} + +static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { + /* TODO: Support buffer protocol */ + char* string; + PyObject* bson; + PyObject* options_obj = NULL; + codec_options_t options; + unsigned position; + unsigned max; + int new_position; + int raw_array = 0; + PyObject* name; + PyObject* value; + PyObject* result_tuple; + + if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, + &options_obj, &raw_array) && + convert_codec_options(self, options_obj, &options))) { + return NULL; + } + + if (!PyBytes_Check(bson)) { + PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); + return NULL; + } + string = PyBytes_AS_STRING(bson); + + new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); + if (new_position < 0) { + return NULL; + } + + result_tuple = Py_BuildValue("NNi", name, value, new_position); + if (!result_tuple) { + Py_DECREF(name); + Py_DECREF(value); + return NULL; + } + + destroy_codec_options(&options); + return result_tuple; +} + static PyObject* _elements_to_dict(PyObject* self, const char* string, - unsigned max, PyObject* as_class, - unsigned char tz_aware, - unsigned char uuid_subtype, - unsigned char compile_re) { + unsigned max, + const codec_options_t* options) { unsigned position = 0; - PyObject* dict = PyObject_CallObject(as_class, NULL); + PyObject* dict = PyObject_CallObject(options->document_class, NULL); if (!dict) { return NULL; } + int 
raw_array = 0; while (position < max) { - PyObject* name; - PyObject* value; + PyObject* name = NULL; + PyObject* value = NULL; + int new_position; - unsigned char type = (unsigned char)string[position++]; - size_t name_length = strlen(string + position); - if (name_length > BSON_MAX_SIZE || position + name_length >= max) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetNone(InvalidBSON); - Py_DECREF(InvalidBSON); - } - Py_DECREF(dict); - return NULL; - } - name = PyUnicode_DecodeUTF8(string + position, name_length, "strict"); - if (!name) { - Py_DECREF(dict); - return NULL; - } - position += (unsigned)name_length + 1; - value = get_value(self, string, &position, type, - max - position, as_class, tz_aware, uuid_subtype, - compile_re); - if (!value) { - Py_DECREF(name); + new_position = _element_to_dict( + self, string, position, max, options, raw_array, &name, &value); + if (new_position < 0) { Py_DECREF(dict); return NULL; + } else { + position = (unsigned)new_position; } PyObject_SetItem(dict, name, value); @@ -2176,52 +2743,67 @@ static PyObject* _elements_to_dict(PyObject* self, const char* string, } static PyObject* elements_to_dict(PyObject* self, const char* string, - unsigned max, PyObject* as_class, - unsigned char tz_aware, - unsigned char uuid_subtype, - unsigned char compile_re) { + unsigned max, + const codec_options_t* options) { PyObject* result; + if (options->is_raw_bson) { + return PyObject_CallFunction( + options->document_class, "y#O", + string, max, options->options_obj); + } if (Py_EnterRecursiveCall(" while decoding a BSON document")) return NULL; - result = _elements_to_dict(self, string, max, - as_class, tz_aware, uuid_subtype, compile_re); + result = _elements_to_dict(self, string + 4, max - 5, options); Py_LeaveRecursiveCall(); return result; } +static int _get_buffer(PyObject *exporter, Py_buffer *view) { + if (PyObject_GetBuffer(exporter, view, PyBUF_SIMPLE) == -1) { + return 0; + } + if (!PyBuffer_IsContiguous(view, 'C')) { + PyErr_SetString(PyExc_ValueError, + "must be a contiguous buffer"); + goto fail; + } + if (!view->buf || view->len < 0) { + PyErr_SetString(PyExc_ValueError, "invalid buffer"); + goto fail; + } + if (view->itemsize != 1) { + PyErr_SetString(PyExc_ValueError, + "buffer data must be ascii or utf8"); + goto fail; + } + return 1; +fail: + PyBuffer_Release(view); + return 0; +} + static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { - int size; + int32_t size; Py_ssize_t total_size; const char* string; - PyObject* bson; - PyObject* as_class; - unsigned char tz_aware; - unsigned char uuid_subtype; - unsigned char compile_re; - - PyObject* dict; - PyObject* remainder; - PyObject* result; + PyObject* bson = NULL; + codec_options_t options; + PyObject* result = NULL; + PyObject* options_obj; + Py_buffer view = {0}; - if (!PyArg_ParseTuple( - args, "OObbb", &bson, &as_class, &tz_aware, &uuid_subtype, &compile_re)) { - return NULL; + if (! 
(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { + return result; } -#if PY_MAJOR_VERSION >= 3 - if (!PyBytes_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _bson_to_dict must be a bytes object"); -#else - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _bson_to_dict must be a string"); -#endif - return NULL; + if (!_get_buffer(bson, &view)) { + destroy_codec_options(&options); + return result; } -#if PY_MAJOR_VERSION >= 3 - total_size = PyBytes_Size(bson); -#else - total_size = PyString_Size(bson); -#endif + + total_size = view.len; + if (total_size < BSON_MIN_SIZE) { PyObject* InvalidBSON = _error("InvalidBSON"); if (InvalidBSON) { @@ -2229,26 +2811,19 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { "not enough data for a BSON document"); Py_DECREF(InvalidBSON); } - return NULL; - } - -#if PY_MAJOR_VERSION >= 3 - string = PyBytes_AsString(bson); -#else - string = PyString_AsString(bson); -#endif - if (!string) { - return NULL; + goto done; } + string = (char*)view.buf; memcpy(&size, string, 4); + size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { PyObject* InvalidBSON = _error("InvalidBSON"); if (InvalidBSON) { PyErr_SetString(InvalidBSON, "invalid message size"); Py_DECREF(InvalidBSON); } - return NULL; + goto done; } if (total_size < size || total_size > BSON_MAX_SIZE) { @@ -2257,7 +2832,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { PyErr_SetString(InvalidBSON, "objsize too large"); Py_DECREF(InvalidBSON); } - return NULL; + goto done; } if (size != total_size || string[size - 1]) { @@ -2266,69 +2841,42 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { PyErr_SetString(InvalidBSON, "bad eoo"); Py_DECREF(InvalidBSON); } - return NULL; + goto done; } - dict = elements_to_dict(self, string + 4, (unsigned)size - 5, - as_class, tz_aware, uuid_subtype, compile_re); - if (!dict) { - return NULL; - } -#if PY_MAJOR_VERSION >= 3 - remainder = PyBytes_FromStringAndSize(string + size, total_size - size); -#else - remainder = PyString_FromStringAndSize(string + size, total_size - size); -#endif - if (!remainder) { - Py_DECREF(dict); - return NULL; - } - result = Py_BuildValue("OO", dict, remainder); - Py_DECREF(dict); - Py_DECREF(remainder); + result = elements_to_dict(self, string, (unsigned)size, &options); +done: + PyBuffer_Release(&view); + destroy_codec_options(&options); return result; } static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { - int size; + int32_t size; Py_ssize_t total_size; const char* string; PyObject* bson; PyObject* dict; - PyObject* result; - PyObject* as_class = (PyObject*)&PyDict_Type; - unsigned char tz_aware = 1; - unsigned char uuid_subtype = 3; - unsigned char compile_re = 1; - - if (!PyArg_ParseTuple( - args, "O|Obbb", - &bson, &as_class, &tz_aware, &uuid_subtype, &compile_re)) { - return NULL; - } + PyObject* result = NULL; + codec_options_t options; + PyObject* options_obj = NULL; + Py_buffer view = {0}; -#if PY_MAJOR_VERSION >= 3 - if (!PyBytes_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to decode_all must be a bytes object"); -#else - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to decode_all must be a string"); -#endif + if (!(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { return NULL; } -#if PY_MAJOR_VERSION >= 3 - total_size =
PyBytes_Size(bson); - string = PyBytes_AsString(bson); -#else - total_size = PyString_Size(bson); - string = PyString_AsString(bson); -#endif - if (!string) { + + if (!_get_buffer(bson, &view)) { + destroy_codec_options(&options); return NULL; } + total_size = view.len; + string = (char*)view.buf; - if (!(result = PyList_New(0))) - return NULL; + if (!(result = PyList_New(0))) { + goto fail; + } while (total_size > 0) { if (total_size < BSON_MIN_SIZE) { @@ -2339,10 +2887,11 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { Py_DECREF(InvalidBSON); } Py_DECREF(result); - return NULL; + goto fail; } memcpy(&size, string, 4); + size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { PyObject* InvalidBSON = _error("InvalidBSON"); if (InvalidBSON) { @@ -2350,7 +2899,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { Py_DECREF(InvalidBSON); } Py_DECREF(result); - return NULL; + goto fail; } if (total_size < size) { @@ -2360,7 +2909,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { Py_DECREF(InvalidBSON); } Py_DECREF(result); - return NULL; + goto fail; } if (string[size - 1]) { @@ -2370,89 +2919,279 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { Py_DECREF(InvalidBSON); } Py_DECREF(result); - return NULL; + goto fail; } - dict = elements_to_dict(self, string + 4, (unsigned)size - 5, - as_class, tz_aware, uuid_subtype, compile_re); + dict = elements_to_dict(self, string, (unsigned)size, &options); if (!dict) { Py_DECREF(result); - return NULL; + goto fail; + } + if (PyList_Append(result, dict) < 0) { + Py_DECREF(dict); + Py_DECREF(result); + goto fail; } - PyList_Append(result, dict); Py_DECREF(dict); string += size; total_size -= size; } + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + destroy_codec_options(&options); + return result; +} + + +static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { + uint32_t size; + uint32_t value_length; + uint32_t position = 0; + buffer_t buffer; + const char* string; + PyObject* arr; + PyObject* result = NULL; + Py_buffer view = {0}; + + if (!PyArg_ParseTuple(args, "O", &arr)) { + return NULL; + } + + if (!_get_buffer(arr, &view)) { + return NULL; + } + + buffer = pymongo_buffer_new(); + if (!buffer) { + PyBuffer_Release(&view); + return NULL; + } + + string = (char*)view.buf; + + if (view.len < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&size, string, 4); + size = BSON_UINT32_FROM_LE(size); + + /* validate the size of the array */ + if (view.len != (int32_t)size || (int32_t)size < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + /* save space for length */ + if (pymongo_buffer_save_space(buffer, size) == -1) { + goto fail; + } + pymongo_buffer_update_position(buffer, 0); + + position += 4; + while (position < size - 1) { + // Verify the value is an object. 
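For context on the walk being coded here: a BSON array is an ordinary BSON document whose keys are the decimal strings "0", "1", "2", and so on, and this function additionally requires every element to be an embedded document (type 3). A minimal Python sketch of the same traversal, illustrative only since the C code streams the slices into a pymongo buffer rather than yielding them:

```python
import struct

def iter_raw_documents(arr: bytes):
    """Yield the raw bytes of each document in a BSON array of documents."""
    size = struct.unpack_from("<i", arr, 0)[0]
    assert size == len(arr) and arr[size - 1] == 0, "bad eoo"
    position = 4
    while position < size - 1:
        assert arr[position] == 3, "array element was not an object"
        # Skip the type byte and the NUL-terminated index key ("0", "1", ...).
        position = arr.index(b"\x00", position) + 1
        (doc_len,) = struct.unpack_from("<i", arr, position)
        yield arr[position:position + doc_len]
        position += doc_len
```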
+ unsigned char type = (unsigned char)string[position]; + if (type != 3) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "array element was not an object"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + // Just skip the keys. + position = position + strlen(string + position) + 1; + + if (position >= size || (size - position) < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid array content"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&value_length, string + position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); + if (value_length < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid message size"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { + goto fail; + } + position += value_length; + } + + if (position != size - 1) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "bad object or element length"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + /* objectify buffer */ + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + pymongo_buffer_free(buffer); return result; } + static PyMethodDef _CBSONMethods[] = { {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, "convert a dictionary to a string containing its BSON representation."}, {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, "convert a BSON string to a SON object."}, - {"decode_all", _cbson_decode_all, METH_VARARGS, + {"_decode_all", _cbson_decode_all, METH_VARARGS, "convert binary data to a sequence of documents."}, + {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, + "Decode a single key, value pair."}, + {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, + {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 -#define INITERROR return NULL +#define INITERROR return -1; static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->Binary); - Py_VISIT(GETSTATE(m)->Code); - Py_VISIT(GETSTATE(m)->ObjectId); - Py_VISIT(GETSTATE(m)->DBRef); - Py_VISIT(GETSTATE(m)->RECompile); - Py_VISIT(GETSTATE(m)->Regex); - Py_VISIT(GETSTATE(m)->UUID); - Py_VISIT(GETSTATE(m)->Timestamp); - Py_VISIT(GETSTATE(m)->MinKey); - Py_VISIT(GETSTATE(m)->MaxKey); - Py_VISIT(GETSTATE(m)->UTC); - Py_VISIT(GETSTATE(m)->REType); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_VISIT(state->Binary); + Py_VISIT(state->Code); + Py_VISIT(state->ObjectId); + Py_VISIT(state->DBRef); + Py_VISIT(state->Regex); + Py_VISIT(state->UUID); + Py_VISIT(state->Timestamp); + Py_VISIT(state->MinKey); + Py_VISIT(state->MaxKey); + Py_VISIT(state->UTC); + Py_VISIT(state->REType); + Py_VISIT(state->_type_marker_str); + Py_VISIT(state->_flags_str); + Py_VISIT(state->_pattern_str); + Py_VISIT(state->_encoder_map_str); + Py_VISIT(state->_decoder_map_str); + Py_VISIT(state->_fallback_encoder_str); + Py_VISIT(state->_raw_str); + Py_VISIT(state->_subtype_str); + 
Py_VISIT(state->_binary_str); + Py_VISIT(state->_scope_str); + Py_VISIT(state->_inc_str); + Py_VISIT(state->_time_str); + Py_VISIT(state->_bid_str); + Py_VISIT(state->_replace_str); + Py_VISIT(state->_astimezone_str); + Py_VISIT(state->_id_str); + Py_VISIT(state->_dollar_ref_str); + Py_VISIT(state->_dollar_id_str); + Py_VISIT(state->_dollar_db_str); + Py_VISIT(state->_tzinfo_str); + Py_VISIT(state->_as_doc_str); + Py_VISIT(state->_utcoffset_str); + Py_VISIT(state->_from_uuid_str); + Py_VISIT(state->_as_uuid_str); + Py_VISIT(state->_from_bid_str); + Py_VISIT(state->min_datetime); + Py_VISIT(state->max_datetime); + Py_VISIT(state->replace_args); + Py_VISIT(state->replace_kwargs); return 0; } static int _cbson_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->Binary); - Py_CLEAR(GETSTATE(m)->Code); - Py_CLEAR(GETSTATE(m)->ObjectId); - Py_CLEAR(GETSTATE(m)->DBRef); - Py_CLEAR(GETSTATE(m)->RECompile); - Py_CLEAR(GETSTATE(m)->Regex); - Py_CLEAR(GETSTATE(m)->UUID); - Py_CLEAR(GETSTATE(m)->Timestamp); - Py_CLEAR(GETSTATE(m)->MinKey); - Py_CLEAR(GETSTATE(m)->MaxKey); - Py_CLEAR(GETSTATE(m)->UTC); - Py_CLEAR(GETSTATE(m)->REType); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->Binary); + Py_CLEAR(state->Code); + Py_CLEAR(state->ObjectId); + Py_CLEAR(state->DBRef); + Py_CLEAR(state->Regex); + Py_CLEAR(state->UUID); + Py_CLEAR(state->Timestamp); + Py_CLEAR(state->MinKey); + Py_CLEAR(state->MaxKey); + Py_CLEAR(state->UTC); + Py_CLEAR(state->REType); + Py_CLEAR(state->_type_marker_str); + Py_CLEAR(state->_flags_str); + Py_CLEAR(state->_pattern_str); + Py_CLEAR(state->_encoder_map_str); + Py_CLEAR(state->_decoder_map_str); + Py_CLEAR(state->_fallback_encoder_str); + Py_CLEAR(state->_raw_str); + Py_CLEAR(state->_subtype_str); + Py_CLEAR(state->_binary_str); + Py_CLEAR(state->_scope_str); + Py_CLEAR(state->_inc_str); + Py_CLEAR(state->_time_str); + Py_CLEAR(state->_bid_str); + Py_CLEAR(state->_replace_str); + Py_CLEAR(state->_astimezone_str); + Py_CLEAR(state->_id_str); + Py_CLEAR(state->_dollar_ref_str); + Py_CLEAR(state->_dollar_id_str); + Py_CLEAR(state->_dollar_db_str); + Py_CLEAR(state->_tzinfo_str); + Py_CLEAR(state->_as_doc_str); + Py_CLEAR(state->_utcoffset_str); + Py_CLEAR(state->_from_uuid_str); + Py_CLEAR(state->_as_uuid_str); + Py_CLEAR(state->_from_bid_str); + Py_CLEAR(state->min_datetime); + Py_CLEAR(state->max_datetime); + Py_CLEAR(state->replace_args); + Py_CLEAR(state->replace_kwargs); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cbson", - NULL, - sizeof(struct module_state), - _CBSONMethods, - NULL, - _cbson_traverse, - _cbson_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cbson(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cbson(void) -#endif +/* Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. 
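The traverse and clear functions above hand every object cached in the per-module state to the cycle collector, which is what makes the module safe to unload and reload. A quick way to confirm the extension actually loaded, using the real public helper bson.has_c():

```python
import bson

# True when the _cbson extension built from this diff is in use;
# False means the pure-Python fallback is handling encode/decode.
print(bson.has_c())
```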
+*/ +static int +_cbson_exec(PyObject *m) { - PyObject *m; PyObject *c_api_object; static void *_cbson_API[_cbson_API_POINTER_COUNT]; @@ -2466,44 +3205,68 @@ init_cbson(void) _cbson_API[_cbson_write_dict_INDEX] = (void *) write_dict; _cbson_API[_cbson_write_pair_INDEX] = (void *) write_pair; _cbson_API[_cbson_decode_and_write_pair_INDEX] = (void *) decode_and_write_pair; + _cbson_API[_cbson_convert_codec_options_INDEX] = (void *) convert_codec_options; + _cbson_API[_cbson_destroy_codec_options_INDEX] = (void *) destroy_codec_options; + _cbson_API[_cbson_buffer_write_double_INDEX] = (void *) buffer_write_double; + _cbson_API[_cbson_buffer_write_int32_INDEX] = (void *) buffer_write_int32; + _cbson_API[_cbson_buffer_write_int64_INDEX] = (void *) buffer_write_int64; + _cbson_API[_cbson_buffer_write_int32_at_position_INDEX] = + (void *) buffer_write_int32_at_position; + _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; -#if PY_VERSION_HEX >= 0x03010000 - /* PyCapsule is new in python 3.1 */ c_api_object = PyCapsule_New((void *) _cbson_API, "_cbson._C_API", NULL); -#else - c_api_object = PyCObject_FromVoidPtr((void *) _cbson_API, NULL); -#endif if (c_api_object == NULL) INITERROR; -#if PY_MAJOR_VERSION >= 3 - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("_cbson", _CBSONMethods); -#endif - if (m == NULL) { - Py_DECREF(c_api_object); - INITERROR; - } - /* Import several python objects */ if (_load_python_objects(m)) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } +#if PY_VERSION_HEX >= 0x030D0000 + if (PyModule_Add(m, "_C_API", c_api_object) < 0) { + Py_DECREF(m); + INITERROR; + } +# else if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } +#endif + + return 0; +} -#if PY_MAJOR_VERSION >= 3 - return m; +static PyModuleDef_Slot _cbson_slots[] = { + {Py_mod_exec, _cbson_exec}, +#if defined(Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED) + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cbson", + NULL, + sizeof(struct module_state), + _CBSONMethods, + _cbson_slots, + _cbson_traverse, + _cbson_clear, + NULL +}; + +PyMODINIT_FUNC +PyInit__cbson(void) +{ + return PyModuleDef_Init(&moduledef); } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 0b207a32ae..3be2b74427 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -1,5 +1,5 @@ /* - * Copyright 2009-2014 MongoDB, Inc. + * Copyright 2009-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,44 +14,66 @@ * limitations under the License. */ +#include "bson-endian.h" + #ifndef _CBSONMODULE_H #define _CBSONMODULE_H -/* Py_ssize_t was new in python 2.5. See conversion - * guidlines in http://www.python.org/dev/peps/pep-0353 - * */ -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -#define PY_SSIZE_T_MAX INT_MAX -#define PY_SSIZE_T_MIN INT_MIN -#endif - #if defined(WIN32) || defined(_MSC_VER) /* * This macro is basically an implementation of asprintf for win32 * We print to the provided buffer to get the string value as an int. + * USE LL2STR. This is kept only to test LL2STR. 
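The switch from "%d" to "%lld" in the macros below matters because they are now fed 64-bit values; the worst case also bounds the BUF_SIZE constant defined further down. A worked check of that bound in plain Python:

```python
# LLONG_MIN has the longest decimal rendering of any 64-bit signed integer.
llong_min = -(2**63)
assert str(llong_min) == "-9223372036854775808"
assert len(str(llong_min)) == 20  # one sign character plus 19 digits
# Adding the trailing NUL byte gives the BUF_SIZE of 21 used by LL2STR.
```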
*/ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #define INT2STRING(buffer, i) \ _snprintf_s((buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) #else #define INT2STRING(buffer, i) \ _snprintf((buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif #else -#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%d", (i)) +#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%lld", (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif +/* Just enough space in char array to hold LLONG_MIN and null terminator */ +#define BUF_SIZE 21 +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long int num, char* str, size_t size); +#define LL2STR(buffer, i) cbson_long_long_to_str((i), (buffer), sizeof(buffer)) + +typedef struct type_registry_t { + PyObject* encoder_map; + PyObject* decoder_map; + PyObject* fallback_encoder; + PyObject* registry_obj; + unsigned char is_encoder_empty; + unsigned char is_decoder_empty; + unsigned char has_fallback_encoder; +} type_registry_t; + +typedef struct codec_options_t { + PyObject* document_class; + unsigned char tz_aware; + unsigned char uuid_rep; + char* unicode_decode_error_handler; + PyObject* tzinfo; + type_registry_t type_registry; + unsigned char datetime_conversion; + PyObject* options_obj; + unsigned char is_raw_bson; +} codec_options_t; + /* C API functions */ #define _cbson_buffer_write_bytes_INDEX 0 #define _cbson_buffer_write_bytes_RETURN int @@ -59,18 +81,46 @@ typedef int Py_ssize_t; #define _cbson_write_dict_INDEX 1 #define _cbson_write_dict_RETURN int -#define _cbson_write_dict_PROTO (PyObject* self, buffer_t buffer, PyObject* dict, unsigned char check_keys, unsigned char uuid_subtype, unsigned char top_level) +#define _cbson_write_dict_PROTO (PyObject* self, buffer_t buffer, PyObject* dict, unsigned char check_keys, const codec_options_t* options, unsigned char top_level) #define _cbson_write_pair_INDEX 2 #define _cbson_write_pair_RETURN int -#define _cbson_write_pair_PROTO (PyObject* self, buffer_t buffer, const char* name, int name_length, PyObject* value, unsigned char check_keys, unsigned char uuid_subtype, unsigned char allow_id) +#define _cbson_write_pair_PROTO (PyObject* self, buffer_t buffer, const char* name, int name_length, PyObject* value, unsigned char check_keys, const codec_options_t* options, unsigned char allow_id) #define _cbson_decode_and_write_pair_INDEX 3 #define _cbson_decode_and_write_pair_RETURN int -#define _cbson_decode_and_write_pair_PROTO (PyObject* self, buffer_t buffer, PyObject* key, PyObject* value, unsigned char check_keys, unsigned char uuid_subtype, unsigned char top_level) +#define _cbson_decode_and_write_pair_PROTO (PyObject* self, buffer_t buffer, PyObject* key, PyObject* value, unsigned char check_keys, const codec_options_t* options, unsigned char top_level) + +#define _cbson_convert_codec_options_INDEX 4 +#define _cbson_convert_codec_options_RETURN int +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, codec_options_t* options) + +#define _cbson_destroy_codec_options_INDEX 5 +#define _cbson_destroy_codec_options_RETURN void +#define _cbson_destroy_codec_options_PROTO (codec_options_t* options) + +#define 
_cbson_buffer_write_double_INDEX 6 +#define _cbson_buffer_write_double_RETURN int +#define _cbson_buffer_write_double_PROTO (buffer_t buffer, double data) + +#define _cbson_buffer_write_int32_INDEX 7 +#define _cbson_buffer_write_int32_RETURN int +#define _cbson_buffer_write_int32_PROTO (buffer_t buffer, int32_t data) + +#define _cbson_buffer_write_int64_INDEX 8 +#define _cbson_buffer_write_int64_RETURN int +#define _cbson_buffer_write_int64_PROTO (buffer_t buffer, int64_t data) + +#define _cbson_buffer_write_int32_at_position_INDEX 9 +#define _cbson_buffer_write_int32_at_position_RETURN void +#define _cbson_buffer_write_int32_at_position_PROTO (buffer_t buffer, int position, int32_t data) + +#define _cbson_downcast_and_check_INDEX 10 +#define _cbson_downcast_and_check_RETURN int +#define _cbson_downcast_and_check_PROTO (Py_ssize_t size, uint8_t extra) /* Total number of C API pointers */ -#define _cbson_API_POINTER_COUNT 4 +#define _cbson_API_POINTER_COUNT 11 #ifdef _CBSON_MODULE /* This section is used when compiling _cbsonmodule */ @@ -83,6 +133,20 @@ static _cbson_write_pair_RETURN write_pair _cbson_write_pair_PROTO; static _cbson_decode_and_write_pair_RETURN decode_and_write_pair _cbson_decode_and_write_pair_PROTO; +static _cbson_convert_codec_options_RETURN convert_codec_options _cbson_convert_codec_options_PROTO; + +static _cbson_destroy_codec_options_RETURN destroy_codec_options _cbson_destroy_codec_options_PROTO; + +static _cbson_buffer_write_double_RETURN buffer_write_double _cbson_buffer_write_double_PROTO; + +static _cbson_buffer_write_int32_RETURN buffer_write_int32 _cbson_buffer_write_int32_PROTO; + +static _cbson_buffer_write_int64_RETURN buffer_write_int64 _cbson_buffer_write_int64_PROTO; + +static _cbson_buffer_write_int32_at_position_RETURN buffer_write_int32_at_position _cbson_buffer_write_int32_at_position_PROTO; + +static _cbson_downcast_and_check_RETURN _downcast_and_check _cbson_downcast_and_check_PROTO; + #else /* This section is used in modules that use _cbsonmodule's API */ @@ -96,6 +160,20 @@ static void **_cbson_API; #define decode_and_write_pair (*(_cbson_decode_and_write_pair_RETURN (*)_cbson_decode_and_write_pair_PROTO) _cbson_API[_cbson_decode_and_write_pair_INDEX]) +#define convert_codec_options (*(_cbson_convert_codec_options_RETURN (*)_cbson_convert_codec_options_PROTO) _cbson_API[_cbson_convert_codec_options_INDEX]) + +#define destroy_codec_options (*(_cbson_destroy_codec_options_RETURN (*)_cbson_destroy_codec_options_PROTO) _cbson_API[_cbson_destroy_codec_options_INDEX]) + +#define buffer_write_double (*(_cbson_buffer_write_double_RETURN (*)_cbson_buffer_write_double_PROTO) _cbson_API[_cbson_buffer_write_double_INDEX]) + +#define buffer_write_int32 (*(_cbson_buffer_write_int32_RETURN (*)_cbson_buffer_write_int32_PROTO) _cbson_API[_cbson_buffer_write_int32_INDEX]) + +#define buffer_write_int64 (*(_cbson_buffer_write_int64_RETURN (*)_cbson_buffer_write_int64_PROTO) _cbson_API[_cbson_buffer_write_int64_INDEX]) + +#define buffer_write_int32_at_position (*(_cbson_buffer_write_int32_at_position_RETURN (*)_cbson_buffer_write_int32_at_position_PROTO) _cbson_API[_cbson_buffer_write_int32_at_position_INDEX]) + +#define _downcast_and_check (*(_cbson_downcast_and_check_RETURN (*)_cbson_downcast_and_check_PROTO) _cbson_API[_cbson_downcast_and_check_INDEX]) + #define _cbson_IMPORT _cbson_API = (void **)PyCapsule_Import("_cbson._C_API", 0) #endif diff --git a/bson/_helpers.py b/bson/_helpers.py new file mode 100644 index 0000000000..5a479867c2 --- /dev/null +++ 
b/bson/_helpers.py @@ -0,0 +1,43 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setstate and getstate functions for objects with __slots__, allowing +compatibility with default pickling protocol +""" +from __future__ import annotations + +from typing import Any, Mapping + + +def _setstate_slots(self: Any, state: Any) -> None: + for slot, value in state.items(): + setattr(self, slot, value) + + +def _mangle_name(name: str, prefix: str) -> str: + if name.startswith("__"): + prefix = "_" + prefix + else: + prefix = "" + return prefix + name + + +def _getstate_slots(self: Any) -> Mapping[Any, Any]: + prefix = self.__class__.__name__ + ret = {} + for name in self.__slots__: + mangled_name = _mangle_name(name, prefix) + if hasattr(self, mangled_name): + ret[mangled_name] = getattr(self, mangled_name) + return ret diff --git a/bson/binary.py b/bson/binary.py index bc7e144df7..48eb12b0ac 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations -try: - from uuid import UUID -except ImportError: - # Python2.4 doesn't have a uuid module. - pass - -from bson.py3compat import PY3, binary_type +import struct +import warnings +from enum import Enum +from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union, overload +from uuid import UUID """Tools for representing BSON binary data. """ @@ -27,14 +26,10 @@ """BSON binary subtype for binary data. This is the default subtype for binary data. - -.. versionadded:: 1.5 """ FUNCTION_SUBTYPE = 1 """BSON binary subtype for functions. - -.. versionadded:: 1.5 """ OLD_BINARY_SUBTYPE = 2 @@ -42,15 +37,16 @@ This is the old default subtype, the current default is :data:`BINARY_SUBTYPE`. - -.. versionadded:: 1.7 """ OLD_UUID_SUBTYPE = 3 """Old BSON binary subtype for a UUID. :class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using this subtype. +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.PYTHON_LEGACY`, +:data:`UuidRepresentation.JAVA_LEGACY`, or +:data:`UuidRepresentation.CSHARP_LEGACY`. .. versionadded:: 2.1 """ @@ -58,174 +54,524 @@ UUID_SUBTYPE = 4 """BSON binary subtype for a UUID. -This is the new BSON binary subtype for UUIDs. The -current default is :data:`OLD_UUID_SUBTYPE` but will -change to this in a future release. +This is the standard BSON binary subtype for UUIDs. +:class:`uuid.UUID` instances will automatically be encoded +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.STANDARD`. 
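The _helpers.py module added above is small but subtle: __slots__ entries that start with a double underscore are stored under their name-mangled attribute, and _mangle_name reproduces that mangling so pickling round-trips. A sketch of the intended use (Point is a hypothetical class; the two helpers are the real ones from this file):

```python
import pickle
from bson._helpers import _getstate_slots, _setstate_slots

class Point:
    __slots__ = ("x", "__hidden")  # "__hidden" is stored as _Point__hidden
    __getstate__ = _getstate_slots
    __setstate__ = _setstate_slots

    def __init__(self, x: int) -> None:
        self.x = x
        self.__hidden = x * 2

p = pickle.loads(pickle.dumps(Point(3)))
assert p.x == 3
```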
+""" + + +if TYPE_CHECKING: + from array import array as _array + from mmap import mmap as _mmap + + +class UuidRepresentation: + UNSPECIFIED = 0 + """An unspecified UUID representation. + + When configured, :class:`uuid.UUID` instances will **not** be + automatically encoded to or decoded from :class:`~bson.binary.Binary`. + When encoding a :class:`uuid.UUID` instance, an error will be raised. + To encode a :class:`uuid.UUID` instance with this configuration, it must + be wrapped in the :class:`~bson.binary.Binary` class by the application + code. When decoding a BSON binary field with a UUID subtype, a + :class:`~bson.binary.Binary` instance will be returned instead of a + :class:`uuid.UUID` instance. + + See `unspecified representation details `_ for details. + + .. versionadded:: 3.11 + """ + + STANDARD = UUID_SUBTYPE + """The standard UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`UUID_SUBTYPE`. + + See `standard representation details `_ for details. + + .. versionadded:: 3.11 + """ + + PYTHON_LEGACY = OLD_UUID_SUBTYPE + """The Python legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`OLD_UUID_SUBTYPE`. + + See `python legacy representation details `_ for details. + + .. versionadded:: 3.11 + """ -.. versionchanged:: 2.1 - Changed to subtype 4. -.. versionadded:: 1.5 + JAVA_LEGACY = 5 + """The Java legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the Java driver's legacy byte order. + + See `Java Legacy UUID `_ for details. + + .. versionadded:: 3.11 + """ + + CSHARP_LEGACY = 6 + """The C#/.net legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the C# driver's legacy byte order. + + See `C# Legacy UUID `_ for details. + + .. versionadded:: 3.11 + """ + + +STANDARD = UuidRepresentation.STANDARD +"""An alias for :data:`UuidRepresentation.STANDARD`. + +.. versionadded:: 3.0 """ -JAVA_LEGACY = 5 -"""Used with :attr:`pymongo.collection.Collection.uuid_subtype` -to specify that UUIDs should be stored in the legacy byte order -used by the Java driver. +PYTHON_LEGACY = UuidRepresentation.PYTHON_LEGACY +"""An alias for :data:`UuidRepresentation.PYTHON_LEGACY`. -:class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using :data:`OLD_UUID_SUBTYPE`. +.. versionadded:: 3.0 +""" + +JAVA_LEGACY = UuidRepresentation.JAVA_LEGACY +"""An alias for :data:`UuidRepresentation.JAVA_LEGACY`. +.. versionchanged:: 3.6 + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. versionadded:: 2.3 """ -CSHARP_LEGACY = 6 -"""Used with :attr:`pymongo.collection.Collection.uuid_subtype` -to specify that UUIDs should be stored in the legacy byte order -used by the C# driver. - -:class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using :data:`OLD_UUID_SUBTYPE`. +CSHARP_LEGACY = UuidRepresentation.CSHARP_LEGACY +"""An alias for :data:`UuidRepresentation.CSHARP_LEGACY`. +.. versionchanged:: 3.6 + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. 
versionadded:: 2.3 """ -ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY) +ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE) +ALL_UUID_REPRESENTATIONS = ( + UuidRepresentation.UNSPECIFIED, + UuidRepresentation.STANDARD, + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, +) +UUID_REPRESENTATION_NAMES = { + UuidRepresentation.UNSPECIFIED: "UuidRepresentation.UNSPECIFIED", + UuidRepresentation.STANDARD: "UuidRepresentation.STANDARD", + UuidRepresentation.PYTHON_LEGACY: "UuidRepresentation.PYTHON_LEGACY", + UuidRepresentation.JAVA_LEGACY: "UuidRepresentation.JAVA_LEGACY", + UuidRepresentation.CSHARP_LEGACY: "UuidRepresentation.CSHARP_LEGACY", +} MD5_SUBTYPE = 5 """BSON binary subtype for an MD5 hash. +""" -.. versionadded:: 1.5 +COLUMN_SUBTYPE = 7 +"""BSON binary subtype for columns. + +.. versionadded:: 4.0 """ +SENSITIVE_SUBTYPE = 8 +"""BSON binary subtype for sensitive data. + +.. versionadded:: 4.5 +""" + + +VECTOR_SUBTYPE = 9 +"""BSON binary subtype for densely packed vector data. + +.. versionadded:: 4.10 +""" + + USER_DEFINED_SUBTYPE = 128 """BSON binary subtype for any user defined structure. - -.. versionadded:: 1.5 """ -class Binary(binary_type): +class BinaryVectorDtype(Enum): + """Datatypes of vector subtype. + + :param FLOAT32: (0x27) Pack list of :class:`float` as float32 + :param INT8: (0x03) Pack list of :class:`int` in [-128, 127] as signed int8 + :param PACKED_BIT: (0x10) Pack list of :class:`int` in [0, 255] as unsigned uint8 + + The `PACKED_BIT` value represents a special case where vector values themselves + can only be of two values (0 or 1) but these are packed together into groups of 8, + a byte. In Python, these are displayed as ints in range [0, 255] + + Each value is of type bytes with a length of one. + + .. versionadded:: 4.10 + """ + + INT8 = b"\x03" + FLOAT32 = b"\x27" + PACKED_BIT = b"\x10" + + +class BinaryVector: + """Vector of numbers along with metadata for binary interoperability. + .. versionadded:: 4.10 + """ + + __slots__ = ("data", "dtype", "padding") + + def __init__(self, data: Sequence[float | int], dtype: BinaryVectorDtype, padding: int = 0): + """ + :param data: Sequence of numbers representing the mathematical vector. + :param dtype: The data type stored in binary + :param padding: The number of bits in the final byte that are to be ignored + when a vector element's size is less than a byte + and the length of the vector is not a multiple of 8. + """ + self.data = data + self.dtype = dtype + self.padding = padding + + def __repr__(self) -> str: + return f"BinaryVector(dtype={self.dtype}, padding={self.padding}, data={self.data})" + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, BinaryVector): + return False + return ( + self.dtype == other.dtype and self.padding == other.padding and self.data == other.data + ) + + def __len__(self) -> int: + return len(self.data) + + +class Binary(bytes): """Representation of BSON binary data. - This is necessary because we want to represent Python strings as - the BSON string type. We need to wrap binary data so we can tell + We want to represent Python strings as the BSON string type. + We need to wrap binary data so that we can tell the difference between what should be considered binary data and what should be considered a string when we encode to BSON. - Raises TypeError if `data` is not an instance of :class:`str` - (:class:`bytes` in python 3) or `subtype` is not an instance of - :class:`int`. 
Raises ValueError if `subtype` is not in [0, 256). + Subtype 9 provides a space-efficient representation of 1-dimensional vector data. + Its data is prepended with two bytes of metadata. + The first (dtype) describes its data type, such as float32 or int8. + The second (padding) prescribes the number of bits to ignore in the final byte. + This is relevant when the element size of the dtype is not a multiple of 8. + + Raises TypeError if `subtype` is not an instance of :class:`int`. + Raises ValueError if `subtype` is not in [0, 256). .. note:: - In python 3 instances of Binary with subtype 0 will be decoded - directly to :class:`bytes`. + Instances of Binary with subtype 0 will be decoded directly to :class:`bytes`. - :Parameters: - - `data`: the binary data to represent - - `subtype` (optional): the `binary subtype - `_ + :param data: the binary data to represent. Can be any bytes-like type + that implements the buffer protocol. + :param subtype: the `binary subtype + `_ to use + + .. versionchanged:: 3.9 + Support any bytes-like type that implements the buffer protocol. + + .. versionchanged:: 4.10 + Addition of vector subtype. """ _type_marker = 5 + __subtype: int - def __new__(cls, data, subtype=BINARY_SUBTYPE): - if not isinstance(data, binary_type): - raise TypeError("data must be an " - "instance of %s" % (binary_type.__name__,)) + def __new__( + cls: Type[Binary], + data: Union[memoryview, bytes, bytearray, _mmap, _array[Any]], + subtype: int = BINARY_SUBTYPE, + ) -> Binary: if not isinstance(subtype, int): - raise TypeError("subtype must be an instance of int") + raise TypeError(f"subtype must be an instance of int, not {type(subtype)}") if subtype >= 256 or subtype < 0: raise ValueError("subtype must be contained in [0, 256)") - self = binary_type.__new__(cls, data) + # Support any type that implements the buffer protocol. + self = bytes.__new__(cls, memoryview(data).tobytes()) self.__subtype = subtype return self - @property - def subtype(self): - """Subtype of this binary data. + @classmethod + def from_uuid( + cls: Type[Binary], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD + ) -> Binary: + """Create a BSON Binary object from a Python UUID. + + Creates a :class:`~bson.binary.Binary` object from a + :class:`uuid.UUID` instance. Assumes that the native + :class:`uuid.UUID` instance uses the byte-order implied by the + provided ``uuid_representation``. + + Raises :exc:`TypeError` if `uuid` is not an instance of + :class:`~uuid.UUID`. + + :param uuid: A :class:`uuid.UUID` instance. + :param uuid_representation: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + See `UUID representations `_ for details. + + .. versionadded:: 3.11 + """ + if not isinstance(uuid, UUID): + raise TypeError(f"uuid must be an instance of uuid.UUID, not {type(uuid)}") + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError( + "cannot encode native uuid.UUID with " + "UuidRepresentation.UNSPECIFIED. UUIDs can be manually " + "converted to bson.Binary instances using " + "bson.Binary.from_uuid() or a different UuidRepresentation " + "can be configured. See the documentation for " + "UuidRepresentation for more information." 
+ ) + + subtype = OLD_UUID_SUBTYPE + if uuid_representation == UuidRepresentation.PYTHON_LEGACY: + payload = uuid.bytes + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + from_uuid = uuid.bytes + payload = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + payload = uuid.bytes_le + else: + # uuid_representation == UuidRepresentation.STANDARD + subtype = UUID_SUBTYPE + payload = uuid.bytes + + return cls(payload, subtype) + + def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUID: + """Create a Python UUID from this BSON Binary object. + + Decodes this binary object as a native :class:`uuid.UUID` instance + with the provided ``uuid_representation``. + + Raises :exc:`ValueError` if this :class:`~bson.binary.Binary` instance + does not contain a UUID. + + :param uuid_representation: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + See `UUID representations `_ for details. + + .. versionadded:: 3.11 """ + if self.subtype not in ALL_UUID_SUBTYPES: + raise ValueError(f"cannot decode subtype {self.subtype} as a uuid") + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError("uuid_representation cannot be UNSPECIFIED") + elif uuid_representation == UuidRepresentation.PYTHON_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self) + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self[0:8][::-1] + self[8:16][::-1]) + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes_le=self) + else: + # uuid_representation == UuidRepresentation.STANDARD + if self.subtype == UUID_SUBTYPE: + return UUID(bytes=self) + + raise ValueError( + f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" + ) + + @classmethod + @overload + def from_vector(cls: Type[Binary], vector: BinaryVector) -> Binary: + ... + + @classmethod + @overload + def from_vector( + cls: Type[Binary], + vector: Union[list[int], list[float]], + dtype: BinaryVectorDtype, + padding: int = 0, + ) -> Binary: + ... + + @classmethod + def from_vector( + cls: Type[Binary], + vector: Union[BinaryVector, list[int], list[float]], + dtype: Optional[BinaryVectorDtype] = None, + padding: Optional[int] = None, + ) -> Binary: + """Create a BSON :class:`~bson.binary.Binary` of Vector subtype. + + To interpret the representation of the numbers, a data type must be included. + See :class:`~bson.binary.BinaryVectorDtype` for available types and descriptions. + + The dtype and padding are prepended to the binary data's value. + + :param vector: Either a List of values, or a :class:`~bson.binary.BinaryVector` dataclass. + :param dtype: Data type of the values + :param padding: For fractional bytes, number of bits to ignore at end of vector. + :return: Binary packed data identified by dtype and padding. + + .. versionchanged:: 4.14 + When padding is non-zero, ignored bits should be zero. Raise exception on encoding, warn on decoding. + + .. versionadded:: 4.10 + """ + if isinstance(vector, BinaryVector): + if dtype or padding: + raise ValueError( + "The first argument, vector, has type BinaryVector. 
" + "dtype or padding cannot be separately defined, but were." + ) + dtype = vector.dtype + padding = vector.padding + vector = vector.data # type: ignore + + padding = 0 if padding is None else padding + if dtype == BinaryVectorDtype.INT8: # pack ints in [-128, 127] as signed int8 + format_str = "b" + if padding: + raise ValueError(f"padding does not apply to {dtype=}") + elif dtype == BinaryVectorDtype.PACKED_BIT: # pack ints in [0, 255] as unsigned uint8 + format_str = "B" + if 0 <= padding > 7: + raise ValueError(f"{padding=}. It must be in [0,1, ..7].") + if padding and not vector: + raise ValueError("Empty vector with non-zero padding.") + elif dtype == BinaryVectorDtype.FLOAT32: # pack floats as float32 + format_str = "f" + if padding: + raise ValueError(f"padding does not apply to {dtype=}") + else: + raise NotImplementedError("%s not yet supported" % dtype) + + metadata = struct.pack(" BinaryVector: + """From the Binary, create a list of numbers, along with dtype and padding. + + :return: BinaryVector + + .. versionadded:: 4.10 + """ + + if self.subtype != VECTOR_SUBTYPE: + raise ValueError(f"Cannot decode subtype {self.subtype} as a vector") + + position = 0 + dtype, padding = struct.unpack_from(" 7 or padding < 0: + raise ValueError(f"Corrupt data. Padding ({padding}) must be between 0 and 7.") + dtype_format = "B" + format_string = f"<{n_values}{dtype_format}" + unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) + if padding and n_values and unpacked_uint8s[-1] & (1 << padding) - 1 != 0: + warnings.warn( + "Vector has a padding P, but bits in the final byte lower than P are non-zero. For pymongo>=5.0, they must be zero.", + DeprecationWarning, + stacklevel=2, + ) + return BinaryVector(unpacked_uint8s, dtype, padding) + + else: + raise NotImplementedError("Binary Vector dtype %s not yet supported" % dtype.name) + + @property + def subtype(self) -> int: + """Subtype of this binary data.""" return self.__subtype - def __getnewargs__(self): + def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 - data = super(Binary, self).__getnewargs__()[0] - if PY3 and not isinstance(data, binary_type): - data = data.encode('latin-1') + data = super().__getnewargs__()[0] + if not isinstance(data, bytes): + data = data.encode("latin-1") return data, self.__subtype - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Binary): - return ((self.__subtype, binary_type(self)) == - (other.subtype, binary_type(other))) + return (self.__subtype, bytes(self)) == (other.subtype, bytes(other)) # We don't return NotImplemented here because if we did then # Binary("foo") == "foo" would return True, since Binary is a # subclass of str... return False - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "Binary(%s, %s)" % (binary_type.__repr__(self), self.__subtype) - - -class UUIDLegacy(Binary): - """UUID wrapper to support working with UUIDs stored as legacy - BSON binary subtype 3. - - .. 
doctest:: - - >>> import uuid - >>> from bson.binary import Binary, UUIDLegacy, UUID_SUBTYPE - >>> my_uuid = uuid.uuid4() - >>> coll = db.test - >>> coll.uuid_subtype = UUID_SUBTYPE - >>> coll.insert({'uuid': Binary(my_uuid.bytes, 3)}) - ObjectId('...') - >>> coll.find({'uuid': my_uuid}).count() - 0 - >>> coll.find({'uuid': UUIDLegacy(my_uuid)}).count() - 1 - >>> coll.find({'uuid': UUIDLegacy(my_uuid)})[0]['uuid'] - UUID('...') - >>> - >>> # Convert from subtype 3 to subtype 4 - >>> doc = coll.find_one({'uuid': UUIDLegacy(my_uuid)}) - >>> coll.save(doc) - ObjectId('...') - >>> coll.find({'uuid': UUIDLegacy(my_uuid)}).count() - 0 - >>> coll.find({'uuid': {'$in': [UUIDLegacy(my_uuid), my_uuid]}}).count() - 1 - >>> coll.find_one({'uuid': my_uuid})['uuid'] - UUID('...') - - Raises TypeError if `obj` is not an instance of :class:`~uuid.UUID`. - - :Parameters: - - `obj`: An instance of :class:`~uuid.UUID`. - """ + def __hash__(self) -> int: + return super().__hash__() ^ hash(self.__subtype) - def __new__(cls, obj): - if not isinstance(obj, UUID): - raise TypeError("obj must be an instance of uuid.UUID") - # Python 3.0(.1) returns a bytearray instance for bytes (3.1 and - # newer just return a bytes instance). Convert that to binary_type - # for compatibility. - self = Binary.__new__(cls, binary_type(obj.bytes), OLD_UUID_SUBTYPE) - self.__uuid = obj - return self - - def __getnewargs__(self): - # Support copy and deepcopy - return (self.__uuid,) - - @property - def uuid(self): - """UUID instance wrapped by this UUIDLegacy instance. - """ - return self.__uuid + def __ne__(self, other: Any) -> bool: + return not self == other - def __repr__(self): - return "UUIDLegacy('%s')" % self.__uuid + def __repr__(self) -> str: + if self.__subtype == SENSITIVE_SUBTYPE: + return f"<Binary(REDACTED, {self.__subtype})>" + else: + return f"Binary({bytes.__repr__(self)}, {self.__subtype})" diff --git a/bson/bson-endian.h b/bson/bson-endian.h new file mode 100644 index 0000000000..e906b0776f --- /dev/null +++ b/bson/bson-endian.h @@ -0,0 +1,233 @@ +/* + * Copyright 2013-2016 MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
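Taken together, the new classmethods give Binary symmetric constructors and accessors for both UUIDs and vectors. A round-trip sketch using the real APIs from this hunk, with illustrative values:

```python
import uuid
from bson.binary import Binary, BinaryVectorDtype, UuidRepresentation

u = uuid.uuid4()
b = Binary.from_uuid(u)  # STANDARD: subtype 4, RFC-4122 byte order
assert b.subtype == 4 and b.as_uuid() == u

legacy = Binary.from_uuid(u, UuidRepresentation.JAVA_LEGACY)
assert legacy.subtype == 3  # the legacy encodings share subtype 3
assert legacy.as_uuid(UuidRepresentation.JAVA_LEGACY) == u

vec = Binary.from_vector([1.0, 2.0, 3.0], BinaryVectorDtype.FLOAT32)
assert vec.subtype == 9  # VECTOR_SUBTYPE
assert vec.as_vector().data == [1.0, 2.0, 3.0]
```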
diff --git a/bson/bson-endian.h b/bson/bson-endian.h
new file mode 100644
index 0000000000..e906b0776f
--- /dev/null
+++ b/bson/bson-endian.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2013-2016 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef BSON_ENDIAN_H
+#define BSON_ENDIAN_H
+
+
+#if defined(__sun)
+# include <sys/byteorder.h>
+#endif
+
+
+#ifdef _MSC_VER
+# define BSON_INLINE __inline
+#else
+# include <stdint.h>
+# define BSON_INLINE __inline__
+#endif
+
+
+#define BSON_BIG_ENDIAN    4321
+#define BSON_LITTLE_ENDIAN 1234
+
+
+/* WORDS_BIGENDIAN from pyconfig.h / Python.h */
+#ifdef WORDS_BIGENDIAN
+# define BSON_BYTE_ORDER BSON_BIG_ENDIAN
+#else
+# define BSON_BYTE_ORDER BSON_LITTLE_ENDIAN
+#endif
+
+
+#if defined(__sun)
+# define BSON_UINT16_SWAP_LE_BE(v) BSWAP_16((uint16_t)v)
+# define BSON_UINT32_SWAP_LE_BE(v) BSWAP_32((uint32_t)v)
+# define BSON_UINT64_SWAP_LE_BE(v) BSWAP_64((uint64_t)v)
+#elif defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) && \
+  (__clang_major__ >= 3) && (__clang_minor__ >= 1)
+# if __has_builtin(__builtin_bswap16)
+#  define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16(v)
+# endif
+# if __has_builtin(__builtin_bswap32)
+#  define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32(v)
+# endif
+# if __has_builtin(__builtin_bswap64)
+#  define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64(v)
+# endif
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+# if __GNUC__ >= 4 && defined (__GNUC_MINOR__) && __GNUC_MINOR__ >= 3
+#  define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32 ((uint32_t)v)
+#  define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64 ((uint64_t)v)
+# endif
+# if __GNUC__ >= 4 && defined (__GNUC_MINOR__) && __GNUC_MINOR__ >= 8
+#  define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16 ((uint32_t)v)
+# endif
+#endif
+
+
+#ifndef BSON_UINT16_SWAP_LE_BE
+# define BSON_UINT16_SWAP_LE_BE(v) __bson_uint16_swap_slow ((uint16_t)v)
+#endif
+
+
+#ifndef BSON_UINT32_SWAP_LE_BE
+# define BSON_UINT32_SWAP_LE_BE(v) __bson_uint32_swap_slow ((uint32_t)v)
+#endif
+
+
+#ifndef BSON_UINT64_SWAP_LE_BE
+# define BSON_UINT64_SWAP_LE_BE(v) __bson_uint64_swap_slow ((uint64_t)v)
+#endif
+
+
+#if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN
+# define BSON_UINT16_FROM_LE(v) ((uint16_t)v)
+# define BSON_UINT16_TO_LE(v)   ((uint16_t)v)
+# define BSON_UINT16_FROM_BE(v) BSON_UINT16_SWAP_LE_BE (v)
+# define BSON_UINT16_TO_BE(v)   BSON_UINT16_SWAP_LE_BE (v)
+# define BSON_UINT32_FROM_LE(v) ((uint32_t)v)
+# define BSON_UINT32_TO_LE(v)   ((uint32_t)v)
+# define BSON_UINT32_FROM_BE(v) BSON_UINT32_SWAP_LE_BE (v)
+# define BSON_UINT32_TO_BE(v)   BSON_UINT32_SWAP_LE_BE (v)
+# define BSON_UINT64_FROM_LE(v) ((uint64_t)v)
+# define BSON_UINT64_TO_LE(v)   ((uint64_t)v)
+# define BSON_UINT64_FROM_BE(v) BSON_UINT64_SWAP_LE_BE (v)
+# define BSON_UINT64_TO_BE(v)   BSON_UINT64_SWAP_LE_BE (v)
+# define BSON_DOUBLE_FROM_LE(v) ((double)v)
+# define BSON_DOUBLE_TO_LE(v)   ((double)v)
+#elif BSON_BYTE_ORDER == BSON_BIG_ENDIAN
+# define BSON_UINT16_FROM_LE(v) BSON_UINT16_SWAP_LE_BE (v)
+# define BSON_UINT16_TO_LE(v)   BSON_UINT16_SWAP_LE_BE (v)
+# define BSON_UINT16_FROM_BE(v) ((uint16_t)v)
+# define BSON_UINT16_TO_BE(v)   ((uint16_t)v)
+# define BSON_UINT32_FROM_LE(v) BSON_UINT32_SWAP_LE_BE (v)
+# define BSON_UINT32_TO_LE(v)   BSON_UINT32_SWAP_LE_BE (v)
+# define BSON_UINT32_FROM_BE(v) ((uint32_t)v)
+# define BSON_UINT32_TO_BE(v)   ((uint32_t)v)
+# define BSON_UINT64_FROM_LE(v) BSON_UINT64_SWAP_LE_BE (v)
+# define BSON_UINT64_TO_LE(v)   BSON_UINT64_SWAP_LE_BE (v)
+# define BSON_UINT64_FROM_BE(v) ((uint64_t)v)
+# define BSON_UINT64_TO_BE(v)   ((uint64_t)v)
+# define BSON_DOUBLE_FROM_LE(v) (__bson_double_swap_slow (v))
+# define BSON_DOUBLE_TO_LE(v)   (__bson_double_swap_slow (v))
+#else
+# error "The endianness of target architecture is unknown."
+#endif + + +/* + *-------------------------------------------------------------------------- + * + * __bson_uint16_swap_slow -- + * + * Fallback endianness conversion for 16-bit integers. + * + * Returns: + * The endian swapped version. + * + * Side effects: + * None. + * + *-------------------------------------------------------------------------- + */ + +static BSON_INLINE uint16_t +__bson_uint16_swap_slow (uint16_t v) /* IN */ +{ + return ((v & 0x00FF) << 8) | + ((v & 0xFF00) >> 8); +} + + +/* + *-------------------------------------------------------------------------- + * + * __bson_uint32_swap_slow -- + * + * Fallback endianness conversion for 32-bit integers. + * + * Returns: + * The endian swapped version. + * + * Side effects: + * None. + * + *-------------------------------------------------------------------------- + */ + +static BSON_INLINE uint32_t +__bson_uint32_swap_slow (uint32_t v) /* IN */ +{ + return ((v & 0x000000FFU) << 24) | + ((v & 0x0000FF00U) << 8) | + ((v & 0x00FF0000U) >> 8) | + ((v & 0xFF000000U) >> 24); +} + + +/* + *-------------------------------------------------------------------------- + * + * __bson_uint64_swap_slow -- + * + * Fallback endianness conversion for 64-bit integers. + * + * Returns: + * The endian swapped version. + * + * Side effects: + * None. + * + *-------------------------------------------------------------------------- + */ + +static BSON_INLINE uint64_t +__bson_uint64_swap_slow (uint64_t v) /* IN */ +{ + return ((v & 0x00000000000000FFULL) << 56) | + ((v & 0x000000000000FF00ULL) << 40) | + ((v & 0x0000000000FF0000ULL) << 24) | + ((v & 0x00000000FF000000ULL) << 8) | + ((v & 0x000000FF00000000ULL) >> 8) | + ((v & 0x0000FF0000000000ULL) >> 24) | + ((v & 0x00FF000000000000ULL) >> 40) | + ((v & 0xFF00000000000000ULL) >> 56); +} + + +/* + *-------------------------------------------------------------------------- + * + * __bson_double_swap_slow -- + * + * Fallback endianness conversion for double floating point. + * + * Returns: + * The endian swapped version. + * + * Side effects: + * None. + * + *-------------------------------------------------------------------------- + */ + + +static BSON_INLINE double +__bson_double_swap_slow (double v) /* IN */ +{ + uint64_t uv; + + memcpy(&uv, &v, sizeof(v)); + uv = BSON_UINT64_SWAP_LE_BE(uv); + memcpy(&v, &uv, sizeof(v)); + + return v; +} + + +#endif /* BSON_ENDIAN_H */ diff --git a/bson/buffer.c b/bson/buffer.c index 3c5f71bba2..cc75202746 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -1,5 +1,5 @@ /* - * Copyright 2009-2014 MongoDB, Inc. + * Copyright 2009-2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,10 @@ * limitations under the License. */ +/* Include Python.h so we can set Python's error indicator. */ +#define PY_SSIZE_T_CLEAN +#include "Python.h" + #include #include @@ -27,12 +31,19 @@ struct buffer { int position; }; +/* Set Python's error indicator to MemoryError. + * Called after allocation failures. */ +static void set_memory_error(void) { + PyErr_NoMemory(); +} + /* Allocate and return a new buffer. - * Return NULL on allocation failure. */ -buffer_t buffer_new(void) { + * Return NULL and sets MemoryError on allocation failure. 
*/ +buffer_t pymongo_buffer_new(void) { buffer_t buffer; buffer = (buffer_t)malloc(sizeof(struct buffer)); if (buffer == NULL) { + set_memory_error(); return NULL; } @@ -41,6 +52,7 @@ buffer_t buffer_new(void) { buffer->buffer = (char*)malloc(sizeof(char) * INITIAL_BUFFER_SIZE); if (buffer->buffer == NULL) { free(buffer); + set_memory_error(); return NULL; } @@ -49,17 +61,20 @@ buffer_t buffer_new(void) { /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer) { +int pymongo_buffer_free(buffer_t buffer) { if (buffer == NULL) { return 1; } - free(buffer->buffer); + /* Buffer will be NULL when buffer_grow fails. */ + if (buffer->buffer != NULL) { + free(buffer->buffer); + } free(buffer); return 0; } /* Grow `buffer` to at least `min_length`. - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. */ static int buffer_grow(buffer_t buffer, int min_length) { int old_size = 0; int size = buffer->size; @@ -79,7 +94,7 @@ static int buffer_grow(buffer_t buffer, int min_length) { buffer->buffer = (char*)realloc(buffer->buffer, sizeof(char) * size); if (buffer->buffer == NULL) { free(old_buffer); - free(buffer); + set_memory_error(); return 1; } buffer->size = size; @@ -87,17 +102,27 @@ static int buffer_grow(buffer_t buffer, int min_length) { } /* Assure that `buffer` has at least `size` free bytes (and grow if needed). - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. + * Return non-zero and sets ValueError if `size` would exceed 2GiB. */ static int buffer_assure_space(buffer_t buffer, int size) { - if (buffer->position + size <= buffer->size) { + int new_size = buffer->position + size; + /* Check for overflow. */ + if (new_size < buffer->position) { + PyErr_SetString(PyExc_ValueError, + "Document would overflow BSON size limit"); + return 1; + } + + if (new_size <= buffer->size) { return 0; } - return buffer_grow(buffer, buffer->position + size); + return buffer_grow(buffer, new_size); } /* Save `size` bytes from the current position in `buffer` (and grow if needed). - * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size) { + * Return offset for writing, or -1 on failure. + * Sets MemoryError or ValueError on failure. */ +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size) { int position = buffer->position; if (buffer_assure_space(buffer, size) != 0) { return -1; @@ -107,8 +132,9 @@ buffer_position buffer_save_space(buffer_t buffer, int size) { } /* Write `size` bytes from `data` to `buffer` (and grow if needed). - * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size) { + * Return non-zero on failure. + * Sets MemoryError or ValueError on failure. */ +int pymongo_buffer_write(buffer_t buffer, const char* data, int size) { if (buffer_assure_space(buffer, size) != 0) { return 1; } @@ -118,29 +144,14 @@ int buffer_write(buffer_t buffer, const char* data, int size) { return 0; } -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. 
*/ -int buffer_write_at_position(buffer_t buffer, buffer_position position, - const char* data, int size) { - if (position + size > buffer->size) { - buffer_free(buffer); - return 1; - } - - memcpy(buffer->buffer + position, data, size); - return 0; -} - - -int buffer_get_position(buffer_t buffer) { +int pymongo_buffer_get_position(buffer_t buffer) { return buffer->position; } -char* buffer_get_buffer(buffer_t buffer) { +char* pymongo_buffer_get_buffer(buffer_t buffer) { return buffer->buffer; } -void buffer_update_position(buffer_t buffer, buffer_position new_position) { +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position) { buffer->position = new_position; } diff --git a/bson/buffer.h b/bson/buffer.h index 455c47de11..a78e34e4de 100644 --- a/bson/buffer.h +++ b/bson/buffer.h @@ -1,5 +1,5 @@ /* - * Copyright 2009-2014 MongoDB, Inc. + * Copyright 2009-2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,30 +27,25 @@ typedef int buffer_position; /* Allocate and return a new buffer. * Return NULL on allocation failure. */ -buffer_t buffer_new(void); +buffer_t pymongo_buffer_new(void); /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer); +int pymongo_buffer_free(buffer_t buffer); /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size); +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size); /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size); - -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, const char* data, int size); +int pymongo_buffer_write(buffer_t buffer, const char* data, int size); /* Getters for the internals of a buffer_t. * Should try to avoid using these as much as possible * since they break the abstraction. */ -buffer_position buffer_get_position(buffer_t buffer); -char* buffer_get_buffer(buffer_t buffer); -void buffer_update_position(buffer_t buffer, buffer_position new_position); +buffer_position pymongo_buffer_get_position(buffer_t buffer); +char* pymongo_buffer_get_buffer(buffer_t buffer); +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position); #endif diff --git a/bson/code.py b/bson/code.py index d049944cb1..f0523b2a95 100644 --- a/bson/code.py +++ b/bson/code.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,69 +12,89 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing JavaScript code in BSON. -""" +"""Tools for representing JavaScript code in BSON.""" +from __future__ import annotations + +from collections.abc import Mapping as _Mapping +from typing import Any, Mapping, Optional, Type, Union + class Code(str): """BSON's JavaScript code type. 
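
# Sketch of the scope-merge order documented in the Code docstring below: an
# existing Code's own scope is taken first, then the `scope` mapping, then
# keyword arguments (note the mapping is shared by reference, not copied).
from bson.code import Code

base = Code("function () { return x + y; }", {"x": 1, "y": 1})
merged = Code(base, {"y": 2}, y=3)
assert merged.scope == {"x": 1, "y": 3}  # kwargs win
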
Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3) or `scope` - is not ``None`` or an instance of :class:`dict`. + :class:`str` or `scope` is not ``None`` or an instance + of :class:`dict`. Scope variables can be set by passing a dictionary as the `scope` argument or by using keyword arguments. If a variable is set as a keyword argument it will override any setting for that variable in the `scope` dictionary. - :Parameters: - - `code`: string containing JavaScript code to be evaluated - - `scope` (optional): dictionary representing the scope in which + :param code: A string containing JavaScript code to be evaluated or another + instance of Code. In the latter case, the scope of `code` becomes this + Code's :attr:`scope`. + :param scope: dictionary representing the scope in which `code` should be evaluated - a mapping from identifiers (as - strings) to values - - `**kwargs` (optional): scope variables can also be passed as - keyword arguments + strings) to values. Defaults to ``None``. This is applied after any + scope associated with a given `code` above. + :param kwargs: scope variables can also be passed as + keyword arguments. These are applied after `scope` and `code`. + + .. versionchanged:: 3.4 + The default value for :attr:`scope` is ``None`` instead of ``{}``. - .. versionadded:: 1.9 - Ability to pass scope values using keyword arguments. """ _type_marker = 13 + __scope: Union[Mapping[str, Any], None] - def __new__(cls, code, scope=None, **kwargs): - if not isinstance(code, basestring): - raise TypeError("code must be an " - "instance of %s" % (basestring.__name__,)) + def __new__( + cls: Type[Code], + code: Union[str, Code], + scope: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Code: + if not isinstance(code, str): + raise TypeError(f"code must be an instance of str, not {type(code)}") self = str.__new__(cls, code) try: - self.__scope = code.scope + self.__scope = code.scope # type: ignore except AttributeError: - self.__scope = {} + self.__scope = None if scope is not None: - if not isinstance(scope, dict): - raise TypeError("scope must be an instance of dict") - self.__scope.update(scope) - - self.__scope.update(kwargs) + if not isinstance(scope, _Mapping): + raise TypeError(f"scope must be an instance of dict, not {type(scope)}") + if self.__scope is not None: + self.__scope.update(scope) # type: ignore + else: + self.__scope = scope + + if kwargs: + if self.__scope is not None: + self.__scope.update(kwargs) # type: ignore + else: + self.__scope = kwargs return self @property - def scope(self): - """Scope dictionary for this instance. - """ + def scope(self) -> Optional[Mapping[str, Any]]: + """Scope dictionary for this instance or ``None``.""" return self.__scope - def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + def __repr__(self) -> str: + return f"Code({str.__repr__(self)}, {self.__scope!r})" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Code): return (self.__scope, str(self)) == (other.__scope, str(other)) return False - def __ne__(self, other): + __hash__: Any = None + + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/codec_options.py b/bson/codec_options.py new file mode 100644 index 0000000000..add5416a5b --- /dev/null +++ b/bson/codec_options.py @@ -0,0 +1,521 @@ +# Copyright 2014-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for specifying BSON codec options.""" +from __future__ import annotations + +import abc +import datetime +import enum +from collections.abc import MutableMapping as _MutableMapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Iterable, + Mapping, + NamedTuple, + Optional, + Tuple, + Type, + Union, + cast, +) + +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + UUID_REPRESENTATION_NAMES, + UuidRepresentation, +) +from bson.typings import _DocumentType + +_RAW_BSON_DOCUMENT_MARKER = 101 + + +def _raw_document_class(document_class: Any) -> bool: + """Determine if a document_class is a RawBSONDocument class.""" + marker = getattr(document_class, "_type_marker", None) + return marker == _RAW_BSON_DOCUMENT_MARKER + + +class TypeEncoder(abc.ABC): + """Base class for defining type codec classes which describe how a + custom type can be transformed to one of the types BSON understands. + + Codec classes must implement the ``python_type`` attribute, and the + ``transform_python`` method to support encoding. + + See `encode data with type codecs `_ documentation for an example. + """ + + @abc.abstractproperty + def python_type(self) -> Any: + """The Python type to be converted into something serializable.""" + + @abc.abstractmethod + def transform_python(self, value: Any) -> Any: + """Convert the given Python object into something serializable.""" + + +class TypeDecoder(abc.ABC): + """Base class for defining type codec classes which describe how a + BSON type can be transformed to a custom type. + + Codec classes must implement the ``bson_type`` attribute, and the + ``transform_bson`` method to support decoding. + + See `encode data with type codecs `_ documentation for an example. + """ + + @abc.abstractproperty + def bson_type(self) -> Any: + """The BSON type to be converted into our own type.""" + + @abc.abstractmethod + def transform_bson(self, value: Any) -> Any: + """Convert the given BSON value into our own type.""" + + +class TypeCodec(TypeEncoder, TypeDecoder): + """Base class for defining type codec classes which describe how a + custom type can be transformed to/from one of the types :mod:`bson` + can already encode/decode. + + Codec classes must implement the ``python_type`` attribute, and the + ``transform_python`` method to support encoding, as well as the + ``bson_type`` attribute, and the ``transform_bson`` method to support + decoding. + + See `encode data with type codecs `_ documentation for an example. + """ + + +_Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +_Fallback = Callable[[Any], Any] + + +class TypeRegistry: + """Encapsulates type codecs used in encoding and / or decoding BSON, as + well as the fallback encoder. Type registries cannot be modified after + instantiation. 
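
# Hypothetical end-to-end sketch (DateCodec is not part of this diff): BSON
# has no native date-only type, so a TypeCodec can map datetime.date to
# datetime.datetime on encode and back again on decode.
import datetime
from bson.codec_options import TypeCodec, TypeRegistry

class DateCodec(TypeCodec):
    python_type = datetime.date    # encoder side: what we transform from
    bson_type = datetime.datetime  # decoder side: what we transform back

    def transform_python(self, value):
        return datetime.datetime(value.year, value.month, value.day)

    def transform_bson(self, value):
        return value.date()

registry = TypeRegistry([DateCodec()])
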
+ + ``TypeRegistry`` can be initialized with an iterable of type codecs, and + a callable for the fallback encoder:: + + >>> from bson.codec_options import TypeRegistry + >>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...], + ... fallback_encoder) + + See `add codec to the type registry `_ documentation for an example. + + :param type_codecs: iterable of type codec instances. If + ``type_codecs`` contains multiple codecs that transform a single + python or BSON type, the transformation specified by the type codec + occurring last prevails. A TypeError will be raised if one or more + type codecs modify the encoding behavior of a built-in :mod:`bson` + type. + :param fallback_encoder: callable that accepts a single, + unencodable python value and transforms it into a type that + :mod:`bson` can encode. See `define a fallback encoder `_ + documentation for an example. + """ + + def __init__( + self, + type_codecs: Optional[Iterable[_Codec]] = None, + fallback_encoder: Optional[_Fallback] = None, + ) -> None: + self.__type_codecs = list(type_codecs or []) + self._fallback_encoder = fallback_encoder + self._encoder_map: dict[Any, Any] = {} + self._decoder_map: dict[Any, Any] = {} + + if self._fallback_encoder is not None: + if not callable(fallback_encoder): + raise TypeError("fallback_encoder %r is not a callable" % (fallback_encoder)) + + for codec in self.__type_codecs: + is_valid_codec = False + if isinstance(codec, TypeEncoder): + self._validate_type_encoder(codec) + is_valid_codec = True + self._encoder_map[codec.python_type] = codec.transform_python + if isinstance(codec, TypeDecoder): + is_valid_codec = True + self._decoder_map[codec.bson_type] = codec.transform_bson + if not is_valid_codec: + raise TypeError( + f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" + ) + + @property + def codecs(self) -> list[TypeEncoder | TypeDecoder | TypeCodec]: + """The list of type codecs in this registry.""" + return self.__type_codecs + + @property + def fallback_encoder(self) -> Optional[_Fallback]: + """The fallback encoder in this registry.""" + return self._fallback_encoder + + def _validate_type_encoder(self, codec: _Codec) -> None: + from bson import _BUILT_IN_TYPES + + for pytype in _BUILT_IN_TYPES: + if issubclass(cast(TypeCodec, codec).python_type, pytype): + err_msg = ( + "TypeEncoders cannot change how built-in types are " + f"encoded (encoder {codec} transforms type {pytype})" + ) + raise TypeError(err_msg) + + def __repr__(self) -> str: + return "{}(type_codecs={!r}, fallback_encoder={!r})".format( + self.__class__.__name__, + self.__type_codecs, + self._fallback_encoder, + ) + + def __eq__(self, other: Any) -> Any: + if not isinstance(other, type(self)): + return NotImplemented + return ( + (self._decoder_map == other._decoder_map) + and (self._encoder_map == other._encoder_map) + and (self._fallback_encoder == other._fallback_encoder) + ) + + +class DatetimeConversion(int, enum.Enum): + """Options for decoding BSON datetimes.""" + + DATETIME = 1 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`. + + BSON UTC datetimes that cannot be represented as a + :class:`~datetime.datetime` will raise an :class:`OverflowError` + or a :class:`ValueError`. + + .. versionadded 4.3 + """ + + DATETIME_CLAMP = 2 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`, clamping + to :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`. + + .. 
versionadded 4.3 + """ + + DATETIME_MS = 3 + """Decode a BSON UTC datetime as a :class:`~bson.datetime_ms.DatetimeMS` + object. + + .. versionadded 4.3 + """ + + DATETIME_AUTO = 4 + """Decode a BSON UTC datetime as a :class:`datetime.datetime` if possible, + and a :class:`~bson.datetime_ms.DatetimeMS` if not. + + .. versionadded 4.3 + """ + + +class _BaseCodecOptions(NamedTuple): + document_class: Type[Mapping[str, Any]] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: str + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[DatetimeConversion] + + +if TYPE_CHECKING: + + class CodecOptions(Tuple[_DocumentType], Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[int] + + def __new__( + cls: Type[CodecOptions[_DocumentType]], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., + ) -> CodecOptions[_DocumentType]: + ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> CodecOptions[Any]: + ... + + def _arguments_repr(self) -> str: + ... + + def _options_dict(self) -> dict[Any, Any]: + ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable[Any]) -> CodecOptions[_DocumentType]: + ... + + def _asdict(self) -> dict[str, Any]: + ... + + def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: + ... + + _source: str + _fields: Tuple[str] + +else: + + class CodecOptions(_BaseCodecOptions): + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See `Dates and Times `_ for examples using the `tz_aware` and + `tzinfo` options. + + See `UUID `_ for examples using the `uuid_representation` + option. + + :param document_class: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. 
+ :param tz_aware: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + :param uuid_representation: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See `UUID representations `_ for details. + :param unicode_decode_error_handler: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + :param tzinfo: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + :param type_registry: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + :param datetime_conversion: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + super().__init__() + + def __new__( + cls: Type[CodecOptions], + document_class: Optional[Type[Mapping[str, Any]]] = None, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: str = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, + ) -> CodecOptions: + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. 
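
# Sketch of the fallback described in the comment above: a parameterized
# document class such as SON[str, Any] makes issubclass() raise TypeError,
# so the check falls back to its __origin__ (assumes SON is subscriptable
# in this version, as the comment itself suggests).
from typing import Any
from bson.codec_options import CodecOptions
from bson.son import SON

opts = CodecOptions(document_class=SON[str, Any])
assert opts.document_class.__origin__ is SON
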
+ is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) + if not (is_mapping or _raw_document_class(doc_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.abc.MutableMapping" + ) + if not isinstance(tz_aware, bool): + raise TypeError(f"tz_aware must be True or False, was: tz_aware={tz_aware}") + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + if not isinstance(unicode_decode_error_handler, str): + raise ValueError( + f"unicode_decode_error_handler must be a string, not {type(unicode_decode_error_handler)}" + ) + if tzinfo is not None: + if not isinstance(tzinfo, datetime.tzinfo): + raise TypeError( + f"tzinfo must be an instance of datetime.tzinfo, not {type(tzinfo)}" + ) + if not tz_aware: + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") + + type_registry = type_registry or TypeRegistry() + + if not isinstance(type_registry, TypeRegistry): + raise TypeError( + f"type_registry must be an instance of TypeRegistry, not {type(type_registry)}" + ) + + return tuple.__new__( + cls, + ( + doc_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + datetime_conversion, + ), + ) + + def _arguments_repr(self) -> str: + """Representation of the arguments used to create this object.""" + document_class_repr = ( + "dict" if self.document_class is dict else repr(self.document_class) + ) + + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + + return ( + "document_class={}, tz_aware={!r}, uuid_representation={}, " + "unicode_decode_error_handler={!r}, tzinfo={!r}, " + "type_registry={!r}, datetime_conversion={!s}".format( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + self.datetime_conversion, + ) + ) + + def _options_dict(self) -> dict[str, Any]: + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, + } + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._arguments_repr()})" + + def with_options(self, **kwargs: Any) -> CodecOptions: + """Make a copy of this CodecOptions, overriding some options:: + + >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS + >>> DEFAULT_CODEC_OPTIONS.tz_aware + False + >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) + >>> options.tz_aware + True + + .. 
versionadded:: 3.5 + """ + opts = self._options_dict() + opts.update(kwargs) + return CodecOptions(**opts) + + +DEFAULT_CODEC_OPTIONS: CodecOptions[dict[str, Any]] = CodecOptions() + + +def _parse_codec_options(options: Any) -> CodecOptions[Any]: + """Parse BSON codec options.""" + kwargs = {} + for k in set(options) & { + "document_class", + "tz_aware", + "uuidrepresentation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + "datetime_conversion", + }: + if k == "uuidrepresentation": + kwargs["uuid_representation"] = options[k] + else: + kwargs[k] = options[k] + return CodecOptions(**kwargs) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py new file mode 100644 index 0000000000..2047bd30b2 --- /dev/null +++ b/bson/datetime_ms.py @@ -0,0 +1,182 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for representing the BSON datetime type. + +.. versionadded:: 4.3 +""" +from __future__ import annotations + +import calendar +import datetime +from typing import Any, Union, cast + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion +from bson.errors import InvalidBSON +from bson.tz_util import utc + +EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) +EPOCH_NAIVE = EPOCH_AWARE.replace(tzinfo=None) +_DATETIME_ERROR_SUGGESTION = ( + "(Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO)" + " or MongoClient(datetime_conversion='DATETIME_AUTO'))." + " See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes" +) + + +class DatetimeMS: + """Represents a BSON UTC datetime.""" + + __slots__ = ("_value",) + + def __init__(self, value: Union[int, datetime.datetime]): + """Represents a BSON UTC datetime. + + BSON UTC datetimes are defined as an int64 of milliseconds since the + Unix epoch. The principal use of DatetimeMS is to represent + datetimes outside the range of the Python builtin + :class:`~datetime.datetime` class when + encoding/decoding BSON. + + To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in + :class:`~bson.codec_options.CodecOptions` must be set to 'datetime_ms' or + 'datetime_auto'. See `handling out of range datetimes `_ for + details. + + :param value: An instance of :class:`datetime.datetime` to be + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. 
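
# Sketch: DatetimeMS is just a wrapped int64 of epoch milliseconds, so it can
# hold instants that datetime.datetime (years 1-9999) cannot represent.
import datetime
from bson.datetime_ms import DatetimeMS

ms = DatetimeMS(2**48)  # ~8,900 years past the epoch, beyond datetime.max
assert int(ms) == 2**48
ms2 = DatetimeMS(datetime.datetime(2022, 1, 1, tzinfo=datetime.timezone.utc))
assert ms2.as_datetime() == datetime.datetime(2022, 1, 1)  # naive by default
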
+ """ + if isinstance(value, int): + if not (-(2**63) <= value <= 2**63 - 1): + raise OverflowError("Must be a 64-bit integer of milliseconds") + self._value = value + elif isinstance(value, datetime.datetime): + self._value = _datetime_to_millis(value) + else: + raise TypeError(f"{type(value)} is not a valid type for DatetimeMS") + + def __hash__(self) -> int: + return hash(self._value) + + def __repr__(self) -> str: + return type(self).__name__ + "(" + str(self._value) + ")" + + def __lt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value < other + + def __le__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value <= other + + def __eq__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value == other._value + return False + + def __ne__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value != other._value + return True + + def __gt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value > other + + def __ge__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value >= other + + _type_marker = 9 + + def as_datetime( + self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS + ) -> datetime.datetime: + """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. + + :param codec_options: A CodecOptions instance for specifying how the + resulting DatetimeMS object will be formatted using ``tz_aware`` + and ``tz_info``. Defaults to + :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. + """ + return cast(datetime.datetime, _millis_to_datetime(self._value, codec_options)) + + def __int__(self) -> int: + return self._value + + +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) + + +_MIN_UTC = datetime.datetime.min.replace(tzinfo=utc) +_MAX_UTC = datetime.datetime.max.replace(tzinfo=utc) +_MIN_UTC_MS = _datetime_to_millis(_MIN_UTC) +_MAX_UTC_MS = _datetime_to_millis(_MAX_UTC) + + +# Inclusive min and max for timezones. 
+def _min_datetime_ms(tz: datetime.tzinfo = utc) -> int: + delta = tz.utcoffset(_MIN_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return max(_MIN_UTC_MS, _MIN_UTC_MS - offset_millis) + + +def _max_datetime_ms(tz: datetime.tzinfo = utc) -> int: + delta = tz.utcoffset(_MAX_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return min(_MAX_UTC_MS, _MAX_UTC_MS - offset_millis) + + +def _millis_to_datetime( + millis: int, opts: CodecOptions[Any] +) -> Union[datetime.datetime, DatetimeMS]: + """Convert milliseconds since epoch UTC to datetime.""" + if ( + opts.datetime_conversion == DatetimeConversion.DATETIME + or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO + ): + tz = opts.tzinfo or utc + if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: + millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) + elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: + if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): + return DatetimeMS(millis) + + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) // 1000 + micros = diff * 1000 + + try: + if opts.tz_aware: + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) + if opts.tzinfo: + dt = dt.astimezone(tz) + return dt + else: + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) + except ArithmeticError as err: + raise InvalidBSON(f"{err} {_DATETIME_ERROR_SUGGESTION}") from err + + elif opts.datetime_conversion == DatetimeConversion.DATETIME_MS: + return DatetimeMS(millis) + else: + raise ValueError("datetime_conversion must be an element of DatetimeConversion") diff --git a/bson/dbref.py b/bson/dbref.py index a8cc537f28..40bdb73cff 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,135 +13,121 @@ # limitations under the License. """Tools for manipulating DBRefs (references to MongoDB documents).""" +from __future__ import annotations from copy import deepcopy +from typing import Any, Mapping, Optional +from bson._helpers import _getstate_slots, _setstate_slots from bson.son import SON -class DBRef(object): - """A reference to a document stored in MongoDB. - """ +class DBRef: + """A reference to a document stored in MongoDB.""" + __slots__ = "__collection", "__id", "__database", "__kwargs" + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection, id, database=None, _extra={}, **kwargs): + def __init__( + self, + collection: str, + id: Any, + database: Optional[str] = None, + _extra: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not - an instance of :class:`basestring` (:class:`str` in python 3). - `database` is optional and allows references to documents to work - across databases. Any additional keyword arguments will create - additional fields in the resultant embedded document. 
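
# Sketch: extra keyword arguments become custom fields on the reference and
# are reachable as attributes via __getattr__ (defined further below).
from bson.dbref import DBRef

ref = DBRef("users", 42, database="app", region="eu")
assert ref.region == "eu"
assert dict(ref.as_doc()) == {"$ref": "users", "$id": 42, "$db": "app", "region": "eu"}
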
- - :Parameters: - - `collection`: name of the collection the document is stored in - - `id`: the value of the document's ``"_id"`` field - - `database` (optional): name of the database to reference - - `**kwargs` (optional): additional keyword arguments will + an instance of :class:`str`. `database` is optional and allows + references to documents to work across databases. Any additional + keyword arguments will create additional fields in the resultant + embedded document. + + :param collection: name of the collection the document is stored in + :param id: the value of the document's ``"_id"`` field + :param database: name of the database to reference + :param kwargs: additional keyword arguments will create additional, custom fields - .. versionchanged:: 1.8 - Now takes keyword arguments to specify additional fields. - .. versionadded:: 1.1.1 - The `database` parameter. - - .. mongodoc:: dbrefs + .. seealso:: The MongoDB documentation on `dbrefs `_. """ - if not isinstance(collection, basestring): - raise TypeError("collection must be an " - "instance of %s" % (basestring.__name__,)) - if database is not None and not isinstance(database, basestring): - raise TypeError("database must be an " - "instance of %s" % (basestring.__name__,)) + if not isinstance(collection, str): + raise TypeError(f"collection must be an instance of str, not {type(collection)}") + if database is not None and not isinstance(database, str): + raise TypeError(f"database must be an instance of str, not {type(database)}") self.__collection = collection self.__id = id self.__database = database - kwargs.update(_extra) + kwargs.update(_extra or {}) self.__kwargs = kwargs @property - def collection(self): - """Get the name of this DBRef's collection as unicode. - """ + def collection(self) -> str: + """Get the name of this DBRef's collection.""" return self.__collection @property - def id(self): - """Get this DBRef's _id. - """ + def id(self) -> Any: + """Get this DBRef's _id.""" return self.__id @property - def database(self): + def database(self) -> Optional[str]: """Get the name of this DBRef's database. Returns None if this DBRef doesn't specify a database. - - .. versionadded:: 1.1.1 """ return self.__database - def __getattr__(self, key): + def __getattr__(self, key: Any) -> Any: try: return self.__kwargs[key] except KeyError: - raise AttributeError(key) - - # Have to provide __setstate__ to avoid - # infinite recursion since we override - # __getattr__. - def __setstate__(self, state): - self.__dict__.update(state) + raise AttributeError(key) from None - def as_doc(self): + def as_doc(self) -> SON[str, Any]: """Get the SON document representation of this DBRef. 
Generally not needed by application developers
         """
-        doc = SON([("$ref", self.collection),
-                   ("$id", self.id)])
+        doc = SON([("$ref", self.collection), ("$id", self.id)])
         if self.database is not None:
             doc["$db"] = self.database
         doc.update(self.__kwargs)
         return doc

-    def __repr__(self):
-        extra = "".join([", %s=%r" % (k, v)
-                         for k, v in self.__kwargs.iteritems()])
-        if self.database is None:
-            return "DBRef(%r, %r%s)" % (self.collection, self.id, extra)
-        return "DBRef(%r, %r, %r%s)" % (self.collection, self.id,
-                                        self.database, extra)
+    def __repr__(self) -> str:
+        extra = "".join([f", {k}={v!r}" for k, v in self.__kwargs.items()])
+        if self.database is None:
+            return f"DBRef({self.collection!r}, {self.id!r}{extra})"
+        return f"DBRef({self.collection!r}, {self.id!r}, {self.database!r}{extra})"

-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         if isinstance(other, DBRef):
-            us = (self.__database, self.__collection,
-                  self.__id, self.__kwargs)
-            them = (other.__database, other.__collection,
-                    other.__id, other.__kwargs)
+            us = (self.__database, self.__collection, self.__id, self.__kwargs)
+            them = (other.__database, other.__collection, other.__id, other.__kwargs)
             return us == them
         return NotImplemented

-    def __ne__(self, other):
+    def __ne__(self, other: Any) -> bool:
         return not self == other

-    def __hash__(self):
-        """Get a hash value for this :class:`DBRef`.
-
-        .. versionadded:: 1.1
-        """
-        return hash((self.__collection, self.__id, self.__database,
-                     tuple(sorted(self.__kwargs.items()))))
-
-    def __deepcopy__(self, memo):
-        """Support function for `copy.deepcopy()`.
-
-        .. versionadded:: 1.10
-        """
-        return DBRef(deepcopy(self.__collection, memo),
-                     deepcopy(self.__id, memo),
-                     deepcopy(self.__database, memo),
-                     deepcopy(self.__kwargs, memo))
+    def __hash__(self) -> int:
+        """Get a hash value for this :class:`DBRef`."""
+        return hash(
+            (self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items())))
+        )
+
+    def __deepcopy__(self, memo: Any) -> DBRef:
+        """Support function for `copy.deepcopy()`."""
+        return DBRef(
+            deepcopy(self.__collection, memo),
+            deepcopy(self.__id, memo),
+            deepcopy(self.__database, memo),
+            deepcopy(self.__kwargs, memo),
+        )
diff --git a/bson/decimal128.py b/bson/decimal128.py
new file mode 100644
index 0000000000..7480f94d0a
--- /dev/null
+++ b/bson/decimal128.py
@@ -0,0 +1,351 @@
+# Copyright 2016-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the BSON decimal128 type.
+
+.. versionadded:: 3.4
+"""
+from __future__ import annotations
+
+import decimal
+import struct
+from decimal import Decimal
+from typing import Any, Sequence, Tuple, Type, Union
+
+from bson.codec_options import TypeDecoder, TypeEncoder
+
+_PACK_64 = struct.Struct("<Q").pack
+_UNPACK_64 = struct.Struct("<Q").unpack
+
+_EXPONENT_MASK = 3 << 61
+_EXPONENT_BIAS = 6176
+_EXPONENT_MAX = 6144
+_EXPONENT_MIN = -6143
+_MAX_DIGITS = 34
+
+_INF = 0x7800000000000000
+_NAN = 0x7C00000000000000
+_SNAN = 0x7E00000000000000
+_SIGN = 0x8000000000000000
+
+_NINF = (_INF + _SIGN, 0)
+_PINF = (_INF, 0)
+_NNAN = (_NAN + _SIGN, 0)
+_PNAN = (_NAN, 0)
+_NSNAN = (_SNAN + _SIGN, 0)
+_PSNAN = (_SNAN, 0)
+
+_CTX_OPTIONS = {
+    "prec": _MAX_DIGITS,
+    "rounding": decimal.ROUND_HALF_EVEN,
+    "Emin": _EXPONENT_MIN,
+    "Emax": _EXPONENT_MAX,
+    "capitals": 1,
+    "flags": [],
+    "traps": [decimal.InvalidOperation, decimal.Overflow, decimal.Inexact],
+    "clamp": 1,
+}
+
+_DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy())  # type: ignore
+_VALUE_OPTIONS = Union[decimal.Decimal, float, str, Sequence[int]]
+
+
+class DecimalEncoder(TypeEncoder):
+    """Converts Python :class:`decimal.Decimal` to BSON :class:`Decimal128`.
+
+    For example::
+        opts = CodecOptions(type_registry=TypeRegistry([DecimalEncoder()]))
+        bson.encode(doc, codec_options=opts)
+
+    .. versionadded:: 4.15
+    """
+
+    @property
+    def python_type(self) -> Type[Decimal]:
+        return Decimal
+
+    def transform_python(self, value: Any) -> Decimal128:
+        return Decimal128(value)
+
+
+class DecimalDecoder(TypeDecoder):
+    """Converts BSON :class:`Decimal128` to Python :class:`decimal.Decimal`.
+
+    For example::
+        opts = CodecOptions(type_registry=TypeRegistry([DecimalDecoder()]))
+        bson.decode(data, codec_options=opts)
+
+    .. versionadded:: 4.15
+    """
+
+    @property
+    def bson_type(self) -> Type[Decimal128]:
+        return Decimal128
+
+    def transform_bson(self, value: Any) -> decimal.Decimal:
+        return value.to_decimal()
+
+
+def create_decimal128_context() -> decimal.Context:
+    """Returns an instance of :class:`decimal.Context` appropriate
+    for working with IEEE-754 128-bit decimal floating point values.
+    """
+    opts = _CTX_OPTIONS.copy()
+    opts["traps"] = []
+    return decimal.Context(**opts)  # type: ignore
+
+
+def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]:
+    """Converts a decimal.Decimal to BID (high bits, low bits).
+
+    :param value: An instance of decimal.Decimal
+    """
+    with decimal.localcontext(_DEC128_CTX) as ctx:
+        value = ctx.create_decimal(value)
+
+    if value.is_infinite():
+        return _NINF if value.is_signed() else _PINF
+
+    sign, digits, exponent = value.as_tuple()
+
+    if value.is_nan():
+        if digits:
+            raise ValueError("NaN with debug payload is not supported")
+        if value.is_snan():
+            return _NSNAN if value.is_signed() else _PSNAN
+        return _NNAN if value.is_signed() else _PNAN
+
+    significand = int("".join([str(digit) for digit in digits]))
+    bit_length = significand.bit_length()
+
+    high = 0
+    low = 0
+    for i in range(min(64, bit_length)):
+        if significand & (1 << i):
+            low |= 1 << i
+
+    for i in range(64, bit_length):
+        if significand & (1 << i):
+            high |= 1 << (i - 64)
+
+    biased_exponent = exponent + _EXPONENT_BIAS  # type: ignore[operator]
+
+    if high >> 49 == 1:
+        high = high & 0x7FFFFFFFFFFF
+        high |= _EXPONENT_MASK
+        high |= (biased_exponent & 0x3FFF) << 47
+    else:
+        high |= biased_exponent << 49
+
+    if sign:
+        high |= _SIGN
+
+    return high, low
+
+
+class Decimal128:
+    """BSON Decimal128 type::
+
+      >>> Decimal128(Decimal("0.0005"))
+      Decimal128('0.0005')
+      >>> Decimal128("0.0005")
+      Decimal128('0.0005')
+      >>> Decimal128((3474527112516337664, 5))
+      Decimal128('0.0005')
+
+    :param value: An instance of :class:`decimal.Decimal`, string, or tuple of
+        (high bits, low bits) from Binary Integer Decimal (BID) format.
+
+    .. note:: :class:`~Decimal128` uses an instance of :class:`decimal.Context`
+      configured for IEEE-754 Decimal128 when validating parameters.
+      Signals like :class:`decimal.InvalidOperation`, :class:`decimal.Inexact`,
+      and :class:`decimal.Overflow` are trapped and raised as exceptions::
+
+        >>> Decimal128(".13.1")
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          ...
+        decimal.InvalidOperation: [<class 'decimal.ConversionSyntax'>]
+        >>>
+        >>> Decimal128("1E-6177")
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          ...
+        decimal.Inexact: [<class 'decimal.Inexact'>]
+        >>>
+        >>> Decimal128("1E6145")
+        Traceback (most recent call last):
+          File "<stdin>", line 1, in <module>
+          ...
+        decimal.Overflow: [<class 'decimal.Overflow'>, <class 'decimal.Rounded'>]
+
+      To ensure the result of a calculation can always be stored as BSON
+      Decimal128 use the context returned by
+      :func:`create_decimal128_context`::
+
+        >>> import decimal
+        >>> decimal128_ctx = create_decimal128_context()
+        >>> with decimal.localcontext(decimal128_ctx) as ctx:
+        ...     Decimal128(ctx.create_decimal(".13.3"))
+        ...
+        Decimal128('NaN')
+        >>>
+        >>> with decimal.localcontext(decimal128_ctx) as ctx:
+        ...     Decimal128(ctx.create_decimal("1E-6177"))
+        ...
+        Decimal128('0E-6176')
+        >>>
+        >>> with decimal.localcontext(decimal128_ctx) as ctx:
+        ...     Decimal128(ctx.create_decimal("1E6145"))
+        ...
+ Decimal128('Infinity') + + To match the behavior of MongoDB's Decimal128 implementation + str(Decimal(value)) may not match str(Decimal128(value)) for NaN values:: + + >>> Decimal128(Decimal('NaN')) + Decimal128('NaN') + >>> Decimal128(Decimal('-NaN')) + Decimal128('NaN') + >>> Decimal128(Decimal('sNaN')) + Decimal128('NaN') + >>> Decimal128(Decimal('-sNaN')) + Decimal128('NaN') + + However, :meth:`~Decimal128.to_decimal` will return the exact value:: + + >>> Decimal128(Decimal('NaN')).to_decimal() + Decimal('NaN') + >>> Decimal128(Decimal('-NaN')).to_decimal() + Decimal('-NaN') + >>> Decimal128(Decimal('sNaN')).to_decimal() + Decimal('sNaN') + >>> Decimal128(Decimal('-sNaN')).to_decimal() + Decimal('-sNaN') + + Two instances of :class:`Decimal128` compare equal if their Binary + Integer Decimal encodings are equal:: + + >>> Decimal128('NaN') == Decimal128('NaN') + True + >>> Decimal128('NaN').bid == Decimal128('NaN').bid + True + + This differs from :class:`decimal.Decimal` comparisons for NaN:: + + >>> Decimal('NaN') == Decimal('NaN') + False + """ + + __slots__ = ("__high", "__low") + + _type_marker = 19 + + def __init__(self, value: _VALUE_OPTIONS) -> None: + if isinstance(value, (str, decimal.Decimal)): + self.__high, self.__low = _decimal_to_128(value) + elif isinstance(value, (list, tuple)): + if len(value) != 2: + raise ValueError( + "Invalid size for creation of Decimal128 " + "from list or tuple. Must have exactly 2 " + "elements." + ) + self.__high, self.__low = value + else: + raise TypeError(f"Cannot convert {value!r} to Decimal128") + + def to_decimal(self) -> decimal.Decimal: + """Returns an instance of :class:`decimal.Decimal` for this + :class:`Decimal128`. + """ + high = self.__high + low = self.__low + sign = 1 if (high & _SIGN) else 0 + + if (high & _SNAN) == _SNAN: + return decimal.Decimal((sign, (), "N")) # type: ignore + elif (high & _NAN) == _NAN: + return decimal.Decimal((sign, (), "n")) # type: ignore + elif (high & _INF) == _INF: + return decimal.Decimal((sign, (), "F")) # type: ignore + + if (high & _EXPONENT_MASK) == _EXPONENT_MASK: + exponent = ((high & 0x1FFFE00000000000) >> 47) - _EXPONENT_BIAS + return decimal.Decimal((sign, (0,), exponent)) + else: + exponent = ((high & 0x7FFF800000000000) >> 49) - _EXPONENT_BIAS + + arr = bytearray(15) + mask = 0x00000000000000FF + for i in range(14, 6, -1): + arr[i] = (low & mask) >> ((14 - i) << 3) + mask = mask << 8 + + mask = 0x00000000000000FF + for i in range(6, 0, -1): + arr[i] = (high & mask) >> ((6 - i) << 3) + mask = mask << 8 + + mask = 0x0001000000000000 + arr[0] = (high & mask) >> 48 + + # cdecimal only accepts a tuple for digits. + digits = tuple(int(digit) for digit in str(int.from_bytes(arr, "big"))) + + with decimal.localcontext(_DEC128_CTX) as ctx: + return ctx.create_decimal((sign, digits, exponent)) + + @classmethod + def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: + """Create an instance of :class:`Decimal128` from Binary Integer + Decimal string. + + :param value: 16 byte string (128-bit IEEE 754-2008 decimal floating + point in Binary Integer Decimal (BID) format). 
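
# Sketch of the BID byte layout used by from_bid()/bid below: the low 64 bits
# are serialized first, then the high 64 bits, each little-endian.
import struct
from bson.decimal128 import Decimal128

d = Decimal128("0.0005")
low, high = struct.unpack("<QQ", d.bid)
assert (high, low) == (3474527112516337664, 5)  # matches the doctest above
assert Decimal128.from_bid(d.bid) == d
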
+ """ + if not isinstance(value, bytes): + raise TypeError(f"value must be an instance of bytes, not {type(value)}") + if len(value) != 16: + raise ValueError("value must be exactly 16 bytes") + return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore + + @property + def bid(self) -> bytes: + """The Binary Integer Decimal (BID) encoding of this instance.""" + return _PACK_64(self.__low) + _PACK_64(self.__high) + + def __str__(self) -> str: + dec = self.to_decimal() + if dec.is_nan(): + # Required by the drivers spec to match MongoDB behavior. + return "NaN" + return str(dec) + + def __repr__(self) -> str: + return f"Decimal128('{self!s}')" + + def __setstate__(self, value: Tuple[int, int]) -> None: + self.__high, self.__low = value + + def __getstate__(self) -> Tuple[int, int]: + return self.__high, self.__low + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Decimal128): + return self.bid == other.bid + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c deleted file mode 100644 index 24826963ad..0000000000 --- a/bson/encoding_helpers.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2009-2014 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "encoding_helpers.h" - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. 
- */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static unsigned char isLegalUTF8(const unsigned char* source, int length) { - unsigned char a; - const unsigned char* srcptr = source + length; - switch (length) { - default: return 0; - /* Everything else falls through when "true"... */ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 2: if ((a = (*--srcptr)) > 0xBF) return 0; - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return 0; break; - case 0xF0: if (a < 0x90) return 0; break; - case 0xF4: if (a > 0x8F) return 0; break; - default: if (a < 0x80) return 0; - } - case 1: if (*source >= 0x80 && *source < 0xC2) return 0; - if (*source > 0xF4) return 0; - } - return 1; -} - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null) { - int position = 0; - /* By default we go character by character. Will be different for checking - * UTF-8 */ - int sequence_length = 1; - - if (!check_utf8 && !check_null) { - return VALID; - } - - while (position < length) { - if (check_null && *(string + position) == 0) { - return HAS_NULL; - } - if (check_utf8) { - sequence_length = trailingBytesForUTF8[*(string + position)] + 1; - if ((position + sequence_length) > length) { - return NOT_UTF_8; - } - if (!isLegalUTF8(string + position, sequence_length)) { - return NOT_UTF_8; - } - } - position += sequence_length; - } - - return VALID; -} diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h deleted file mode 100644 index 20aac9baaf..0000000000 --- a/bson/encoding_helpers.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2009-2014 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ENCODING_HELPERS_H -#define ENCODING_HELPERS_H - -typedef enum { - VALID, - NOT_UTF_8, - HAS_NULL -} result_t; - -result_t check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null); - -#endif diff --git a/bson/errors.py b/bson/errors.py index 76e59e870a..ffc117f7ac 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,28 +13,37 @@ # limitations under the License. """Exceptions raised by the BSON package.""" +from __future__ import annotations + +from typing import Any, Optional class BSONError(Exception): - """Base class for all BSON exceptions. - """ + """Base class for all BSON exceptions.""" class InvalidBSON(BSONError): - """Raised when trying to create a BSON object from invalid data. - """ + """Raised when trying to create a BSON object from invalid data.""" class InvalidStringData(BSONError): - """Raised when trying to encode a string containing non-UTF8 data. - """ + """Raised when trying to encode a string containing non-UTF8 data.""" class InvalidDocument(BSONError): - """Raised when trying to create a BSON object from an invalid document. - """ + """Raised when trying to create a BSON object from an invalid document.""" + + def __init__(self, message: str, document: Optional[Any] = None) -> None: + super().__init__(message) + self._document = document + + @property + def document(self) -> Any: + """The invalid document that caused the error. + + ..versionadded:: 4.16""" + return self._document class InvalidId(BSONError): - """Raised when trying to create an ObjectId from invalid data. - """ + """Raised when trying to create an ObjectId from invalid data.""" diff --git a/bson/int64.py b/bson/int64.py new file mode 100644 index 0000000000..5846504a2d --- /dev/null +++ b/bson/int64.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A BSON wrapper for long (int in python3)""" +from __future__ import annotations + +from typing import Any + + +class Int64(int): + """Representation of the BSON int64 type. + + This is necessary because every integral number is an :class:`int` in + Python 3. Small integral numbers are encoded to BSON int32 by default, + but Int64 numbers will always be encoded to BSON int64. + + :param value: the numeric value to represent + """ + + __slots__ = () + + _type_marker = 18 + + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass diff --git a/bson/json_util.py b/bson/json_util.py index 3045ddb9e7..8151226a26 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,89 +16,131 @@ This module provides two helper methods `dumps` and `loads` that wrap the native :mod:`json` methods and provide explicit BSON conversion to and from -json. This allows for specialized encoding and decoding of BSON documents -into `Mongo Extended JSON -`_'s *Strict* -mode. This lets you encode / decode BSON documents to JSON even when -they use special BSON types. +JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON +is emitted and parsed, with the default being the Relaxed Extended JSON format. +:mod:`~bson.json_util` can also generate Canonical or legacy `Extended JSON`_ +when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is +provided, respectively. -Example usage (serialization): +.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json/extended-json.md + +Example usage (deserialization): + +.. doctest:: + + >>> from bson.json_util import loads + >>> loads( + ... '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]' + ... ) + [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] + +Example usage with :const:`RELAXED_JSON_OPTIONS` (the default): .. doctest:: >>> from bson import Binary, Code >>> from bson.json_util import dumps - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary("\x01\x02\x03\x04")}]) - '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ] + ... ) + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' -Example usage (deserialization): +Example usage (with :const:`CANONICAL_JSON_OPTIONS`): .. doctest:: - >>> from bson.json_util import loads - >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "00", "$binary": "AQIDBA=="}}]') - [{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 0)}] + >>> from bson import Binary, Code + >>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=CANONICAL_JSON_OPTIONS, + ... ) + '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' + +Example usage (with :const:`LEGACY_JSON_OPTIONS`): + +.. doctest:: + + >>> from bson import Binary, Code + >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... 
{"code": Code("function x() { return 1; }", {})}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=LEGACY_JSON_OPTIONS, + ... ) + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code` instances (as they are extended strings you can't provide custom defaults), but it will be faster as there is less recursion. -.. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. (But not in Python 2.4.) - -.. versionchanged:: 2.3 - Added dumps and loads helpers to automatically handle conversion to and - from json and supports :class:`~bson.binary.Binary` and - :class:`~bson.code.Code` - -.. versionchanged:: 1.9 - Handle :class:`uuid.UUID` instances, whenever possible. - -.. versionchanged:: 1.8 - Handle timezone aware datetime instances on encode, decode to - timezone aware datetime instances. - -.. versionchanged:: 1.8 - Added support for encoding/decoding :class:`~bson.max_key.MaxKey` - and :class:`~bson.min_key.MinKey`, and for encoding - :class:`~bson.timestamp.Timestamp`. - -.. versionchanged:: 1.2 - Added support for encoding/decoding datetimes and regular expressions. +.. note:: + If your application does not need the flexibility offered by + :class:`JSONOptions` and spends a large amount of time in the `json_util` + module, look to + `python-bsonjs `_ for a nice + performance improvement. `python-bsonjs` is a fast BSON to MongoDB + Extended JSON converter for Python built on top of + `libbson `_. `python-bsonjs` works best + with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`. """ +from __future__ import annotations import base64 -import calendar import datetime +import json +import math import re +import uuid +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) -json_lib = True -try: - import json -except ImportError: - try: - import simplejson as json - except ImportError: - json_lib = False - -import bson -from bson import EPOCH_AWARE, RE_TYPE, SON -from bson.binary import Binary +from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code +from bson.codec_options import CodecOptions, DatetimeConversion +from bson.datetime_ms import ( + _MAX_UTC_MS, + EPOCH_AWARE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, +) from bson.dbref import DBRef +from bson.decimal128 import Decimal128 +from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex +from bson.son import RE_TYPE from bson.timestamp import Timestamp - -from bson.py3compat import PY3, binary_type, string_types - +from bson.tz_util import utc _RE_OPT_TABLE = { "i": re.I, @@ -110,147 +152,1013 @@ } -def dumps(obj, *args, **kwargs): - """Helper function that wraps :class:`json.dumps`. +class DatetimeRepresentation: + LEGACY = 0 + """Legacy MongoDB Extended JSON datetime representation. + + :class:`datetime.datetime` instances will be encoded to JSON in the + format `{"$date": }`, where `dateAsMilliseconds` is + a 64-bit signed integer giving the number of milliseconds since the Unix + epoch UTC. 
This was the default encoding before PyMongo version 3.4. + + .. versionadded:: 3.4 + """ + + NUMBERLONG = 1 + """NumberLong datetime representation. + + :class:`datetime.datetime` instances will be encoded to JSON in the + format `{"$date": {"$numberLong": ""}}`, + where `dateAsMilliseconds` is the string representation of a 64-bit signed + integer giving the number of milliseconds since the Unix epoch UTC. + + .. versionadded:: 3.4 + """ + + ISO8601 = 2 + """ISO-8601 datetime representation. + + :class:`datetime.datetime` instances greater than or equal to the Unix + epoch UTC will be encoded to JSON in the format `{"$date": ""}`. + :class:`datetime.datetime` instances before the Unix epoch UTC will be + encoded as if the datetime representation is + :const:`~DatetimeRepresentation.NUMBERLONG`. + + .. versionadded:: 3.4 + """ + + +class JSONMode: + LEGACY = 0 + """Legacy Extended JSON representation. + + In this mode, :func:`~bson.json_util.dumps` produces PyMongo's legacy + non-standard JSON output. Consider using + :const:`~bson.json_util.JSONMode.RELAXED` or + :const:`~bson.json_util.JSONMode.CANONICAL` instead. + + .. versionadded:: 3.5 + """ + + RELAXED = 1 + """Relaxed Extended JSON representation. + + In this mode, :func:`~bson.json_util.dumps` produces Relaxed Extended JSON, + a mostly JSON-like format. Consider using this for things like a web API, + where one is sending a document (or a projection of a document) that only + uses ordinary JSON type primitives. In particular, the ``int``, + :class:`~bson.int64.Int64`, and ``float`` numeric types are represented in + the native JSON number format. This output is also the most human readable + and is useful for debugging and documentation. + + .. seealso:: The specification for Relaxed `Extended JSON`_. + + .. versionadded:: 3.5 + """ + + CANONICAL = 2 + """Canonical Extended JSON representation. + + In this mode, :func:`~bson.json_util.dumps` produces Canonical Extended + JSON, a type preserving format. Consider using this for things like + testing, where one has to precisely specify expected types in JSON. In + particular, the ``int``, :class:`~bson.int64.Int64`, and ``float`` numeric + types are encoded with type wrappers. + + .. seealso:: The specification for Canonical `Extended JSON`_. + + .. versionadded:: 3.5 + """ + + +if TYPE_CHECKING: + _BASE_CLASS = CodecOptions[MutableMapping[str, Any]] +else: + _BASE_CLASS = CodecOptions + +_INT32_MAX = 2**31 + + +class JSONOptions(_BASE_CLASS): + json_mode: int + strict_number_long: bool + datetime_representation: int + strict_uuid: bool + document_class: Type[MutableMapping[str, Any]] + + def __init__(self, *args: Any, **kwargs: Any): + """Encapsulates JSON options for :func:`dumps` and :func:`loads`. + + :param strict_number_long: If ``True``, :class:`~bson.int64.Int64` objects + are encoded to MongoDB Extended JSON's *Strict mode* type + `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they + will be encoded as an `int`. Defaults to ``False``. + :param datetime_representation: The representation to use when encoding + instances of :class:`datetime.datetime`. Defaults to + :const:`~DatetimeRepresentation.LEGACY`. + :param strict_uuid: If ``True``, :class:`uuid.UUID` object are encoded to + MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it + will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. + :param json_mode: The :class:`JSONMode` to use when encoding BSON types to + Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. 
+ :param document_class: BSON documents returned by :func:`loads` will be + decoded to an instance of this class. Must be a subclass of + :class:`collections.MutableMapping`. Defaults to :class:`dict`. + :param uuid_representation: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + :param tz_aware: If ``True``, MongoDB Extended JSON's *Strict mode* type + `Date` will be decoded to timezone aware instances of + :class:`datetime.datetime`. Otherwise they will be naive. Defaults + to ``False``. + :param tzinfo: A :class:`datetime.tzinfo` subclass that specifies the + timezone from which :class:`~datetime.datetime` objects should be + decoded. Defaults to :const:`~bson.tz_util.utc`. + :param datetime_conversion: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + `handling out of range datetimes `_ for details. + :param args: arguments to :class:`~bson.codec_options.CodecOptions` + :param kwargs: arguments to :class:`~bson.codec_options.CodecOptions` + + .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. + + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionchanged:: 3.5 + Accepts the optional parameter `json_mode`. + + .. versionchanged:: 4.0 + Changed default value of `tz_aware` to False. + """ + super().__init__() + + def __new__( + cls: Type[JSONOptions], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, + **kwargs: Any, + ) -> JSONOptions: + kwargs["tz_aware"] = kwargs.get("tz_aware", False) + if kwargs["tz_aware"]: + kwargs["tzinfo"] = kwargs.get("tzinfo", utc) + if datetime_representation not in ( + DatetimeRepresentation.LEGACY, + DatetimeRepresentation.NUMBERLONG, + DatetimeRepresentation.ISO8601, + None, + ): + raise ValueError( + "JSONOptions.datetime_representation must be one of LEGACY, " + "NUMBERLONG, or ISO8601 from DatetimeRepresentation." + ) + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) + if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): + raise ValueError( + "JSONOptions.json_mode must be one of LEGACY, RELAXED, " + "or CANONICAL from JSONMode." + ) + self.json_mode = json_mode + if self.json_mode == JSONMode.RELAXED: + if strict_number_long: + raise ValueError("Cannot specify strict_number_long=True with JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.ISO8601): + raise ValueError( + "datetime_representation must be DatetimeRepresentation." 
+ "ISO8601 or omitted with JSONMode.RELAXED" + ) + if strict_uuid not in (None, True): + raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED") + self.strict_number_long = False + self.datetime_representation = DatetimeRepresentation.ISO8601 + self.strict_uuid = True + elif self.json_mode == JSONMode.CANONICAL: + if strict_number_long not in (None, True): + raise ValueError("Cannot specify strict_number_long=False with JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG): + raise ValueError( + "datetime_representation must be DatetimeRepresentation." + "NUMBERLONG or omitted with JSONMode.RELAXED" + ) + if strict_uuid not in (None, True): + raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED") + self.strict_number_long = True + self.datetime_representation = DatetimeRepresentation.NUMBERLONG + self.strict_uuid = True + else: # JSONMode.LEGACY + self.strict_number_long = False + self.datetime_representation = DatetimeRepresentation.LEGACY + self.strict_uuid = False + if strict_number_long is not None: + self.strict_number_long = strict_number_long + if datetime_representation is not None: + self.datetime_representation = datetime_representation + if strict_uuid is not None: + self.strict_uuid = strict_uuid + return self + + def _arguments_repr(self) -> str: + return ( + "strict_number_long={!r}, " + "datetime_representation={!r}, " + "strict_uuid={!r}, json_mode={!r}, {}".format( + self.strict_number_long, + self.datetime_representation, + self.strict_uuid, + self.json_mode, + super()._arguments_repr(), + ) + ) + + def _options_dict(self) -> dict[Any, Any]: + # TODO: PYTHON-2442 use _asdict() instead + options_dict = super()._options_dict() + options_dict.update( + { + "strict_number_long": self.strict_number_long, + "datetime_representation": self.datetime_representation, + "strict_uuid": self.strict_uuid, + "json_mode": self.json_mode, + } + ) + return options_dict + + def with_options(self, **kwargs: Any) -> JSONOptions: + """ + Make a copy of this JSONOptions, overriding some options:: + + >>> from bson.json_util import CANONICAL_JSON_OPTIONS + >>> CANONICAL_JSON_OPTIONS.tz_aware + True + >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False, tzinfo=None) + >>> json_options.tz_aware + False + + .. versionadded:: 3.12 + """ + opts = self._options_dict() + for opt in ("strict_number_long", "datetime_representation", "strict_uuid", "json_mode"): + opts[opt] = kwargs.get(opt, getattr(self, opt)) + opts.update(kwargs) + return JSONOptions(**opts) + + +LEGACY_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.LEGACY) +""":class:`JSONOptions` for encoding to PyMongo's legacy JSON format. + +.. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`. + +.. versionadded:: 3.5 +""" + +CANONICAL_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.CANONICAL) +""":class:`JSONOptions` for Canonical Extended JSON. + +.. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`. + +.. versionadded:: 3.5 +""" + +RELAXED_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.RELAXED) +""":class:`JSONOptions` for Relaxed Extended JSON. + +.. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`. + +.. versionadded:: 3.5 +""" + +DEFAULT_JSON_OPTIONS: JSONOptions = RELAXED_JSON_OPTIONS +"""The default :class:`JSONOptions` for JSON encoding/decoding. + +The same as :const:`RELAXED_JSON_OPTIONS`. + +.. 
versionchanged:: 4.0 + Changed from :const:`LEGACY_JSON_OPTIONS` to + :const:`RELAXED_JSON_OPTIONS`. + +.. versionadded:: 3.4 +""" + + +def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: + """Helper function that wraps :func:`json.dumps`. Recursive function that handles all BSON types including :class:`~bson.binary.Binary` and :class:`~bson.code.Code`. - .. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. (But not in Python 2.4.) + :param json_options: A :class:`JSONOptions` instance used to modify the + encoding of MongoDB Extended JSON types. Defaults to + :const:`DEFAULT_JSON_OPTIONS`. + + .. versionchanged:: 4.0 + Now outputs MongoDB Relaxed Extended JSON by default (using + :const:`DEFAULT_JSON_OPTIONS`). + + .. versionchanged:: 3.4 + Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ - if not json_lib: - raise Exception("No json library available") - return json.dumps(_json_convert(obj), *args, **kwargs) + json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) + return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s, *args, **kwargs): - """Helper function that wraps :class:`json.loads`. +def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: + """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. - :Parameters: - - `compile_re` (optional): if ``False``, don't attempt to compile BSON - regular expressions into Python regular expressions. Return instances - of :class:`~bson.bsonregex.BSONRegex` instead. + Raises ``TypeError``, ``ValueError``, ``KeyError``, or + :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON. - .. versionchanged:: 2.7 - Added ``compile_re`` option. - """ - if not json_lib: - raise Exception("No json library available") + :param json_options: A :class:`JSONOptions` instance used to modify the + decoding of MongoDB Extended JSON types. Defaults to + :const:`DEFAULT_JSON_OPTIONS`. + + .. versionchanged:: 4.0 + Now loads :class:`datetime.datetime` instances as naive by default. To + load timezone aware instances utilize the `json_options` parameter. + See :ref:`tz_aware_default_change` for an example. - compile_re = kwargs.pop('compile_re', True) - kwargs['object_hook'] = lambda dct: object_hook(dct, compile_re) + .. versionchanged:: 3.5 + Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy + format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON + type wrappers with values of the wrong type or any extra keys. + + .. versionchanged:: 3.4 + Accepts optional parameter `json_options`. See :class:`JSONOptions`. + """ + json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) + # Execution time optimization if json_options.document_class is dict + if json_options.document_class is dict: + kwargs["object_hook"] = lambda obj: object_hook(obj, json_options) + else: + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) -def _json_convert(obj): +def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: """Recursive helper method that converts BSON types so they can be converted into json. 
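+
+    A sketch of the behavior (hypothetical input, default options assumed)::
+
+        >>> _json_convert({"id": ObjectId("0123456789ab0123456789ab")})
+        {'id': {'$oid': '0123456789ab0123456789ab'}}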
""" - if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support - return SON(((k, _json_convert(v)) for k, v in obj.iteritems())) - elif hasattr(obj, '__iter__') and not isinstance(obj, string_types): - return list((_json_convert(v) for v in obj)) + if hasattr(obj, "items"): + return {k: _json_convert(v, json_options) for k, v in obj.items()} + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): + return [_json_convert(v, json_options) for v in obj] try: - return default(obj) + return default(obj, json_options) except TypeError: return obj -def object_hook(dct, compile_re=True): - if "$oid" in dct: - return ObjectId(str(dct["$oid"])) - if "$ref" in dct: - return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None)) - if "$date" in dct: - secs = float(dct["$date"]) / 1000.0 - return EPOCH_AWARE + datetime.timedelta(seconds=secs) - if "$regex" in dct: - flags = 0 - # PyMongo always adds $options but some other tools may not. - for opt in dct.get("$options", ""): - flags |= _RE_OPT_TABLE.get(opt, 0) - - if compile_re: - return re.compile(dct["$regex"], flags) - else: - return Regex(dct["$regex"], flags) - if "$minKey" in dct: - return MinKey() - if "$maxKey" in dct: - return MaxKey() - if "$binary" in dct: - if isinstance(dct["$type"], int): - dct["$type"] = "%02x" % dct["$type"] - subtype = int(dct["$type"], 16) - if subtype >= 0xffffff80: # Handle mongoexport values - subtype = int(dct["$type"][6:], 16) - return Binary(base64.b64decode(dct["$binary"].encode()), subtype) - if "$code" in dct: - return Code(dct["$code"], dct.get("$scope")) - if bson.has_uuid() and "$uuid" in dct: - return bson.uuid.UUID(dct["$uuid"]) +def object_pairs_hook( + pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS +) -> Any: + return object_hook(json_options.document_class(pairs), json_options) # type:ignore[call-arg] + + +def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: + match = None + for k in dct: + if k in _PARSERS_SET: + match = k + break + if match: + return _PARSERS[match](dct, json_options) return dct -def default(obj): - # We preserve key order when rendering SON, DBRef, etc. as JSON by - # returning a SON for those types instead of a dict. This works with - # the "json" standard library in Python 2.6+ and with simplejson - # 2.1.0+ in Python 2.5+, because those libraries iterate the SON - # using PyIter_Next. Python 2.4 must use simplejson 2.0.9 or older, - # and those versions of simplejson use the lower-level PyDict_Next, - # which bypasses SON's order-preserving iteration, so we lose key - # order in Python 2.4. - if isinstance(obj, ObjectId): - return {"$oid": str(obj)} - if isinstance(obj, DBRef): - return _json_convert(obj.as_doc()) - if isinstance(obj, datetime.datetime): - # TODO share this code w/ bson.py? - if obj.utcoffset() is not None: - obj = obj - obj.utcoffset() - millis = int(calendar.timegm(obj.timetuple()) * 1000 + - obj.microsecond / 1000) - return {"$date": millis} - if isinstance(obj, (RE_TYPE, Regex)): - flags = "" - if obj.flags & re.IGNORECASE: - flags += "i" - if obj.flags & re.LOCALE: - flags += "l" - if obj.flags & re.MULTILINE: - flags += "m" - if obj.flags & re.DOTALL: - flags += "s" - if obj.flags & re.UNICODE: - flags += "u" - if obj.flags & re.VERBOSE: - flags += "x" - if isinstance(obj.pattern, unicode): - pattern = obj.pattern +def _parse_legacy_regex(doc: Any, dummy0: Any) -> Any: + pattern = doc["$regex"] + # Check if this is the $regex query operator. 
+    if not isinstance(pattern, (str, bytes)):
+        return doc
+    flags = 0
+    # PyMongo always adds $options but some other tools may not.
+    for opt in doc.get("$options", ""):
+        flags |= _RE_OPT_TABLE.get(opt, 0)
+    return Regex(pattern, flags)
+
+
+def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    """Decode a JSON legacy $uuid to Python UUID."""
+    if len(doc) != 1:
+        raise TypeError(f"Bad $uuid, extra field(s): {doc}")
+    if not isinstance(doc["$uuid"], str):
+        raise TypeError(f"$uuid must be a string: {doc}")
+    if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED:
+        return Binary.from_uuid(uuid.UUID(doc["$uuid"]))
+    else:
+        return uuid.UUID(doc["$uuid"])
+
+
+def _binary_or_uuid(data: Any, subtype: int, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    # special handling for UUID
+    if subtype in ALL_UUID_SUBTYPES:
+        uuid_representation = json_options.uuid_representation
+        binary_value = Binary(data, subtype)
+        if uuid_representation == UuidRepresentation.UNSPECIFIED:
+            return binary_value
+        if subtype == UUID_SUBTYPE:
+            # Legacy behavior: use STANDARD with binary subtype 4.
+            uuid_representation = UuidRepresentation.STANDARD
+        elif uuid_representation == UuidRepresentation.STANDARD:
+            # subtype == OLD_UUID_SUBTYPE
+            # Legacy behavior: STANDARD is the same as PYTHON_LEGACY.
+            uuid_representation = UuidRepresentation.PYTHON_LEGACY
+        return binary_value.as_uuid(uuid_representation)
+
+    if subtype == 0:
+        return cast(uuid.UUID, data)
+    return Binary(data, subtype)
+
+
+def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    if isinstance(doc["$type"], int):
+        doc["$type"] = "%02x" % doc["$type"]
+    subtype = int(doc["$type"], 16)
+    if subtype >= 0xFFFFFF80:  # Handle mongoexport values
+        subtype = int(doc["$type"][6:], 16)
+    data = base64.b64decode(doc["$binary"].encode())
+    return _binary_or_uuid(data, subtype, json_options)
+
+
+def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    binary = doc["$binary"]
+    b64 = binary["base64"]
+    subtype = binary["subType"]
+    if not isinstance(b64, str):
+        raise TypeError(f"$binary base64 must be a string: {doc}")
+    if not isinstance(subtype, str) or len(subtype) > 2:
+        raise TypeError(f"$binary subType must be a string at most 2 characters: {doc}")
+    if len(binary) != 2:
+        raise TypeError(f'$binary must include only "base64" and "subType" components: {doc}')
+
+    data = base64.b64decode(b64.encode())
+    return _binary_or_uuid(data, int(subtype, 16), json_options)
+
+
+def _parse_canonical_datetime(
+    doc: Any, json_options: JSONOptions
+) -> Union[datetime.datetime, DatetimeMS]:
+    """Decode a JSON datetime to python datetime.datetime."""
+    dtm = doc["$date"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $date, extra field(s): {doc}")
+    # mongoexport 2.6 and newer
+    if isinstance(dtm, str):
+        try:
+            # Parse offset
+            if dtm[-1] == "Z":
+                dt = dtm[:-1]
+                offset = "Z"
+            elif dtm[-6] in ("+", "-") and dtm[-3] == ":":
+                # (+|-)HH:MM
+                dt = dtm[:-6]
+                offset = dtm[-6:]
+            elif dtm[-5] in ("+", "-"):
+                # (+|-)HHMM
+                dt = dtm[:-5]
+                offset = dtm[-5:]
+            elif dtm[-3] in ("+", "-"):
+                # (+|-)HH
+                dt = dtm[:-3]
+                offset = dtm[-3:]
+            else:
+                dt = dtm
+                offset = ""
+        except IndexError as exc:
+            raise ValueError(f"time data {dtm!r} does not match ISO-8601 datetime format") from exc
+
+        # Parse the optional fractional seconds portion.
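+        # For example, an assumed input of "2012-12-24T12:15:30.501Z" yields
+        # microsecond=501000 below: the fraction is scaled to microseconds.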
+        dot_index = dt.rfind(".")
+        microsecond = 0
+        if dot_index != -1:
+            microsecond = int(float(dt[dot_index:]) * 1000000)
+            dt = dt[:dot_index]
+
+        aware = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S").replace(
+            microsecond=microsecond, tzinfo=utc
+        )
+
+        if offset and offset != "Z":
+            if len(offset) == 6:
+                hours, minutes = offset[1:].split(":")
+                secs = int(hours) * 3600 + int(minutes) * 60
+            elif len(offset) == 5:
+                secs = int(offset[1:3]) * 3600 + int(offset[3:]) * 60
+            elif len(offset) == 3:
+                secs = int(offset[1:3]) * 3600
+            if offset[0] == "-":
+                secs *= -1
+            aware = aware - datetime.timedelta(seconds=secs)
+
+        if json_options.tz_aware:
+            if json_options.tzinfo:
+                aware = aware.astimezone(json_options.tzinfo)
+            if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS:
+                return DatetimeMS(aware)
+            return aware
+        else:
+            aware_tzinfo_none = aware.replace(tzinfo=None)
+            if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS:
+                return DatetimeMS(aware_tzinfo_none)
+            return aware_tzinfo_none
+    return _millis_to_datetime(int(dtm), cast("CodecOptions[Any]", json_options))
+
+
+def _parse_canonical_oid(doc: Any, dummy0: Any) -> ObjectId:
+    """Decode a JSON ObjectId to bson.objectid.ObjectId."""
+    if len(doc) != 1:
+        raise TypeError(f"Bad $oid, extra field(s): {doc}")
+    return ObjectId(doc["$oid"])
+
+
+def _parse_canonical_symbol(doc: Any, dummy0: Any) -> str:
+    """Decode a JSON symbol to Python string."""
+    symbol = doc["$symbol"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $symbol, extra field(s): {doc}")
+    return str(symbol)
+
+
+def _parse_canonical_code(doc: Any, dummy0: Any) -> Code:
+    """Decode a JSON code to bson.code.Code."""
+    for key in doc:
+        if key not in ("$code", "$scope"):
+            raise TypeError(f"Bad $code, extra field(s): {doc}")
+    return Code(doc["$code"], scope=doc.get("$scope"))
+
+
+def _parse_canonical_regex(doc: Any, dummy0: Any) -> Regex[str]:
+    """Decode a JSON regex to bson.regex.Regex."""
+    regex = doc["$regularExpression"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $regularExpression, extra field(s): {doc}")
+    if len(regex) != 2:
+        raise TypeError(
+            f'Bad $regularExpression must include only "pattern" and "options" components: {doc}'
+        )
+    opts = regex["options"]
+    if not isinstance(opts, str):
+        raise TypeError(
+            "Bad $regularExpression options, options must be string, was type %s" % (type(opts))
+        )
+    return Regex(regex["pattern"], opts)
+
+
+def _parse_canonical_dbref(doc: Any, dummy0: Any) -> Any:
+    """Decode a JSON DBRef to bson.dbref.DBRef."""
+    if (
+        isinstance(doc.get("$ref"), str)
+        and "$id" in doc
+        and isinstance(doc.get("$db"), (str, type(None)))
+    ):
+        return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc)
+    return doc
+
+
+def _parse_canonical_dbpointer(doc: Any, dummy0: Any) -> Any:
+    """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef."""
+    dbref = doc["$dbPointer"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $dbPointer, extra field(s): {doc}")
+    if isinstance(dbref, DBRef):
+        dbref_doc = dbref.as_doc()
+        # DBPointer must not contain $db in its value.
+        if dbref.database is not None:
+            raise TypeError(f"Bad $dbPointer, extra field $db: {dbref_doc}")
+        if not isinstance(dbref.id, ObjectId):
+            raise TypeError(f"Bad $dbPointer, $id must be an ObjectId: {dbref_doc}")
+        if len(dbref_doc) != 2:
+            raise TypeError(f"Bad $dbPointer, extra field(s) in DBRef: {dbref_doc}")
+        return dbref
+    else:
+        raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}")
+
+
+def _parse_canonical_int32(doc: Any, dummy0: Any) -> int:
+    """Decode a JSON int32 to python int."""
+    i_str = doc["$numberInt"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $numberInt, extra field(s): {doc}")
+    if not isinstance(i_str, str):
+        raise TypeError(f"$numberInt must be string: {doc}")
+    return int(i_str)
+
+
+def _parse_canonical_int64(doc: Any, dummy0: Any) -> Int64:
+    """Decode a JSON int64 to bson.int64.Int64."""
+    l_str = doc["$numberLong"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $numberLong, extra field(s): {doc}")
+    return Int64(l_str)
+
+
+def _parse_canonical_double(doc: Any, dummy0: Any) -> float:
+    """Decode a JSON double to python float."""
+    d_str = doc["$numberDouble"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $numberDouble, extra field(s): {doc}")
+    if not isinstance(d_str, str):
+        raise TypeError(f"$numberDouble must be string: {doc}")
+    return float(d_str)
+
+
+def _parse_canonical_decimal128(doc: Any, dummy0: Any) -> Decimal128:
+    """Decode a JSON decimal128 to bson.decimal128.Decimal128."""
+    d_str = doc["$numberDecimal"]
+    if len(doc) != 1:
+        raise TypeError(f"Bad $numberDecimal, extra field(s): {doc}")
+    if not isinstance(d_str, str):
+        raise TypeError(f"$numberDecimal must be string: {doc}")
+    return Decimal128(d_str)
+
+
+def _parse_canonical_minkey(doc: Any, dummy0: Any) -> MinKey:
+    """Decode a JSON MinKey to bson.min_key.MinKey."""
+    if type(doc["$minKey"]) is not int or doc["$minKey"] != 1:  # noqa: E721
+        raise TypeError(f"$minKey value must be 1: {doc}")
+    if len(doc) != 1:
+        raise TypeError(f"Bad $minKey, extra field(s): {doc}")
+    return MinKey()
+
+
+def _parse_canonical_maxkey(doc: Any, dummy0: Any) -> MaxKey:
+    """Decode a JSON MaxKey to bson.max_key.MaxKey."""
+    if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1:  # noqa: E721
+        raise TypeError(f"$maxKey value must be 1: {doc}")
+    if len(doc) != 1:
+        raise TypeError(f"Bad $maxKey, extra field(s): {doc}")
+    return MaxKey()
+
+
+def _parse_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    if "$type" in doc:
+        return _parse_legacy_binary(doc, json_options)
+    else:
+        return _parse_canonical_binary(doc, json_options)
+
+
+def _parse_timestamp(doc: Any, dummy0: Any) -> Timestamp:
+    tsp = doc["$timestamp"]
+    return Timestamp(tsp["t"], tsp["i"])
+
+
+_PARSERS: dict[str, Callable[[Any, JSONOptions], Any]] = {
+    "$oid": _parse_canonical_oid,
+    "$ref": _parse_canonical_dbref,
+    "$date": _parse_canonical_datetime,
+    "$regex": _parse_legacy_regex,
+    "$minKey": _parse_canonical_minkey,
+    "$maxKey": _parse_canonical_maxkey,
+    "$binary": _parse_binary,
+    "$code": _parse_canonical_code,
+    "$uuid": _parse_legacy_uuid,
+    "$undefined": lambda _, _1: None,
+    "$numberLong": _parse_canonical_int64,
+    "$timestamp": _parse_timestamp,
+    "$numberDecimal": _parse_canonical_decimal128,
+    "$dbPointer": _parse_canonical_dbpointer,
+    
"$regularExpression": _parse_canonical_regex, + "$symbol": _parse_canonical_symbol, + "$numberInt": _parse_canonical_int32, + "$numberDouble": _parse_canonical_double, +} +_PARSERS_SET = set(_PARSERS) + + +def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: + if json_options.json_mode == JSONMode.LEGACY: + return {"$binary": base64.b64encode(data).decode(), "$type": "%02x" % subtype} + return {"$binary": {"base64": base64.b64encode(data).decode(), "subType": "%02x" % subtype}} + + +def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _MAX_UTC_MS + ): + return _encode_datetime(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": int(obj)} + return {"$date": {"$numberLong": str(int(obj))}} + + +def _encode_code(obj: Code, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if obj.scope is None: + return {"$code": str(obj)} + else: + return {"$code": str(obj), "$scope": _json_convert(obj.scope, json_options)} + + +def _encode_int64(obj: Int64, json_options: JSONOptions) -> Any: + if json_options.strict_number_long: + return {"$numberLong": str(obj)} + else: + return int(obj) + + +def _encode_noop(obj: Any, dummy0: Any) -> Any: + return obj + + +def _encode_regex(obj: Any, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + flags = "" + if obj.flags & re.IGNORECASE: + flags += "i" + if obj.flags & re.LOCALE: + flags += "l" + if obj.flags & re.MULTILINE: + flags += "m" + if obj.flags & re.DOTALL: + flags += "s" + if obj.flags & re.UNICODE: + flags += "u" + if obj.flags & re.VERBOSE: + flags += "x" + if isinstance(obj.pattern, str): + pattern = obj.pattern + else: + pattern = obj.pattern.decode("utf-8") + if json_options.json_mode == JSONMode.LEGACY: + return {"$regex": pattern, "$options": flags} + return {"$regularExpression": {"pattern": pattern, "options": flags}} + + +def _encode_int(obj: int, json_options: JSONOptions) -> Any: + if json_options.json_mode == JSONMode.CANONICAL: + if -_INT32_MAX <= obj < _INT32_MAX: + return {"$numberInt": str(obj)} + return {"$numberLong": str(obj)} + return obj + + +def _encode_float(obj: float, json_options: JSONOptions) -> Any: + if json_options.json_mode != JSONMode.LEGACY: + if math.isnan(obj): + return {"$numberDouble": "NaN"} + elif math.isinf(obj): + representation = "Infinity" if obj > 0 else "-Infinity" + return {"$numberDouble": representation} + elif json_options.json_mode == JSONMode.CANONICAL: + # repr() will return the shortest string guaranteed to produce the + # original value, when float() is called on it. 
+ return {"$numberDouble": str(repr(obj))} + return obj + + +def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if json_options.datetime_representation == DatetimeRepresentation.ISO8601: + if not obj.tzinfo: + obj = obj.replace(tzinfo=utc) + assert obj.tzinfo is not None + if obj >= EPOCH_AWARE: + off = obj.tzinfo.utcoffset(obj) + if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore + tz_string = "Z" + else: + tz_string = obj.strftime("%z") + millis = int(obj.microsecond / 1000) + fracsecs = ".%03d" % (millis,) if millis else "" + return { + "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + } + + millis = _datetime_to_millis(obj) + if json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": millis} + return {"$date": {"$numberLong": str(millis)}} + + +def _encode_bytes(obj: bytes, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + return _encode_binary(obj, 0, json_options) + + +def _encode_binary_obj(obj: Binary, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + return _encode_binary(obj, obj.subtype, json_options) + + +def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if json_options.strict_uuid: + binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) + return _encode_binary(binval, binval.subtype, json_options) + else: return {"$uuid": obj.hex} + + +def _encode_objectid(obj: ObjectId, dummy0: Any) -> dict: # type: ignore[type-arg] + return {"$oid": str(obj)} + + +def _encode_timestamp(obj: Timestamp, dummy0: Any) -> dict: # type: ignore[type-arg] + return {"$timestamp": {"t": obj.time, "i": obj.inc}} + + +def _encode_decimal128(obj: Timestamp, dummy0: Any) -> dict: # type: ignore[type-arg] + return {"$numberDecimal": str(obj)} + + +def _encode_dbref(obj: DBRef, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + return _json_convert(obj.as_doc(), json_options=json_options) + + +def _encode_minkey(dummy0: Any, dummy1: Any) -> dict: # type: ignore[type-arg] + return {"$minKey": 1} + + +def _encode_maxkey(dummy0: Any, dummy1: Any) -> dict: # type: ignore[type-arg] + return {"$maxKey": 1} + + +# Encoders for BSON types +# Each encoder function's signature is: +# - obj: a Python data type, e.g. a Python int for _encode_int +# - json_options: a JSONOptions +_ENCODERS: dict[Type, Callable[[Any, JSONOptions], Any]] = { # type: ignore[type-arg] + bool: _encode_noop, + bytes: _encode_bytes, + datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetimems, + float: _encode_float, + int: _encode_int, + str: _encode_noop, + type(None): _encode_noop, + uuid.UUID: _encode_uuid, + Binary: _encode_binary_obj, + Int64: _encode_int64, + Code: _encode_code, + DBRef: _encode_dbref, + MaxKey: _encode_maxkey, + MinKey: _encode_minkey, + ObjectId: _encode_objectid, + Regex: _encode_regex, + RE_TYPE: _encode_regex, + Timestamp: _encode_timestamp, + Decimal128: _encode_decimal128, +} + +# Map each _type_marker to its encoder for faster lookup. +_MARKERS: dict[int, Callable[[Any, JSONOptions], Any]] = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] + +_BUILT_IN_TYPES = tuple(t for t in _ENCODERS) + + +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: + # First see if the type is already cached. KeyError will only ever + # happen once per subtype. 
+ try: + return _ENCODERS[type(obj)](obj, json_options) + except KeyError: + pass + + # Second, fall back to trying _type_marker. This has to be done + # before the loop below since users could subclass one of our + # custom types that subclasses a python built-in (e.g. Binary) + if hasattr(obj, "_type_marker"): + marker = obj._type_marker + if marker in _MARKERS: + func = _MARKERS[marker] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(obj)] = func + return func(obj, json_options) + + # Third, test each base type. This will only happen once for + # a subtype of a supported base type. + for base in _BUILT_IN_TYPES: + if isinstance(obj, base): + func = _ENCODERS[base] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(obj)] = func + return func(obj, json_options) + raise TypeError("%r is not JSON serializable" % obj) + + +def _get_str_size(obj: Any) -> int: + return len(obj) + + +def _get_datetime_size(obj: datetime.datetime) -> int: + return 5 + len(str(obj.time())) + + +def _get_regex_size(obj: Regex) -> int: # type: ignore[type-arg] + return 18 + len(obj.pattern) + + +def _get_dbref_size(obj: DBRef) -> int: + return 34 + len(obj.collection) + + +_CONSTANT_SIZE_TABLE: dict[Any, int] = { + ObjectId: 28, + int: 11, + Int64: 11, + Decimal128: 11, + Timestamp: 14, + MinKey: 8, + MaxKey: 8, +} + +_VARIABLE_SIZE_TABLE: dict[Any, Callable[[Any], int]] = { + str: _get_str_size, + bytes: _get_str_size, + datetime.datetime: _get_datetime_size, + Regex: _get_regex_size, + DBRef: _get_dbref_size, +} + + +def get_size(obj: Any, max_size: int, current_size: int = 0) -> int: + """Recursively finds size of objects""" + if current_size >= max_size: + return current_size + + obj_type = type(obj) + + # Check to see if the obj has a constant size estimate + try: + return _CONSTANT_SIZE_TABLE[obj_type] + except KeyError: + pass + + # Check to see if the obj has a variable but simple size estimate + try: + return _VARIABLE_SIZE_TABLE[obj_type](obj) + except KeyError: + pass + + # Special cases that require recursion + if obj_type == Code: + if obj.scope: + current_size += ( + 5 + get_size(obj.scope, max_size, current_size) + len(obj) - len(obj.scope) + ) + else: + current_size += 5 + len(obj) + elif obj_type == dict: + for k, v in obj.items(): + current_size += get_size(k, max_size, current_size) + current_size += get_size(v, max_size, current_size) + if current_size >= max_size: + return current_size + elif hasattr(obj, "__iter__"): + for i in obj: + current_size += get_size(i, max_size, current_size) + if current_size >= max_size: + return current_size + return current_size + + +def _truncate_documents(obj: Any, max_length: int) -> Tuple[Any, int]: + """Recursively truncate documents as needed to fit inside max_length characters.""" + if max_length <= 0: + return None, 0 + remaining = max_length + if hasattr(obj, "items"): + truncated: Any = {} + for k, v in obj.items(): + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated[k] = truncated_v + if remaining <= 0: + break + return truncated, remaining + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): + truncated: Any = [] # type:ignore[no-redef] + for v in obj: + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated.append(truncated_v) + if remaining <= 0: + break + return truncated, remaining + else: + return _truncate(obj, remaining) + + +def _truncate(obj: Any, remaining: int) -> Tuple[Any, int]: + size = get_size(obj, remaining) + 
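+    # (For an assumed 100-character string with remaining=20, obj[:20] is
+    # returned and remaining goes negative, telling callers to stop early.)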
+ if size <= remaining: + return obj, remaining - size + else: + try: + truncated = obj[:remaining] + except TypeError: + truncated = obj + return truncated, remaining - size diff --git a/bson/max_key.py b/bson/max_key.py index a6f50dcab7..445e12f519 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -1,4 +1,4 @@ -# Copyright 2010-2014 MongoDB, Inc. +# Copyright 2010-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,36 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MaxKey type. -""" +"""Representation for the MongoDB internal MaxKey type.""" +from __future__ import annotations +from typing import Any -class MaxKey(object): - """MongoDB internal MaxKey type. - .. versionchanged:: 2.7 - ``MaxKey`` now implements comparison operators. - """ +class MaxKey: + """MongoDB internal MaxKey type.""" + + __slots__ = () _type_marker = 127 - def __eq__(self, other): + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass + + def __eq__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __ne__(self, other): + def __hash__(self) -> int: + return hash(self._type_marker) + + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, other): + def __le__(self, other: Any) -> bool: return isinstance(other, MaxKey) - - def __lt__(self, dummy): + + def __lt__(self, dummy: Any) -> bool: return False - def __ge__(self, dummy): + def __ge__(self, dummy: Any) -> bool: return True - - def __gt__(self, other): + + def __gt__(self, other: Any) -> bool: return not isinstance(other, MaxKey) - def __repr__(self): + def __repr__(self) -> str: return "MaxKey()" diff --git a/bson/min_key.py b/bson/min_key.py index 48d020f6cd..37828dcf74 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -1,4 +1,4 @@ -# Copyright 2010-2014 MongoDB, Inc. +# Copyright 2010-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,36 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MinKey type. -""" +"""Representation for the MongoDB internal MinKey type.""" +from __future__ import annotations +from typing import Any -class MinKey(object): - """MongoDB internal MinKey type. - .. versionchanged:: 2.7 - ``MinKey`` now implements comparison operators. 
- """ +class MinKey: + """MongoDB internal MinKey type.""" + + __slots__ = () _type_marker = 255 - def __eq__(self, other): + def __getstate__(self) -> Any: + return {} + + def __setstate__(self, state: Any) -> None: + pass + + def __eq__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __ne__(self, other): + def __hash__(self) -> int: + return hash(self._type_marker) + + def __ne__(self, other: Any) -> bool: return not self == other - - def __le__(self, dummy): + + def __le__(self, dummy: Any) -> bool: return True - - def __lt__(self, other): + + def __lt__(self, other: Any) -> bool: return not isinstance(other, MinKey) - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: return isinstance(other, MinKey) - - def __gt__(self, dummy): + + def __gt__(self, dummy: Any) -> bool: return False - def __repr__(self): - return "MinKey()" \ No newline at end of file + def __repr__(self) -> str: + return "MinKey()" diff --git a/bson/objectid.py b/bson/objectid.py index fc00ccebb4..970c4e52e8 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,87 +12,100 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB `ObjectIds -`_. -""" +"""Tools for working with MongoDB ObjectIds.""" +from __future__ import annotations import binascii -import calendar import datetime -try: - import hashlib - _md5func = hashlib.md5 -except ImportError: # for Python < 2.5 - import md5 - _md5func = md5.new import os -import random -import socket import struct import threading import time +from random import SystemRandom +from typing import Any, NoReturn, Optional, Type, Union +from bson.datetime_ms import _datetime_to_millis from bson.errors import InvalidId -from bson.py3compat import (PY3, b, binary_type, text_type, - bytes_from_hex, string_types) from bson.tz_util import utc -EMPTY = b("") -ZERO = b("\x00") +_MAX_COUNTER_VALUE = 0xFFFFFF +_PACK_INT = struct.Struct(">I").pack +_PACK_INT_RANDOM = struct.Struct(">I5s").pack +_UNPACK_INT = struct.Struct(">I").unpack -def _machine_bytes(): - """Get the machine portion of an ObjectId. - """ - machine_hash = _md5func() - if PY3: - # gethostname() returns a unicode string in python 3.x - # while update() requires a byte string. - machine_hash.update(socket.gethostname().encode()) - else: - # Calling encode() here will fail with non-ascii hostnames - machine_hash.update(socket.gethostname()) - return machine_hash.digest()[0:3] +def _raise_invalid_id(oid: str) -> NoReturn: + raise InvalidId( + "%r is not a valid ObjectId, it must be a 12-byte input" + " or a 24-character hex string" % oid + ) -class ObjectId(object): - """A MongoDB ObjectId. - """ - _inc = random.randint(0, 0xFFFFFF) +def _random_bytes() -> bytes: + """Get the 5-byte random field of an ObjectId.""" + return os.urandom(5) + + +class ObjectId: + """A MongoDB ObjectId.""" + + _pid = os.getpid() + + _inc = SystemRandom().randint(0, _MAX_COUNTER_VALUE) _inc_lock = threading.Lock() - _machine_bytes = _machine_bytes() + __random = _random_bytes() - __slots__ = ('__id') + __slots__ = ("__id",) _type_marker = 7 - def __init__(self, oid=None): + def __init__(self, oid: Optional[Union[str, ObjectId, bytes]] = None) -> None: """Initialize a new ObjectId. 
- If `oid` is ``None``, create a new (unique) ObjectId. If `oid` - is an instance of (:class:`basestring` (:class:`str` or :class:`bytes` - in python 3), :class:`ObjectId`) validate it and use that. Otherwise, - a :class:`TypeError` is raised. If `oid` is invalid, - :class:`~bson.errors.InvalidId` is raised. + An ObjectId is a 12-byte unique identifier consisting of: + + - a 4-byte value representing the seconds since the Unix epoch, + - a 5-byte random value, + - a 3-byte counter, starting with a random value. + + By default, ``ObjectId()`` creates a new unique identifier. The + optional parameter `oid` can be an :class:`ObjectId`, or any 12 + :class:`bytes`. + + For example, the 12 bytes b'foo-bar-quux' do not follow the ObjectId + specification but they are acceptable input:: + + >>> ObjectId(b'foo-bar-quux') + ObjectId('666f6f2d6261722d71757578') - :Parameters: - - `oid` (optional): a valid ObjectId (12 byte binary or 24 character - hex string) + `oid` can also be a :class:`str` of 24 hex digits:: - .. versionadded:: 1.2.1 - The `oid` parameter can be a ``unicode`` instance (that contains - only hexadecimal digits). + >>> ObjectId('0123456789ab0123456789ab') + ObjectId('0123456789ab0123456789ab') - .. mongodoc:: objectids + Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor + 24 hex digits, or :class:`TypeError` if `oid` is not an accepted type. + + :param oid: a valid ObjectId. + + .. seealso:: The MongoDB documentation on `ObjectIds `_. + + .. versionchanged:: 3.8 + :class:`~bson.objectid.ObjectId` now implements the `ObjectID + specification version 0.2 + `_. """ if oid is None: self.__generate() + elif isinstance(oid, bytes) and len(oid) == 12: + self.__id = oid else: self.__validate(oid) @classmethod - def from_datetime(cls, generation_time): + def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> ObjectId: """Create a dummy ObjectId instance with a specific generation time. This method is useful for doing range queries on a field @@ -115,123 +128,98 @@ def from_datetime(cls, generation_time): >>> dummy_id = ObjectId.from_datetime(gen_time) >>> result = collection.find({"_id": {"$lt": dummy_id}}) - :Parameters: - - `generation_time`: :class:`~datetime.datetime` to be used + :param generation_time: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. - - .. versionchanged:: 1.8 - Properly handle timezone aware values for - `generation_time`. - - .. versionadded:: 1.6 """ - if generation_time.utcoffset() is not None: - generation_time = generation_time - generation_time.utcoffset() - ts = calendar.timegm(generation_time.timetuple()) - oid = struct.pack(">i", int(ts)) + ZERO * 8 + oid = ( + _PACK_INT(_datetime_to_millis(generation_time) // 1000) + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + ) return cls(oid) @classmethod - def is_valid(cls, oid): + def is_valid(cls: Type[ObjectId], oid: Any) -> bool: """Checks if a `oid` string is valid or not. - :Parameters: - - `oid`: the object id to validate + :param oid: the object id to validate .. versionadded:: 2.3 """ + if not oid: + return False + try: ObjectId(oid) return True except (InvalidId, TypeError): return False - def __generate(self): - """Generate a new value for this ObjectId. 
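A sketch, assuming only the layout the new docstring describes (4-byte timestamp, 5-byte random value, 3-byte counter), of pulling an ObjectId apart by hand; the slicing is illustrative, not a public API:

>>> import datetime, struct
>>> from bson.objectid import ObjectId
>>> from bson.tz_util import utc
>>> oid = ObjectId()
>>> ts = struct.unpack(">I", oid.binary[:4])[0]   # 4-byte big-endian timestamp
>>> oid.generation_time == datetime.datetime.fromtimestamp(ts, utc)
True
>>> ObjectId(oid.binary) == oid    # 12 raw bytes round-trip
True
>>> ObjectId(str(oid)) == oid      # 24 hex digits round-trip
True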
- """ - oid = EMPTY - - # 4 bytes current time - oid += struct.pack(">i", int(time.time())) - - # 3 bytes machine - oid += ObjectId._machine_bytes - - # 2 bytes pid - oid += struct.pack(">H", os.getpid() % 0xFFFF) - - # 3 bytes inc - ObjectId._inc_lock.acquire() - oid += struct.pack(">i", ObjectId._inc)[1:4] - ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF - ObjectId._inc_lock.release() - - self.__id = oid - - def __validate(self, oid): + @classmethod + def _random(cls) -> bytes: + """Generate a 5-byte random number once per process.""" + pid = os.getpid() + if pid != cls._pid: + cls._pid = pid + cls.__random = _random_bytes() + return cls.__random + + def __generate(self) -> None: + """Generate a new value for this ObjectId.""" + with ObjectId._inc_lock: + inc = ObjectId._inc + ObjectId._inc = (inc + 1) % (_MAX_COUNTER_VALUE + 1) + + # 4 bytes current time, 5 bytes random, 3 bytes inc. + self.__id = _PACK_INT_RANDOM(int(time.time()), ObjectId._random()) + _PACK_INT(inc)[1:4] + + def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. - Raises TypeError if id is not an instance of - (:class:`basestring` (:class:`str` or :class:`bytes` - in python 3), ObjectId) and InvalidId if it is not a + Raises TypeError if id is not an instance of :class:`str`, + :class:`bytes`, or ObjectId. Raises InvalidId if it is not a valid ObjectId. - :Parameters: - - `oid`: a valid ObjectId + :param oid: a valid ObjectId """ if isinstance(oid, ObjectId): - self.__id = oid.__id - elif isinstance(oid, string_types): - if len(oid) == 12: - if isinstance(oid, binary_type): - self.__id = oid - else: - raise InvalidId("%s is not a valid ObjectId" % oid) - elif len(oid) == 24: + self.__id = oid.binary + elif isinstance(oid, str): + if len(oid) == 24: try: - self.__id = bytes_from_hex(oid) + self.__id = bytes.fromhex(oid) except (TypeError, ValueError): - raise InvalidId("%s is not a valid ObjectId" % oid) + _raise_invalid_id(oid) else: - raise InvalidId("%s is not a valid ObjectId" % oid) + _raise_invalid_id(oid) else: - raise TypeError("id must be an instance of (%s, %s, ObjectId), " - "not %s" % (binary_type.__name__, - text_type.__name__, type(oid))) + raise TypeError(f"id must be an instance of (bytes, str, ObjectId), not {type(oid)}") @property - def binary(self): - """12-byte binary representation of this ObjectId. - """ + def binary(self) -> bytes: + """12-byte binary representation of this ObjectId.""" return self.__id @property - def generation_time(self): + def generation_time(self) -> datetime.datetime: """A :class:`datetime.datetime` instance representing the time of generation for this :class:`ObjectId`. The :class:`datetime.datetime` is timezone aware, and represents the generation time in UTC. It is precise to the second. - - .. versionchanged:: 1.8 - Now return an aware datetime instead of a naive one. - - .. versionadded:: 1.2 """ - t = struct.unpack(">i", self.__id[0:4])[0] - return datetime.datetime.fromtimestamp(t, utc) + timestamp = _UNPACK_INT(self.__id[0:4])[0] + return datetime.datetime.fromtimestamp(timestamp, utc) - def __getstate__(self): - """return value of object for pickling. + def __getstate__(self) -> bytes: + """Return value of object for pickling. needed explicitly because __slots__() defined. 
""" return self.__id - def __setstate__(self, value): - """explicit state set from pickling - """ - # Provide backwards compatability with OIDs + def __setstate__(self, value: Any) -> None: + """Explicit state set from pickling""" + # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): oid = value["_ObjectId__id"] @@ -240,52 +228,47 @@ def __setstate__(self, value): # ObjectIds pickled in python 2.x used `str` for __id. # In python 3.x this has to be converted to `bytes` # by encoding latin-1. - if PY3 and isinstance(oid, text_type): - self.__id = oid.encode('latin-1') + if isinstance(oid, str): + self.__id = oid.encode("latin-1") else: self.__id = oid - def __str__(self): - if PY3: - return binascii.hexlify(self.__id).decode() - return binascii.hexlify(self.__id) + def __str__(self) -> str: + return binascii.hexlify(self.__id).decode() - def __repr__(self): - return "ObjectId('%s')" % (str(self),) + def __repr__(self) -> str: + return f"ObjectId('{self!s}')" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id == other.__id + return self.__id == other.binary return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id != other.__id + return self.__id != other.binary return NotImplemented - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id < other.__id + return self.__id < other.binary return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id <= other.__id + return self.__id <= other.binary return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id > other.__id + return self.__id > other.binary return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, ObjectId): - return self.__id >= other.__id + return self.__id >= other.binary return NotImplemented - def __hash__(self): - """Get a hash value for this :class:`ObjectId`. - - .. versionadded:: 1.1 - """ + def __hash__(self) -> int: + """Get a hash value for this :class:`ObjectId`.""" return hash(self.__id) diff --git a/bson/py.typed b/bson/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/bson/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/bson/py3compat.py b/bson/py3compat.py deleted file mode 100644 index 1425ea4f2b..0000000000 --- a/bson/py3compat.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. 
- -"""Utility functions and definitions for python3 compatibility.""" - -import sys - -PY3 = sys.version_info[0] == 3 - -if PY3: - import codecs - - from io import BytesIO as StringIO - - def b(s): - # BSON and socket operations deal in binary data. In - # python 3 that means instances of `bytes`. In python - # 2.6 and 2.7 you can create an alias for `bytes` using - # the b prefix (e.g. b'foo'). Python 2.4 and 2.5 don't - # provide this marker so we provide this compat function. - # In python 3.x b('foo') results in b'foo'. - # See http://python3porting.com/problems.html#nicer-solutions - return codecs.latin_1_encode(s)[0] - - def bytes_from_hex(h): - return bytes.fromhex(h) - - binary_type = bytes - text_type = str - next_item = "__next__" - -else: - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - - def b(s): - # See comments above. In python 2.x b('foo') is just 'foo'. - return s - - def bytes_from_hex(h): - return h.decode('hex') - - binary_type = str - # 2to3 will convert this to "str". That's okay - # since we won't ever get here under python3. - text_type = unicode - next_item = "next" - -string_types = (binary_type, text_type) diff --git a/bson/raw_bson.py b/bson/raw_bson.py new file mode 100644 index 0000000000..9ead0765dc --- /dev/null +++ b/bson/raw_bson.py @@ -0,0 +1,200 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for representing raw BSON documents. + +Inserting and Retrieving RawBSONDocuments +========================================= + +Example: Moving a document between different databases/collections + +.. doctest:: + + >>> import bson + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> client = MongoClient(document_class=RawBSONDocument) + >>> client.drop_database("db") + >>> client.drop_database("replica_db") + >>> db = client.db + >>> result = db.test.insert_many( + ... [{"_id": 1, "a": 1}, {"_id": 2, "b": 1}, {"_id": 3, "c": 1}, {"_id": 4, "d": 1}] + ... ) + >>> replica_db = client.replica_db + >>> for doc in db.test.find(): + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + ... + raw document: b'...' + decoded document: {'_id': 1, 'a': 1} + raw document: b'...' + decoded document: {'_id': 2, 'b': 1} + raw document: b'...' + decoded document: {'_id': 3, 'c': 1} + raw document: b'...' + decoded document: {'_id': 4, 'd': 1} + +For use cases like moving documents across different databases or writing binary +blobs to disk, using raw BSON documents provides better speed and avoids the +overhead of decoding or encoding BSON. 
+""" +from __future__ import annotations + +from typing import Any, ItemsView, Iterator, Mapping, Optional + +from bson import _get_object_size, _raw_to_dict +from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER, CodecOptions +from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT + + +def _inflate_bson( + bson_bytes: bytes | memoryview, + codec_options: CodecOptions[RawBSONDocument], + raw_array: bool = False, +) -> dict[str, Any]: + """Inflates the top level fields of a BSON document. + + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. + """ + return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, {}, raw_array=raw_array) + + +class RawBSONDocument(Mapping[str, Any]): + """Representation for a MongoDB document that provides access to the raw + BSON bytes that compose it. + + Only when a field is accessed or modified within the document does + RawBSONDocument decode its bytes. + """ + + __slots__ = ("__raw", "__inflated_doc", "__codec_options") + _type_marker = _RAW_BSON_DOCUMENT_MARKER + __codec_options: CodecOptions[RawBSONDocument] + + def __init__( + self, + bson_bytes: bytes | memoryview, + codec_options: Optional[CodecOptions[RawBSONDocument]] = None, + ) -> None: + """Create a new :class:`RawBSONDocument` + + :class:`RawBSONDocument` is a representation of a BSON document that + provides access to the underlying raw BSON bytes. Only when a field is + accessed or modified within the document does RawBSONDocument decode + its bytes. + + :class:`RawBSONDocument` implements the ``Mapping`` abstract base + class from the standard library so it can be used like a read-only + ``dict``:: + + >>> from bson import encode + >>> raw_doc = RawBSONDocument(encode({'_id': 'my_doc'})) + >>> raw_doc.raw + b'...' + >>> raw_doc['_id'] + 'my_doc' + + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. The default is + :attr:`DEFAULT_RAW_BSON_OPTIONS`. + + .. versionchanged:: 3.8 + :class:`RawBSONDocument` now validates that the ``bson_bytes`` + passed in represent a single bson document. + + .. versionchanged:: 3.5 + If a :class:`~bson.codec_options.CodecOptions` is passed in, its + `document_class` must be :class:`RawBSONDocument`. + """ + self.__raw = bson_bytes + self.__inflated_doc: Optional[Mapping[str, Any]] = None + # Can't default codec_options to DEFAULT_RAW_BSON_OPTIONS in signature, + # it refers to this class RawBSONDocument. + if codec_options is None: + codec_options = DEFAULT_RAW_BSON_OPTIONS + elif not issubclass(codec_options.document_class, RawBSONDocument): + raise TypeError( + "RawBSONDocument cannot use CodecOptions with document " + f"class {codec_options.document_class}" + ) + self.__codec_options = codec_options + # Validate the bson object size. 
+ _get_object_size(bson_bytes, 0, len(bson_bytes)) + + @property + def raw(self) -> bytes | memoryview: + """The raw BSON bytes composing this document.""" + return self.__raw + + def items(self) -> ItemsView[str, Any]: + """Lazily decode and iterate elements in this document.""" + return self.__inflated.items() + + @property + def __inflated(self) -> Mapping[str, Any]: + if self.__inflated_doc is None: + # We already validated the object's size when this document was + # created, so no need to do that again. + self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) + return self.__inflated_doc + + @staticmethod + def _inflate_bson( + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options) + + def __getitem__(self, item: str) -> Any: + return self.__inflated[item] + + def __iter__(self) -> Iterator[str]: + return iter(self.__inflated) + + def __len__(self) -> int: + return len(self.__inflated) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, RawBSONDocument): + return self.__raw == other.raw + return NotImplemented + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.raw!r}, codec_options={self.__codec_options!r})" + + +class _RawArrayBSONDocument(RawBSONDocument): + """A RawBSONDocument that only expands sub-documents and arrays when accessed.""" + + @staticmethod + def _inflate_bson( + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options, raw_array=True) + + +DEFAULT_RAW_BSON_OPTIONS: CodecOptions[RawBSONDocument] = DEFAULT.with_options( + document_class=RawBSONDocument +) +_RAW_ARRAY_BSON_OPTIONS: CodecOptions[_RawArrayBSONDocument] = DEFAULT.with_options( + document_class=_RawArrayBSONDocument +) +"""The default :class:`~bson.codec_options.CodecOptions` for +:class:`RawBSONDocument`. +""" diff --git a/bson/regex.py b/bson/regex.py index 97b1291917..60cff4fd08 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 MongoDB, Inc. +# Copyright 2013-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB regular expressions. -""" +"""Tools for representing MongoDB regular expressions.""" +from __future__ import annotations import re +from typing import Any, Generic, Pattern, Type, TypeVar, Union +from bson._helpers import _getstate_slots, _setstate_slots from bson.son import RE_TYPE -from bson.py3compat import string_types -def str_flags_to_int(str_flags): +def str_flags_to_int(str_flags: str) -> int: flags = 0 if "i" in str_flags: flags |= re.IGNORECASE @@ -39,12 +40,21 @@ def str_flags_to_int(str_flags): return flags -class Regex(object): +_T = TypeVar("_T", str, bytes) + + +class Regex(Generic[_T]): """BSON regular expression data.""" + + __slots__ = ("pattern", "flags") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots + _type_marker = 11 @classmethod - def from_native(cls, regex): + def from_native(cls: Type[Regex[Any]], regex: Pattern[_T]) -> Regex[_T]: """Convert a Python regular expression into a ``Regex`` instance. 
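A sketch of the laziness RawBSONDocument promises above: the encoded bytes are stored verbatim (only their size is validated) and inflated into a mapping on first field access:

>>> from bson import encode
>>> from bson.raw_bson import RawBSONDocument
>>> doc = RawBSONDocument(encode({"x": 1}))
>>> doc.raw == encode({"x": 1})   # raw bytes kept as-is, nothing decoded yet
True
>>> doc["x"]                      # first access inflates the document
1
>>> dict(doc)
{'x': 1}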
Note that in Python 3, a regular expression compiled from a @@ -54,10 +64,9 @@ def from_native(cls, regex): >>> pattern = re.compile('.*') >>> regex = Regex.from_native(pattern) >>> regex.flags ^= re.UNICODE - >>> db.collection.insert({'pattern': regex}) + >>> db.collection.insert_one({'pattern': regex}) - :Parameters: - - `regex`: A regular expression object from ``re.compile()``. + :param regex: A regular expression object from ``re.compile()``. .. warning:: Python regular expressions use a different syntax and different @@ -69,48 +78,46 @@ def from_native(cls, regex): .. _PCRE: http://www.pcre.org/ """ if not isinstance(regex, RE_TYPE): - raise TypeError( - "regex must be a compiled regular expression, not %s" - % type(regex)) + raise TypeError("regex must be a compiled regular expression, not %s" % type(regex)) return Regex(regex.pattern, regex.flags) - def __init__(self, pattern, flags=0): + def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are incompatible with Python's regular expression dialect. - :Parameters: - - `pattern`: string - - `flags`: (optional) an integer bitmask, or a string of flag + :param pattern: string + :param flags: an integer bitmask, or a string of flag characters like "im" for IGNORECASE and MULTILINE """ - if not isinstance(pattern, string_types): + if not isinstance(pattern, (str, bytes)): raise TypeError("pattern must be a string, not %s" % type(pattern)) - self.pattern = pattern + self.pattern: _T = pattern - if isinstance(flags, string_types): + if isinstance(flags, str): self.flags = str_flags_to_int(flags) elif isinstance(flags, int): self.flags = flags else: - raise TypeError( - "flags must be a string or int, not %s" % type(flags)) + raise TypeError("flags must be a string or int, not %s" % type(flags)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): - return self.pattern == self.pattern and self.flags == other.flags + return self.pattern == other.pattern and self.flags == other.flags else: return NotImplemented - def __ne__(self, other): + __hash__ = None # type: ignore + + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "Regex(%r, %r)" % (self.pattern, self.flags) + def __repr__(self) -> str: + return f"Regex({self.pattern!r}, {self.flags!r})" - def try_compile(self): + def try_compile(self) -> Pattern[_T]: """Compile this :class:`Regex` as a Python regular expression. .. warning:: diff --git a/bson/son.py b/bson/son.py index 9e9defb204..8fd4f95cd2 100644 --- a/bson/son.py +++ b/bson/son.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,153 +16,116 @@ Regular dictionaries can be used instead of SON objects, but not when the order of keys is important. A SON object can be used just like a normal Python -dictionary.""" +dictionary. +""" +from __future__ import annotations import copy import re +from collections.abc import Mapping as _Mapping +from typing import ( + Any, + Dict, + Iterable, + Iterator, + Mapping, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + cast, +) # This sort of sucks, but seems to be as good as it gets... 
# This is essentially the same as re._pattern_type -RE_TYPE = type(re.compile("")) +RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) +_Key = TypeVar("_Key") +_Value = TypeVar("_Value") +_T = TypeVar("_T") -class SON(dict): + +class SON(Dict[_Key, _Value]): """SON data. A subclass of dict that maintains ordering of keys and provides a - few extra niceties for dealing with SON. SON objects can be - converted to and from BSON. - - The mapping from Python types to BSON types is as follows: - - ======================================= ============= =================== - Python Type BSON Type Supported Direction - ======================================= ============= =================== - None null both - bool boolean both - int [#int]_ int32 / int64 py -> bson - long int64 both - float number (real) both - string string py -> bson - unicode string both - list array both - dict / `SON` object both - datetime.datetime [#dt]_ [#dt2]_ date both - `bson.regex.Regex` / compiled re [#re]_ regex both - `bson.binary.Binary` binary both - `bson.objectid.ObjectId` oid both - `bson.dbref.DBRef` dbref both - None undefined bson -> py - unicode code bson -> py - `bson.code.Code` code py -> bson - unicode symbol bson -> py - bytes (Python 3) [#bytes]_ binary both - ======================================= ============= =================== - - Note that to save binary data it must be wrapped as an instance of - `bson.binary.Binary`. Otherwise it will be saved as a BSON string - and retrieved as unicode. - - .. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending - on its size. A BSON int32 will always decode to a Python int. In Python 2.x - a BSON int64 will always decode to a Python long. In Python 3.x a BSON - int64 will decode to a Python int since there is no longer a long type. - .. [#dt] datetime.datetime instances will be rounded to the nearest - millisecond when saved - .. [#dt2] all datetime.datetime instances are treated as *naive*. clients - should always use UTC. - .. [#re] :class:`~bson.regex.Regex` instances and regular expression - objects from ``re.compile()`` are both saved as BSON regular expressions. - BSON regular expressions are decoded as Python regular expressions by - default, or as :class:`~bson.regex.Regex` instances if the ``compile_re`` - option is set to ``False``. - .. [#bytes] The bytes type from Python 3.x is encoded as BSON binary with - subtype 0. In Python 3.x it will be decoded back to bytes. In Python 2.x - it will be decoded to an instance of :class:`~bson.binary.Binary` with - subtype 0. + few extra niceties for dealing with SON. SON provides an API + similar to collections.OrderedDict. 
""" - def __init__(self, data=None, **kwargs): + __keys: list[Any] + + def __init__( + self, + data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, + **kwargs: Any, + ) -> None: self.__keys = [] dict.__init__(self) self.update(data) self.update(kwargs) - def __new__(cls, *args, **kwargs): - instance = super(SON, cls).__new__(cls, *args, **kwargs) + def __new__(cls: Type[SON[_Key, _Value]], *args: Any, **kwargs: Any) -> SON[_Key, _Value]: + instance = super().__new__(cls, *args, **kwargs) instance.__keys = [] return instance - def __repr__(self): + def __repr__(self) -> str: result = [] for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) + result.append(f"({key!r}, {self[key]!r})") return "SON([%s])" % ", ".join(result) - def __setitem__(self, key, value): - if key not in self: + def __setitem__(self, key: _Key, value: _Value) -> None: + if key not in self.__keys: self.__keys.append(key) dict.__setitem__(self, key, value) - def __delitem__(self, key): + def __delitem__(self, key: _Key) -> None: self.__keys.remove(key) dict.__delitem__(self, key) - def keys(self): - return list(self.__keys) - - def copy(self): - other = SON() + def copy(self) -> SON[_Key, _Value]: + other: SON[_Key, _Value] = SON() other.update(self) return other # TODO this is all from UserDict.DictMixin. it could probably be made more # efficient. # second level definitions support higher levels - def __iter__(self): - for k in self.keys(): - yield k - - def has_key(self, key): - return key in self.keys() + def __iter__(self) -> Iterator[_Key]: + yield from self.__keys - def __contains__(self, key): - return key in self.keys() + def has_key(self, key: _Key) -> bool: + return key in self.__keys - # third level takes advantage of second level definitions - def iteritems(self): - for k in self: - yield (k, self[k]) - - def iterkeys(self): + def iterkeys(self) -> Iterator[_Key]: return self.__iter__() # fourth level uses definitions from lower levels - def itervalues(self): - for _, v in self.iteritems(): + def itervalues(self) -> Iterator[_Value]: + for _, v in self.items(): yield v - def values(self): - return [v for _, v in self.iteritems()] - - def items(self): - return [(key, self[key]) for key in self] + def values(self) -> list[_Value]: # type: ignore[override] + return [v for _, v in self.items()] - def clear(self): - for key in self.keys(): - del self[key] + def clear(self) -> None: + self.__keys = [] + super().clear() - def setdefault(self, key, default=None): + def setdefault(self, key: _Key, default: _Value) -> _Value: try: return self[key] except KeyError: self[key] = default return default - def pop(self, key, *args): + def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: - raise TypeError("pop expected at most 2 arguments, got "\ - + repr(1 + len(args))) + raise TypeError("pop expected at most 2 arguments, got " + repr(1 + len(args))) try: value = self[key] except KeyError: @@ -172,23 +135,23 @@ def pop(self, key, *args): del self[key] return value - def popitem(self): + def popitem(self) -> Tuple[_Key, _Value]: try: - k, v = self.iteritems().next() + k, v = next(iter(self.items())) except StopIteration: - raise KeyError('container is empty') + raise KeyError("container is empty") from None del self[k] return (k, v) - def update(self, other=None, **kwargs): + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # Make progressively weaker assumptions about "other" if other is None: pass - elif 
hasattr(other, 'iteritems'): # iteritems saves memory and lookups - for k, v in other.iteritems(): + elif hasattr(other, "items"): + for k, v in other.items(): self[k] = v - elif hasattr(other, 'keys'): - for k in other.keys(): + elif hasattr(other, "keys"): + for k in other: self[k] = other[k] else: for k, v in other: @@ -196,53 +159,53 @@ def update(self, other=None, **kwargs): if kwargs: self.update(kwargs) - def get(self, key, default=None): + def get( # type: ignore[override] + self, key: _Key, default: Optional[Union[_Value, _T]] = None + ) -> Union[_Value, _T, None]: try: return self[key] except KeyError: return default - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: """Comparison to another SON is order-sensitive while comparison to a regular dictionary is order-insensitive. """ if isinstance(other, SON): - return len(self) == len(other) and self.items() == other.items() - return self.to_dict() == other + return len(self) == len(other) and list(self.items()) == list(other.items()) + return cast(bool, self.to_dict() == other) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __len__(self): - return len(self.keys()) + def __len__(self) -> int: + return len(self.__keys) - def to_dict(self): + def to_dict(self) -> dict[_Key, _Value]: """Convert a SON document to a normal Python dictionary instance. This is trickier than just *dict(...)* because it needs to be recursive. """ - def transform_value(value): + def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] - if isinstance(value, SON): - value = dict(value) - if isinstance(value, dict): - for k, v in value.iteritems(): - value[k] = transform_value(v) - return value + elif isinstance(value, _Mapping): + return {k: transform_value(v) for k, v in value.items()} + else: + return value - return transform_value(dict(self)) + return cast("dict[_Key, _Value]", transform_value(dict(self))) - def __deepcopy__(self, memo): - out = SON() + def __deepcopy__(self, memo: dict[int, SON[_Key, _Value]]) -> SON[_Key, _Value]: + out: SON[_Key, _Value] = SON() val_id = id(self) if val_id in memo: - return memo.get(val_id) + return memo[val_id] memo[val_id] = out - for k, v in self.iteritems(): + for k, v in self.items(): if not isinstance(v, RE_TYPE): - v = copy.deepcopy(v, memo) + v = copy.deepcopy(v, memo) # noqa: PLW2901 out[k] = v return out diff --git a/bson/time64.c b/bson/time64.c index dcc01ad353..a21fbb90bd 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -1,4 +1,4 @@ -/* +/* Copyright (c) 2007-2010 Michael G Schwern @@ -29,13 +29,13 @@ THE SOFTWARE. /* Programmers who have available to them 64-bit time values as a 'long -long' type can use localtime64_r() and gmtime64_r() which correctly -converts the time even on 32-bit systems. Whether you have 64-bit time +long' type can use cbson_localtime64_r() and cbson_gmtime64_r() which correctly +converts the time even on 32-bit systems. Whether you have 64-bit time values will depend on the operating system. -localtime64_r() is a 64-bit equivalent of localtime_r(). +cbson_localtime64_r() is a 64-bit equivalent of localtime_r(). -gmtime64_r() is a 64-bit equivalent of gmtime_r(). +cbson_gmtime64_r() is a 64-bit equivalent of gmtime_r(). */ @@ -43,12 +43,11 @@ gmtime64_r() is a 64-bit equivalent of gmtime_r(). #define _CRT_SECURE_NO_WARNINGS #endif -#include -#include -#include -#include +/* Including Python.h fixes issues with interpreters built with -std=c99. 
*/ +#define PY_SSIZE_T_CLEAN +#include "Python.h" + #include -#include #include "time64.h" #include "time64_limits.h" @@ -74,7 +73,7 @@ static const Year years_in_gregorian_cycle = 400; #define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; -/* Year range we can trust the time funcitons with */ +/* Year range we can trust the time functions with */ #define MAX_SAFE_YEAR 2037 #define MIN_SAFE_YEAR 1971 @@ -112,7 +111,7 @@ static const int safe_years_low[SOLAR_CYCLE_LENGTH] = { #define CHEAT_YEARS 108 #define IS_LEAP(n) ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0) -#define WRAP(a,b,m) ((a) = ((a) < 0 ) ? ((b)--, (a) + (m)) : (a)) +#define _TIME64_WRAP(a,b,m) ((a) = ((a) < 0 ) ? ((b)--, (a) + (m)) : (a)) #ifdef USE_SYSTEM_LOCALTIME # define SHOULD_USE_SYSTEM_LOCALTIME(a) ( \ @@ -159,7 +158,7 @@ static int is_exception_century(Year year) The result is like cmp. Ignores things like gmtoffset and dst */ -int cmp_date( const struct TM* left, const struct tm* right ) { +int cbson_cmp_date( const struct TM* left, const struct tm* right ) { if( left->tm_year > right->tm_year ) return 1; else if( left->tm_year < right->tm_year ) @@ -197,11 +196,11 @@ int cmp_date( const struct TM* left, const struct tm* right ) { /* Check if a date is safely inside a range. The intention is to check if its a few days inside. */ -int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cmp_date(date, min) == -1 ) +int cbson_date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { + if( cbson_cmp_date(date, min) == -1 ) return 0; - if( cmp_date(date, max) == 1 ) + if( cbson_cmp_date(date, max) == 1 ) return 0; return 1; @@ -210,9 +209,9 @@ int date_in_safe_range( const struct TM* date, const struct tm* min, const struc /* timegm() is not in the C or POSIX spec, but it is such a useful extension I would be remiss in leaving it out. 
Also I need it - for localtime64() + for cbson_localtime64() */ -Time64_T timegm64(const struct TM *date) { +Time64_T cbson_timegm64(const struct TM *date) { Time64_T days = 0; Time64_T seconds = 0; Year year; @@ -280,7 +279,7 @@ static int check_tm(struct TM *tm) assert(tm->tm_wday >= 0); assert(tm->tm_wday <= 6); - + assert(tm->tm_yday >= 0); assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]); @@ -356,7 +355,7 @@ static int safe_year(const Year year) year_cycle += 17; year_cycle %= SOLAR_CYCLE_LENGTH; - if( year_cycle < 0 ) + if( year_cycle < 0 ) year_cycle = SOLAR_CYCLE_LENGTH + year_cycle; assert( year_cycle >= 0 ); @@ -377,7 +376,7 @@ static int safe_year(const Year year) } -void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { +void pymongo_copy_tm_to_TM64(const struct tm *src, struct TM *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -409,7 +408,7 @@ void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { } -void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { +void cbson_copy_TM64_to_tm(const struct TM *src, struct tm *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -442,7 +441,7 @@ void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { /* Simulate localtime_r() to the best of our ability */ -struct tm * fake_localtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_localtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = localtime(time); assert(result != NULL); @@ -459,7 +458,7 @@ struct tm * fake_localtime_r(const time_t *time, struct tm *result) { /* Simulate gmtime_r() to the best of our ability */ -struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_gmtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = gmtime(time); assert(result != NULL); @@ -500,22 +499,22 @@ static Time64_T seconds_between_years(Year left_year, Year right_year) { } -Time64_T mktime64(const struct TM *input_date) { +Time64_T cbson_mktime64(const struct TM *input_date) { struct tm safe_date; struct TM date; Time64_T time; Year year = input_date->tm_year + 1900; - if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) + if( cbson_date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) { - copy_TM64_to_tm(input_date, &safe_date); + cbson_copy_TM64_to_tm(input_date, &safe_date); return (Time64_T)mktime(&safe_date); } /* Have to make the year safe in date else it won't fit in safe_date */ date = *input_date; date.tm_year = safe_year(year) - 1900; - copy_TM64_to_tm(&date, &safe_date); + cbson_copy_TM64_to_tm(&date, &safe_date); time = (Time64_T)mktime(&safe_date); @@ -527,11 +526,11 @@ Time64_T mktime64(const struct TM *input_date) { /* Because I think mktime() is a crappy name */ Time64_T timelocal64(const struct TM *date) { - return mktime64(date); + return cbson_mktime64(date); } -struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) +struct TM *cbson_gmtime64_r (const Time64_T *in_time, struct TM *p) { int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; Time64_T v_tm_tday; @@ -543,17 +542,19 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) assert(p != NULL); +#ifdef USE_SYSTEM_GMTIME /* Use the system gmtime() if time_t is small enough */ if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) { time_t safe_time = (time_t)*in_time; struct tm safe_date; GMTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, p); + pymongo_copy_tm_to_TM64(&safe_date, p); 
assert(check_tm(p)); return p; } +#endif #ifdef HAS_TM_TM_GMTOFF p->tm_gmtoff = 0; @@ -572,9 +573,9 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) time /= 24; v_tm_tday = time; - WRAP (v_tm_sec, v_tm_min, 60); - WRAP (v_tm_min, v_tm_hour, 60); - WRAP (v_tm_hour, v_tm_tday, 24); + _TIME64_WRAP (v_tm_sec, v_tm_min, 60); + _TIME64_WRAP (v_tm_min, v_tm_hour, 60); + _TIME64_WRAP (v_tm_hour, v_tm_tday, 24); v_tm_wday = (int)((v_tm_tday + 4) % 7); if (v_tm_wday < 0) @@ -651,14 +652,14 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) p->tm_hour = v_tm_hour; p->tm_mon = v_tm_mon; p->tm_wday = v_tm_wday; - + assert(check_tm(p)); return p; } -struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) +struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) { time_t safe_time; struct tm safe_date; @@ -668,6 +669,7 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) assert(local_tm != NULL); +#ifdef USE_SYSTEM_LOCALTIME /* Use the system localtime() if time_t is small enough */ if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) { safe_time = (time_t)*time; @@ -676,14 +678,15 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) LOCALTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); assert(check_tm(local_tm)); return local_tm; } +#endif - if( gmtime64_r(time, &gm_tm) == NULL ) { - TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); + if( cbson_gmtime64_r(time, &gm_tm) == NULL ) { + TIME64_TRACE1("cbson_gmtime64_r returned null for %lld\n", *time); return NULL; } @@ -697,13 +700,13 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; } - safe_time = (time_t)timegm64(&gm_tm); + safe_time = (time_t)cbson_timegm64(&gm_tm); if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); return NULL; } - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); local_tm->tm_year = (int)orig_year; if( local_tm->tm_year != orig_year ) { @@ -733,29 +736,29 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) local_tm->tm_year++; } - /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st + /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st in a non-leap xx00. There is one point in the cycle we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as + year. So we need to correct for Dec 31st coming out as the 366th day of the year. 
*/ if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) local_tm->tm_yday--; assert(check_tm(local_tm)); - + return local_tm; } -int valid_tm_wday( const struct TM* date ) { +int cbson_valid_tm_wday( const struct TM* date ) { if( 0 <= date->tm_wday && date->tm_wday <= 6 ) return 1; else return 0; } -int valid_tm_mon( const struct TM* date ) { +int cbson_valid_tm_mon( const struct TM* date ) { if( 0 <= date->tm_mon && date->tm_mon <= 11 ) return 1; else @@ -764,15 +767,15 @@ int valid_tm_mon( const struct TM* date ) { /* Non-thread safe versions of the above */ -struct TM *localtime64(const Time64_T *time) { +struct TM *cbson_localtime64(const Time64_T *time) { #ifdef _MSC_VER _tzset(); #else tzset(); #endif - return localtime64_r(time, &Static_Return_Date); + return cbson_localtime64_r(time, &Static_Return_Date); } -struct TM *gmtime64(const Time64_T *time) { - return gmtime64_r(time, &Static_Return_Date); +struct TM *cbson_gmtime64(const Time64_T *time) { + return cbson_gmtime64_r(time, &Static_Return_Date); } diff --git a/bson/time64.h b/bson/time64.h index df9be9b0ee..6321eb307e 100644 --- a/bson/time64.h +++ b/bson/time64.h @@ -37,17 +37,17 @@ struct TM64 { #define TM TM64 #else #define TM tm -#endif +#endif /* Declare public functions */ -struct TM *gmtime64_r (const Time64_T *, struct TM *); -struct TM *localtime64_r (const Time64_T *, struct TM *); -struct TM *gmtime64 (const Time64_T *); -struct TM *localtime64 (const Time64_T *); +struct TM *cbson_gmtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_localtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_gmtime64 (const Time64_T *); +struct TM *cbson_localtime64 (const Time64_T *); -Time64_T timegm64 (const struct TM *); -Time64_T mktime64 (const struct TM *); +Time64_T cbson_timegm64 (const struct TM *); +Time64_T cbson_mktime64 (const struct TM *); Time64_T timelocal64 (const struct TM *); @@ -55,12 +55,12 @@ Time64_T timelocal64 (const struct TM *); #ifdef HAS_LOCALTIME_R # define LOCALTIME_R(clock, result) localtime_r(clock, result) #else -# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) +# define LOCALTIME_R(clock, result) cbson_fake_localtime_r(clock, result) #endif #ifdef HAS_GMTIME_R # define GMTIME_R(clock, result) gmtime_r(clock, result) #else -# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) +# define GMTIME_R(clock, result) cbson_fake_gmtime_r(clock, result) #endif diff --git a/bson/time64_limits.h b/bson/time64_limits.h index fd4455f663..1d30607bae 100644 --- a/bson/time64_limits.h +++ b/bson/time64_limits.h @@ -1,4 +1,4 @@ -/* +/* Maximum and minimum inputs your system's respective time functions can correctly handle. time64.h will use your system functions if the input falls inside these ranges and corresponding USE_SYSTEM_* diff --git a/bson/timestamp.py b/bson/timestamp.py index 690abdacf1..949bd7b36c 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -1,4 +1,4 @@ -# Copyright 2010-2014 MongoDB, Inc. +# Copyright 2010-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,23 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB internal Timestamps. 
-""" +"""Tools for representing MongoDB internal Timestamps.""" +from __future__ import annotations import calendar import datetime +from typing import Any, Union +from bson._helpers import _getstate_slots, _setstate_slots from bson.tz_util import utc UPPERBOUND = 4294967296 -class Timestamp(object): - """MongoDB internal timestamps used in the opLog. - """ + +class Timestamp: + """MongoDB internal timestamps used in the opLog.""" + + __slots__ = ("__time", "__inc") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots _type_marker = 17 - def __init__(self, time, inc): + def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need @@ -40,23 +47,20 @@ def __init__(self, time, inc): an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). - :Parameters: - - `time`: time in seconds since epoch UTC, or a naive UTC + :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` - - `inc`: the incrementing counter - - .. versionchanged:: 1.7 - `time` can now be a :class:`~datetime.datetime` instance. + :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): - if time.utcoffset() is not None: - time = time - time.utcoffset() + offset = time.utcoffset() + if offset is not None: + time = time - offset time = int(calendar.timegm(time.timetuple())) - if not isinstance(time, (int, long)): - raise TypeError("time must be an instance of int") - if not isinstance(inc, (int, long)): - raise TypeError("inc must be an instance of int") + if not isinstance(time, int): + raise TypeError(f"time must be an instance of int, not {type(time)}") + if not isinstance(inc, int): + raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: @@ -66,54 +70,54 @@ def __init__(self, time, inc): self.__inc = inc @property - def time(self): - """Get the time portion of this :class:`Timestamp`. - """ + def time(self) -> int: + """Get the time portion of this :class:`Timestamp`.""" return self.__time @property - def inc(self): - """Get the inc portion of this :class:`Timestamp`. 
- """ + def inc(self) -> int: + """Get the inc portion of this :class:`Timestamp`.""" return self.__inc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): - return (self.__time == other.time and self.__inc == other.inc) + return self.__time == other.time and self.__inc == other.inc else: return NotImplemented - def __ne__(self, other): + def __hash__(self) -> int: + return hash(self.time) ^ hash(self.inc) + + def __ne__(self, other: Any) -> bool: return not self == other - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) < (other.time, other.inc) return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) <= (other.time, other.inc) return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) > (other.time, other.inc) return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented - def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) + def __repr__(self) -> str: + return f"Timestamp({self.__time}, {self.__inc})" - def as_datetime(self): + def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. - .. versionchanged:: 1.8 - The returned datetime is now timezone aware. + The returned datetime's timezone is UTC. """ return datetime.datetime.fromtimestamp(self.__time, utc) diff --git a/bson/typings.py b/bson/typings.py new file mode 100644 index 0000000000..5913860556 --- /dev/null +++ b/bson/typings.py @@ -0,0 +1,31 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by bson""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, TypeVar, Union + +if TYPE_CHECKING: + from array import array + from mmap import mmap + + from bson.raw_bson import RawBSONDocument + + +# Common Shared Types. +_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +_DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) +_ReadableBuffer = Union[bytes, memoryview, bytearray, "mmap", "array"] # type: ignore[type-arg] diff --git a/bson/tz_util.py b/bson/tz_util.py index 75e070cf6e..4d31c04f9c 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -1,4 +1,4 @@ -# Copyright 2010-2014 MongoDB, Inc. +# Copyright 2010-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +13,12 @@ # limitations under the License. 
"""Timezone related utilities for BSON.""" +from __future__ import annotations -from datetime import (timedelta, - tzinfo) +from datetime import datetime, timedelta, tzinfo +from typing import Optional, Tuple, Union -ZERO = timedelta(0) +ZERO: timedelta = timedelta(0) class FixedOffset(tzinfo): @@ -28,25 +29,28 @@ class FixedOffset(tzinfo): Defining __getinitargs__ enables pickling / copying. """ - def __init__(self, offset, name): + def __init__(self, offset: Union[float, timedelta], name: str) -> None: if isinstance(offset, timedelta): self.__offset = offset else: self.__offset = timedelta(minutes=offset) self.__name = name - def __getinitargs__(self): + def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name - def utcoffset(self, dt): + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__offset!r}, {self.__name!r})" + + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset - def tzname(self, dt): + def tzname(self, dt: Optional[datetime]) -> str: return self.__name - def dst(self, dt): + def dst(self, dt: Optional[datetime]) -> timedelta: return ZERO -utc = FixedOffset(0, "UTC") +utc: FixedOffset = FixedOffset(0, "UTC") """Fixed offset timezone representing UTC.""" diff --git a/doc/Makefile b/doc/Makefile index 9fa6e3a48c..d4bb2cbb9e 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,89 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . BUILDDIR = _build -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." 
- -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyMongo.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyMongo.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." +.PHONY: help Makefile -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/__init__.py b/doc/__init__.py index 8b13789179..e69de29bb2 100644 --- a/doc/__init__.py +++ b/doc/__init__.py @@ -1 +0,0 @@ - diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html new file mode 100644 index 0000000000..6141284a48 --- /dev/null +++ b/doc/_templates/layout.html @@ -0,0 +1,18 @@ +{% extends "!layout.html" %} + +{%- block extrahead %} + + {% if theme_googletag %} + + + + + + {% endif %} +{%- endblock -%} diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index 57b87255fc..7084a45b4e 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -9,15 +9,27 @@ .. autodata:: OLD_BINARY_SUBTYPE .. autodata:: OLD_UUID_SUBTYPE .. autodata:: UUID_SUBTYPE + .. autodata:: STANDARD + .. autodata:: PYTHON_LEGACY .. autodata:: JAVA_LEGACY .. autodata:: CSHARP_LEGACY .. autodata:: MD5_SUBTYPE + .. autodata:: COLUMN_SUBTYPE + .. autodata:: SENSITIVE_SUBTYPE + .. autodata:: VECTOR_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE - .. autoclass:: Binary(data[, subtype=BINARY_SUBTYPE]) + .. autoclass:: UuidRepresentation + :members: + + .. autoclass:: BinaryVectorDtype :members: :show-inheritance: - .. autoclass:: UUIDLegacy(obj) + .. autoclass:: BinaryVector + :members: + + + .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: diff --git a/doc/api/bson/code.rst b/doc/api/bson/code.rst index 80b3b78b9a..9050196d6a 100644 --- a/doc/api/bson/code.rst +++ b/doc/api/bson/code.rst @@ -4,6 +4,6 @@ .. automodule:: bson.code :synopsis: Tools for representing JavaScript code - .. autoclass:: Code(code[, scope=None[, **kwargs]]) + .. 
autoclass:: Code(code, scope=None, **kwargs) :members: :show-inheritance: diff --git a/doc/api/bson/codec_options.rst b/doc/api/bson/codec_options.rst new file mode 100644 index 0000000000..bea1358f5f --- /dev/null +++ b/doc/api/bson/codec_options.rst @@ -0,0 +1,6 @@ +:mod:`codec_options` -- Tools for specifying BSON codec options +=============================================================== + +.. automodule:: bson.codec_options + :synopsis: Tools for specifying BSON codec options. + :members: diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst new file mode 100644 index 0000000000..1afaad69fc --- /dev/null +++ b/doc/api/bson/datetime_ms.rst @@ -0,0 +1,6 @@ +:mod:`datetime_ms` -- Support for BSON UTC Datetime +=================================================== + +.. automodule:: bson.datetime_ms + :synopsis: Support for BSON UTC datetimes. + :members: diff --git a/doc/api/bson/decimal128.rst b/doc/api/bson/decimal128.rst new file mode 100644 index 0000000000..60ac953046 --- /dev/null +++ b/doc/api/bson/decimal128.rst @@ -0,0 +1,4 @@ +:mod:`decimal128` -- Support for BSON Decimal128 +================================================ +.. automodule:: bson.decimal128 + :members: diff --git a/doc/api/bson/errors.rst b/doc/api/bson/errors.rst index cfb9e98164..f20174f468 100644 --- a/doc/api/bson/errors.rst +++ b/doc/api/bson/errors.rst @@ -1,5 +1,5 @@ :mod:`errors` -- Exceptions raised by the :mod:`bson` package -================================================================ +============================================================= .. automodule:: bson.errors :synopsis: Exceptions raised by the bson package diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 8b8a905040..d5b69607de 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -3,7 +3,7 @@ .. automodule:: bson :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: + :members: BSON, decode, decode_all, decode_file_iter, decode_iter, encode, gen_list_name, has_c, is_valid Sub-modules: @@ -11,14 +11,19 @@ Sub-modules: :maxdepth: 2 binary - regex code + codec_options + datetime_ms dbref + decimal128 errors + int64 json_util max_key min_key objectid + raw_bson + regex son timestamp tz_util diff --git a/doc/api/bson/int64.rst b/doc/api/bson/int64.rst new file mode 100644 index 0000000000..a06962d4f0 --- /dev/null +++ b/doc/api/bson/int64.rst @@ -0,0 +1,7 @@ +:mod:`int64` -- Tools for representing BSON int64 +================================================= +.. versionadded:: 3.0 + +.. automodule:: bson.int64 + :synopsis: Tools for representing BSON int64 + :members: diff --git a/doc/api/bson/json_util.rst b/doc/api/bson/json_util.rst index 6348269f91..f8dd50e0d3 100644 --- a/doc/api/bson/json_util.rst +++ b/doc/api/bson/json_util.rst @@ -1,8 +1,7 @@ :mod:`json_util` -- Tools for using Python's :mod:`json` module with BSON documents -====================================================================================== -.. versionadded:: 1.1.1 - +=================================================================================== .. 
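
The codec_options, datetime_ms, and tz_util modules touched in this batch interact when decoding BSON datetimes. A minimal sketch (standalone, no server required; it only assumes PyMongo is installed):

import pickle
from datetime import datetime, timedelta

from bson import decode, encode
from bson.codec_options import CodecOptions
from bson.tz_util import FixedOffset, utc

# Round-trip a timezone-aware datetime. With tz_aware=True, decoded
# datetimes carry bson.tz_util.utc as their tzinfo.
raw = encode({"when": datetime.now(utc)})
doc = decode(raw, codec_options=CodecOptions(tz_aware=True))
print(doc["when"].tzinfo)  # FixedOffset(datetime.timedelta(0), 'UTC')

# FixedOffset accepts minutes or a timedelta; __getinitargs__ is what
# makes it pickle- and copy-friendly.
cst = FixedOffset(timedelta(hours=-6), "CST")
assert pickle.loads(pickle.dumps(cst)).tzname(None) == "CST"
print(doc["when"].astimezone(cst))
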
automodule:: bson.json_util :synopsis: Tools for using Python's json module with BSON documents :members: :undoc-members: + :member-order: bysource diff --git a/doc/api/bson/max_key.rst b/doc/api/bson/max_key.rst index e3adf9a5cc..96ab58d1d6 100644 --- a/doc/api/bson/max_key.rst +++ b/doc/api/bson/max_key.rst @@ -1,7 +1,5 @@ :mod:`max_key` -- Representation for the MongoDB internal MaxKey type ===================================================================== -.. versionadded:: 1.7 - .. automodule:: bson.max_key :synopsis: Representation for the MongoDB internal MaxKey type :members: diff --git a/doc/api/bson/min_key.rst b/doc/api/bson/min_key.rst index 582568176d..7539bbb403 100644 --- a/doc/api/bson/min_key.rst +++ b/doc/api/bson/min_key.rst @@ -1,7 +1,5 @@ :mod:`min_key` -- Representation for the MongoDB internal MinKey type ===================================================================== -.. versionadded:: 1.7 - .. automodule:: bson.min_key :synopsis: Representation for the MongoDB internal MinKey type :members: diff --git a/doc/api/bson/objectid.rst b/doc/api/bson/objectid.rst index ff97fe7f3f..8f8ef066d9 100644 --- a/doc/api/bson/objectid.rst +++ b/doc/api/bson/objectid.rst @@ -4,7 +4,7 @@ .. automodule:: bson.objectid :synopsis: Tools for working with MongoDB ObjectIds - .. autoclass:: bson.objectid.ObjectId([oid=None]) + .. autoclass:: bson.objectid.ObjectId(oid=None) :members: .. describe:: str(o) diff --git a/doc/api/bson/raw_bson.rst b/doc/api/bson/raw_bson.rst new file mode 100644 index 0000000000..d4f9fbbb5b --- /dev/null +++ b/doc/api/bson/raw_bson.rst @@ -0,0 +1,5 @@ +:mod:`raw_bson` -- Tools for representing raw BSON documents. +============================================================= +.. automodule:: bson.raw_bson + :synopsis: Tools for representing raw BSON documents. + :members: diff --git a/doc/api/bson/timestamp.rst b/doc/api/bson/timestamp.rst index 964730e321..375d3dc9cd 100644 --- a/doc/api/bson/timestamp.rst +++ b/doc/api/bson/timestamp.rst @@ -1,7 +1,5 @@ :mod:`timestamp` -- Tools for representing MongoDB internal Timestamps ====================================================================== -.. versionadded:: 1.5 - .. automodule:: bson.timestamp :synopsis: Tools for representing MongoDB internal Timestamps :members: diff --git a/doc/api/gridfs/asynchronous/grid_file.rst b/doc/api/gridfs/asynchronous/grid_file.rst new file mode 100644 index 0000000000..fbf34adc8a --- /dev/null +++ b/doc/api/gridfs/asynchronous/grid_file.rst @@ -0,0 +1,19 @@ +:mod:`grid_file` -- Async tools for representing files stored in GridFS +======================================================================= + +.. automodule:: gridfs.asynchronous.grid_file + :synopsis: Async tools for representing files stored in GridFS + + .. autoclass:: AsyncGridIn + :members: + + .. autoattribute:: _id + + .. autoclass:: AsyncGridOut + :members: + + .. autoattribute:: _id + .. automethod:: __aiter__ + + .. autoclass:: AsyncGridOutCursor + :members: diff --git a/doc/api/gridfs/asynchronous/index.rst b/doc/api/gridfs/asynchronous/index.rst new file mode 100644 index 0000000000..7b6ebb28b8 --- /dev/null +++ b/doc/api/gridfs/asynchronous/index.rst @@ -0,0 +1,14 @@ +:mod:`gridfs async` -- Async tools for working with GridFS +========================================================== + + +.. automodule:: gridfs.asynchronous + :synopsis: Async tools for working with GridFS + :members: AsyncGridFS, AsyncGridFSBucket + +Sub-modules: + +.. 
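
Since the async GridFS pages above are new, a short usage sketch may help; the URI, the "demo" database name, and a running server are illustrative assumptions:

import asyncio

from gridfs.asynchronous import AsyncGridFSBucket
from pymongo import AsyncMongoClient

async def main() -> None:
    client = AsyncMongoClient("mongodb://localhost:27017")
    bucket = AsyncGridFSBucket(client.demo)
    # AsyncGridIn/AsyncGridOut objects are created by the bucket helpers.
    file_id = await bucket.upload_from_stream("hello.txt", b"hello world")
    grid_out = await bucket.open_download_stream(file_id)
    print(await grid_out.read())
    await client.close()

asyncio.run(main())
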
toctree::
+   :maxdepth: 2
+
+   grid_file
diff --git a/doc/api/gridfs/grid_file.rst b/doc/api/gridfs/grid_file.rst
index 5daa82a90d..59f4d03f5d 100644
--- a/doc/api/gridfs/grid_file.rst
+++ b/doc/api/gridfs/grid_file.rst
@@ -15,8 +15,5 @@
       .. autoattribute:: _id
       .. automethod:: __iter__

-   .. autoclass:: GridFile
-      :members:
-
    .. autoclass:: GridOutCursor
       :members:
diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst
index 6764ef622b..190c561d05 100644
--- a/doc/api/gridfs/index.rst
+++ b/doc/api/gridfs/index.rst
@@ -3,12 +3,13 @@

 .. automodule:: gridfs
    :synopsis: Tools for working with GridFS
-   :members:
+   :members: GridFS, GridFSBucket

 Sub-modules:

 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 3

+   asynchronous/index
    errors
    grid_file
diff --git a/doc/api/index.rst b/doc/api/index.rst
index 64c407fd04..339f5843bf 100644
--- a/doc/api/index.rst
+++ b/doc/api/index.rst
@@ -3,10 +3,10 @@ API Documentation

 The PyMongo distribution contains three top-level packages for
 interacting with MongoDB. :mod:`bson` is an implementation of the
-`BSON format `_, :mod:`pymongo` is a
+`BSON format `_, :mod:`pymongo` is a
 full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools
 for working with the `GridFS
-`_ storage
+`_ storage
 specification.

 .. toctree::
diff --git a/doc/api/pymongo/asynchronous/change_stream.rst b/doc/api/pymongo/asynchronous/change_stream.rst
new file mode 100644
index 0000000000..1b506fdb55
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/change_stream.rst
@@ -0,0 +1,6 @@
+:mod:`change_stream` -- Watch changes on a collection, database, or cluster
+===========================================================================
+
+
+.. automodule:: pymongo.asynchronous.change_stream
+   :members:
diff --git a/doc/api/pymongo/asynchronous/client_session.rst b/doc/api/pymongo/asynchronous/client_session.rst
new file mode 100644
index 0000000000..d8403325d7
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/client_session.rst
@@ -0,0 +1,6 @@
+:mod:`client_session` -- Logical sessions for sequential operations
+===================================================================
+
+
+.. automodule:: pymongo.asynchronous.client_session
+   :members:
diff --git a/doc/api/pymongo/asynchronous/collection.rst b/doc/api/pymongo/asynchronous/collection.rst
new file mode 100644
index 0000000000..779295ced1
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/collection.rst
@@ -0,0 +1,62 @@
+:mod:`collection` -- Collection level operations
+================================================
+
+
+.. automodule:: pymongo.asynchronous.collection
+   :synopsis: Collection level operations
+
+   .. autoclass:: pymongo.asynchronous.collection.ReturnDocument
+
+   .. autoclass:: pymongo.asynchronous.collection.AsyncCollection(database, name, create=False, **kwargs)
+
+      .. describe:: c[name] || c.name
+
+         Get the `name` sub-collection of :class:`AsyncCollection` `c`.
+
+         Raises :class:`~pymongo.errors.InvalidName` if an invalid
+         collection name is used.
+
+      .. autoattribute:: full_name
+      .. autoattribute:: name
+      .. autoattribute:: database
+      .. autoattribute:: codec_options
+      .. autoattribute:: read_preference
+      .. autoattribute:: write_concern
+      .. autoattribute:: read_concern
+      .. automethod:: with_options
+      .. automethod:: bulk_write
+      .. automethod:: insert_one
+      .. automethod:: insert_many
+      .. automethod:: replace_one
+      .. automethod:: update_one
+      .. automethod:: update_many
+      .. automethod:: delete_one
+      .. automethod:: delete_many
+      .. automethod:: aggregate
+      .. 
automethod:: aggregate_raw_batches + .. automethod:: watch + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_one(filter=None, *args, **kwargs) + .. automethod:: find_one_and_delete + .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) + .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, **kwargs) + .. automethod:: count_documents + .. automethod:: estimated_document_count + .. automethod:: distinct + .. automethod:: create_index + .. automethod:: create_indexes + .. automethod:: drop_index + .. automethod:: drop_indexes + .. automethod:: list_indexes + .. automethod:: index_information + .. automethod:: create_search_index + .. automethod:: create_search_indexes + .. automethod:: drop_search_index + .. automethod:: list_search_indexes + .. automethod:: update_search_index + .. automethod:: drop + .. automethod:: rename + .. automethod:: options + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/asynchronous/command_cursor.rst b/doc/api/pymongo/asynchronous/command_cursor.rst new file mode 100644 index 0000000000..1f94c6e525 --- /dev/null +++ b/doc/api/pymongo/asynchronous/command_cursor.rst @@ -0,0 +1,7 @@ +:mod:`command_cursor` -- Tools for iterating over MongoDB command results +========================================================================= + + +.. automodule:: pymongo.asynchronous.command_cursor + :synopsis: Tools for iterating over MongoDB command results + :members: diff --git a/doc/api/pymongo/asynchronous/cursor.rst b/doc/api/pymongo/asynchronous/cursor.rst new file mode 100644 index 0000000000..f511734de4 --- /dev/null +++ b/doc/api/pymongo/asynchronous/cursor.rst @@ -0,0 +1,17 @@ +:mod:`cursor` -- Tools for iterating over MongoDB query results +=============================================================== + + +.. automodule:: pymongo.asynchronous.cursor + :synopsis: Tools for iterating over MongoDB query results + + .. autoclass:: pymongo.asynchronous.cursor.AsyncCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + :members: + + .. describe:: c[index] + + See :meth:`__getitem__` and read the warning. + + .. automethod:: __getitem__ + + .. 
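
A compact sketch of the AsyncCollection surface documented above; the URI and the "demo.things" namespace are placeholders, and a local server is assumed:

import asyncio

from pymongo import AsyncMongoClient

async def main() -> None:
    client = AsyncMongoClient("mongodb://localhost:27017")
    things = client.demo.things
    await things.insert_one({"_id": 1, "state": "new"})
    await things.update_one({"_id": 1}, {"$set": {"state": "done"}})
    # AsyncCursor supports async iteration.
    async for doc in things.find({"state": "done"}):
        print(doc)
    print(await things.count_documents({}))
    await client.close()

asyncio.run(main())
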
autoclass:: pymongo.asynchronous.cursor.AsyncRawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None)
diff --git a/doc/api/pymongo/asynchronous/database.rst b/doc/api/pymongo/asynchronous/database.rst
new file mode 100644
index 0000000000..7b043ab0d1
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/database.rst
@@ -0,0 +1,27 @@
+:mod:`database` -- Database level operations
+============================================
+
+
+.. automodule:: pymongo.asynchronous.database
+   :synopsis: Database level operations
+
+   .. autoclass:: pymongo.asynchronous.database.AsyncDatabase
+      :members:
+
+      .. describe:: db[collection_name] || db.collection_name
+
+         Get the `collection_name` :class:`~pymongo.asynchronous.collection.AsyncCollection` of
+         :class:`AsyncDatabase` `db`.
+
+         Raises :class:`~pymongo.errors.InvalidName` if an invalid collection
+         name is used.
+
+         .. note:: Use dictionary style access if `collection_name` is an
+            attribute of the :class:`AsyncDatabase` class eg: db[`collection_name`].
+
+      .. automethod:: __getitem__
+      .. automethod:: __getattr__
+      .. autoattribute:: codec_options
+      .. autoattribute:: read_preference
+      .. autoattribute:: write_concern
+      .. autoattribute:: read_concern
diff --git a/doc/api/pymongo/asynchronous/index.rst b/doc/api/pymongo/asynchronous/index.rst
new file mode 100644
index 0000000000..b7fc985415
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/index.rst
@@ -0,0 +1,23 @@
+:mod:`pymongo async` -- Async Python driver for MongoDB
+=======================================================
+
+
+.. automodule:: pymongo.asynchronous
+   :synopsis: Asynchronous Python driver for MongoDB
+
+   .. data:: AsyncMongoClient
+
+      Alias for :class:`pymongo.asynchronous.mongo_client.AsyncMongoClient`.
+
+Sub-modules:
+
+.. toctree::
+   :maxdepth: 2
+
+   change_stream
+   client_session
+   collection
+   command_cursor
+   cursor
+   database
+   mongo_client
diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst
new file mode 100644
index 0000000000..899ca687d5
--- /dev/null
+++ b/doc/api/pymongo/asynchronous/mongo_client.rst
@@ -0,0 +1,41 @@
+:mod:`mongo_client` -- Tools for connecting to MongoDB
+======================================================
+
+
+.. automodule:: pymongo.asynchronous.mongo_client
+   :synopsis: Tools for connecting to MongoDB
+
+   .. autoclass:: pymongo.asynchronous.mongo_client.AsyncMongoClient(host='localhost', port=27017, document_class=dict, tz_aware=False, connect=True, **kwargs)
+
+      .. automethod:: close
+
+      .. describe:: c[db_name] || c.db_name
+
+         Get the `db_name` :class:`~pymongo.asynchronous.database.AsyncDatabase` on :class:`AsyncMongoClient` `c`.
+
+         Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used.
+
+      .. autoattribute:: topology_description
+      .. autoattribute:: address
+      .. autoattribute:: primary
+      .. autoattribute:: secondaries
+      .. autoattribute:: arbiters
+      .. autoattribute:: is_primary
+      .. autoattribute:: is_mongos
+      .. autoattribute:: nodes
+      .. autoattribute:: codec_options
+      .. autoattribute:: read_preference
+      .. autoattribute:: write_concern
+      .. autoattribute:: read_concern
+      .. autoattribute:: options
+      .. automethod:: start_session
+      .. 
automethod:: list_databases + .. automethod:: list_database_names + .. automethod:: drop_database + .. automethod:: get_default_database + .. automethod:: get_database + .. automethod:: server_info + .. automethod:: watch + .. automethod:: bulk_write + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/auth_oidc.rst b/doc/api/pymongo/auth_oidc.rst new file mode 100644 index 0000000000..1466b21e9d --- /dev/null +++ b/doc/api/pymongo/auth_oidc.rst @@ -0,0 +1,5 @@ +:mod:`auth_oidc` -- MONGODB-OIDC Authentication +=========================================================================== + +.. automodule:: pymongo.auth_oidc + :members: diff --git a/doc/api/pymongo/bulk.rst b/doc/api/pymongo/bulk.rst deleted file mode 100644 index 0d597c26df..0000000000 --- a/doc/api/pymongo/bulk.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`bulk` -- The bulk write operations interface -================================================== - -.. automodule:: pymongo.bulk - :synopsis: The bulk write operations interface. - :members: diff --git a/doc/api/pymongo/change_stream.rst b/doc/api/pymongo/change_stream.rst new file mode 100644 index 0000000000..ca165f890e --- /dev/null +++ b/doc/api/pymongo/change_stream.rst @@ -0,0 +1,5 @@ +:mod:`change_stream` -- Watch changes on a collection, database, or cluster +=========================================================================== + +.. automodule:: pymongo.change_stream + :members: diff --git a/doc/api/pymongo/client_options.rst b/doc/api/pymongo/client_options.rst new file mode 100644 index 0000000000..3ffc10bad6 --- /dev/null +++ b/doc/api/pymongo/client_options.rst @@ -0,0 +1,7 @@ +:mod:`client_options` -- Read only configuration options for a MongoClient. +=========================================================================== + +.. automodule:: pymongo.client_options + + .. autoclass:: pymongo.client_options.ClientOptions() + :members: diff --git a/doc/api/pymongo/client_session.rst b/doc/api/pymongo/client_session.rst new file mode 100644 index 0000000000..d9527b002b --- /dev/null +++ b/doc/api/pymongo/client_session.rst @@ -0,0 +1,5 @@ +:mod:`client_session` -- Logical sessions for sequential operations +=================================================================== + +.. automodule:: pymongo.client_session + :members: diff --git a/doc/api/pymongo/collation.rst b/doc/api/pymongo/collation.rst new file mode 100644 index 0000000000..02a4aa42d7 --- /dev/null +++ b/doc/api/pymongo/collation.rst @@ -0,0 +1,19 @@ +:mod:`collation` -- Tools for working with collations. +====================================================== + +.. automodule:: pymongo.collation + :synopsis: Tools for working with collations. + + .. autoclass:: pymongo.collation.Collation + .. autoclass:: pymongo.collation.CollationStrength + :members: + :member-order: bysource + .. autoclass:: pymongo.collation.CollationAlternate + :members: + :member-order: bysource + .. autoclass:: pymongo.collation.CollationCaseFirst + :members: + :member-order: bysource + .. autoclass:: pymongo.collation.CollationMaxVariable + :members: + :member-order: bysource diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index e40760d7cb..e3746c68b7 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -7,11 +7,18 @@ .. autodata:: pymongo.ASCENDING .. autodata:: pymongo.DESCENDING .. autodata:: pymongo.GEO2D - .. autodata:: pymongo.GEOHAYSTACK .. autodata:: pymongo.GEOSPHERE .. autodata:: pymongo.HASHED + .. 
autodata:: pymongo.TEXT - .. autoclass:: pymongo.collection.Collection(database, name[, create=False[, **kwargs]]]) + .. autoclass:: pymongo.collection.ReturnDocument + + .. autoattribute:: BEFORE + :annotation: + .. autoattribute:: AFTER + :annotation: + + .. autoclass:: pymongo.collection.Collection(database, name, create=False, **kwargs) .. describe:: c[name] || c.name @@ -23,39 +30,44 @@ .. autoattribute:: full_name .. autoattribute:: name .. autoattribute:: database + .. autoattribute:: codec_options .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms .. autoattribute:: write_concern - .. autoattribute:: uuid_subtype - .. automethod:: insert(doc_or_docs[, manipulate=True[, safe=None[, check_keys=True[, continue_on_error=False[, **kwargs]]]]]) - .. automethod:: save(to_save[, manipulate=True[, safe=None[, check_keys=True[, **kwargs]]]]) - .. automethod:: update(spec, document[, upsert=False[, manipulate=False[, safe=None[, multi=False[, check_keys=True[, **kwargs]]]]]]) - .. automethod:: remove([spec_or_id=None[, safe=None[, multi=True[, **kwargs]]]]) - .. automethod:: initialize_unordered_bulk_op - .. automethod:: initialize_ordered_bulk_op - .. automethod:: drop - .. automethod:: find([spec=None[, fields=None[, skip=0[, limit=0[, timeout=True[, snapshot=False[, tailable=False[, sort=None[, max_scan=None[, as_class=None[, slave_okay=False[, await_data=False[, partial=False[, manipulate=True[, read_preference=ReadPreference.PRIMARY[, exhaust=False, [compile_re=True, [,**kwargs]]]]]]]]]]]]]]]]]]) - .. automethod:: find_one([spec_or_id=None[, *args[, **kwargs]]]) - .. automethod:: parallel_scan - .. automethod:: count + .. autoattribute:: read_concern + .. automethod:: with_options + .. automethod:: bulk_write + .. automethod:: insert_one + .. automethod:: insert_many + .. automethod:: replace_one + .. automethod:: update_one + .. automethod:: update_many + .. automethod:: delete_one + .. automethod:: delete_many + .. automethod:: aggregate + .. automethod:: aggregate_raw_batches + .. automethod:: watch + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_one(filter=None, *args, **kwargs) + .. automethod:: find_one_and_delete + .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) + .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, **kwargs) + .. automethod:: count_documents + .. automethod:: estimated_document_count + .. automethod:: distinct .. automethod:: create_index - .. automethod:: ensure_index + .. automethod:: create_indexes .. 
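
The ReturnDocument constants documented above decide whether find_one_and_update returns the document from before or after the modification; a typical counter pattern, assuming a local server and the illustrative "demo.counters" namespace:

from pymongo import MongoClient
from pymongo.collection import ReturnDocument

client = MongoClient("mongodb://localhost:27017")
counters = client.demo.counters
seq = counters.find_one_and_update(
    {"_id": "orders"},
    {"$inc": {"value": 1}},
    upsert=True,
    return_document=ReturnDocument.AFTER,  # return the post-increment document
)
print(seq["value"])
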
automethod:: drop_index .. automethod:: drop_indexes - .. automethod:: reindex + .. automethod:: list_indexes .. automethod:: index_information - .. automethod:: options - .. automethod:: aggregate - .. automethod:: group + .. automethod:: create_search_index + .. automethod:: create_search_indexes + .. automethod:: drop_search_index + .. automethod:: list_search_indexes + .. automethod:: update_search_index + .. automethod:: drop .. automethod:: rename - .. automethod:: distinct - .. automethod:: map_reduce - .. automethod:: inline_map_reduce - .. automethod:: find_and_modify - .. autoattribute:: slave_okay - .. autoattribute:: safe - .. automethod:: get_lasterror_options - .. automethod:: set_lasterror_options - .. automethod:: unset_lasterror_options - + .. automethod:: options + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/command_cursor.rst b/doc/api/pymongo/command_cursor.rst index 9c84f19a6e..2d0597a00f 100644 --- a/doc/api/pymongo/command_cursor.rst +++ b/doc/api/pymongo/command_cursor.rst @@ -4,4 +4,3 @@ .. automodule:: pymongo.command_cursor :synopsis: Tools for iterating over MongoDB command results :members: - diff --git a/doc/api/pymongo/connection.rst b/doc/api/pymongo/connection.rst deleted file mode 100644 index 1d8a0077b7..0000000000 --- a/doc/api/pymongo/connection.rst +++ /dev/null @@ -1,53 +0,0 @@ -:mod:`connection` -- Tools for connecting to MongoDB -==================================================== - -.. automodule:: pymongo.connection - :synopsis: Tools for connecting to MongoDB - - .. autoclass:: pymongo.connection.Connection([host='localhost'[, port=27017[, max_pool_size=None[, network_timeout=None[, document_class=dict[, tz_aware=False[, **kwargs]]]]]]]) - - .. automethod:: disconnect - .. automethod:: close - .. automethod:: alive - - .. describe:: c[db_name] || c.db_name - - Get the `db_name` :class:`~pymongo.database.Database` on :class:`Connection` `c`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - - .. autoattribute:: host - .. autoattribute:: port - .. autoattribute:: is_primary - .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size - .. autoattribute:: nodes - .. autoattribute:: auto_start_request - .. autoattribute:: document_class - .. autoattribute:: tz_aware - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: min_wire_version - .. autoattribute:: max_wire_version - .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms - .. autoattribute:: write_concern - .. autoattribute:: slave_okay - .. autoattribute:: safe - .. autoattribute:: is_locked - .. automethod:: database_names - .. automethod:: drop_database - .. automethod:: copy_database(from_name, to_name[, from_host=None[, username=None[, password=None]]]) - .. automethod:: get_default_database - .. automethod:: server_info - .. automethod:: start_request - .. automethod:: end_request - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager - .. automethod:: fsync - .. automethod:: unlock - .. automethod:: get_lasterror_options - .. automethod:: set_lasterror_options - .. automethod:: unset_lasterror_options diff --git a/doc/api/pymongo/cursor.rst b/doc/api/pymongo/cursor.rst index ba54121dca..513f051abb 100644 --- a/doc/api/pymongo/cursor.rst +++ b/doc/api/pymongo/cursor.rst @@ -4,11 +4,24 @@ .. 
automodule:: pymongo.cursor :synopsis: Tools for iterating over MongoDB query results - .. autoclass:: pymongo.cursor.Cursor(collection, spec=None, fields=None, skip=0, limit=0, timeout=True, snapshot=False, tailable=False, sort=None, max_scan=None, as_class=None, slave_okay=False, await_data=False, partial=False, manipulate=True, read_preference=ReadPreference.PRIMARY, tag_sets=[{}], secondary_acceptable_latency_ms=None, exhaust=False, network_timeout=None) + .. autoclass:: pymongo.cursor.CursorType + + .. autoattribute:: NON_TAILABLE + :annotation: + .. autoattribute:: TAILABLE + :annotation: + .. autoattribute:: TAILABLE_AWAIT + :annotation: + .. autoattribute:: EXHAUST + :annotation: + + .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) :members: .. describe:: c[index] - See :meth:`__getitem__`. + See :meth:`__getitem__` and read the warning. .. automethod:: __getitem__ + + .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) diff --git a/doc/api/pymongo/cursor_manager.rst b/doc/api/pymongo/cursor_manager.rst deleted file mode 100644 index 64715553d9..0000000000 --- a/doc/api/pymongo/cursor_manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`cursor_manager` -- Managers to handle when cursors are killed after being closed -- DEPRECATED -==================================================================================================== - -.. automodule:: pymongo.cursor_manager - :synopsis: Managers to handle when cursors are killed after being closed - :members: diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index 4ffb201745..044e04971e 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -5,9 +5,6 @@ :synopsis: Database level operations .. autodata:: pymongo.auth.MECHANISMS - .. autodata:: pymongo.OFF - .. autodata:: pymongo.SLOW_ONLY - .. autodata:: pymongo.ALL .. autoclass:: pymongo.database.Database :members: @@ -23,17 +20,9 @@ .. note:: Use dictionary style access if `collection_name` is an attribute of the :class:`Database` class eg: db[`collection_name`]. + .. automethod:: __getitem__ + .. automethod:: __getattr__ + .. autoattribute:: codec_options .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms .. autoattribute:: write_concern - .. autoattribute:: uuid_subtype - .. autoattribute:: slave_okay - .. autoattribute:: safe - .. automethod:: get_lasterror_options - .. automethod:: set_lasterror_options - .. automethod:: unset_lasterror_options - - - .. autoclass:: pymongo.database.SystemJS - :members: + .. 
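
CursorType, documented above, matters mostly for tailable cursors. The sketch below assumes a local server and creates a capped collection named "events" purely for demonstration:

import time

from pymongo import CursorType, MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client.demo
# Tailable cursors require a capped collection; this fails if it already exists.
db.create_collection("events", capped=True, size=1024 * 1024)
db.events.insert_one({"msg": "started"})

cursor = db.events.find(cursor_type=CursorType.TAILABLE_AWAIT)
while cursor.alive:
    for doc in cursor:
        print(doc["msg"])
    time.sleep(1)  # the cursor stays open, waiting for more writes
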
autoattribute:: read_concern diff --git a/doc/api/pymongo/driver_info.rst b/doc/api/pymongo/driver_info.rst new file mode 100644 index 0000000000..9e6f735577 --- /dev/null +++ b/doc/api/pymongo/driver_info.rst @@ -0,0 +1,6 @@ +:mod:`driver_info` +================== + +.. automodule:: pymongo.driver_info + + .. autoclass:: pymongo.driver_info.DriverInfo(name=None, version=None, platform=None) diff --git a/doc/api/pymongo/encryption.rst b/doc/api/pymongo/encryption.rst new file mode 100644 index 0000000000..3a8c3c5cc4 --- /dev/null +++ b/doc/api/pymongo/encryption.rst @@ -0,0 +1,5 @@ +:mod:`encryption` -- Client-Side Field Level Encryption +======================================================= + +.. automodule:: pymongo.encryption + :members: diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst new file mode 100644 index 0000000000..b8a886ea68 --- /dev/null +++ b/doc/api/pymongo/encryption_options.rst @@ -0,0 +1,6 @@ +:mod:`encryption_options` -- Automatic Client-Side Field Level Encryption +========================================================================= + +.. automodule:: pymongo.encryption_options + :synopsis: Support for automatic client-side field level encryption + :members: diff --git a/doc/api/pymongo/event_loggers.rst b/doc/api/pymongo/event_loggers.rst new file mode 100644 index 0000000000..9be0779c20 --- /dev/null +++ b/doc/api/pymongo/event_loggers.rst @@ -0,0 +1,7 @@ +:mod:`event_loggers` -- Example loggers +=========================================== + + +.. automodule:: pymongo.event_loggers + :synopsis: A collection of simple listeners for monitoring driver events. + :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index d187932a31..4fb6c81568 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -9,11 +9,14 @@ Alias for :class:`pymongo.mongo_client.MongoClient`. - .. data:: MongoReplicaSetClient + .. data:: AsyncMongoClient - Alias for :class:`pymongo.mongo_replica_set_client.MongoReplicaSetClient`. + Alias for :class:`pymongo.asynchronous.mongo_client.AsyncMongoClient`. + + .. data:: ReadPreference + + Alias for :class:`pymongo.read_preferences.ReadPreference`. - .. autoclass:: pymongo.read_preferences.ReadPreference .. autofunction:: has_c .. data:: MIN_SUPPORTED_WIRE_VERSION @@ -23,24 +26,37 @@ The maximum wire protocol version PyMongo supports. + .. autofunction:: timeout + Sub-modules: .. toctree:: - :maxdepth: 2 - - connection - database + :maxdepth: 3 + + asynchronous/index + auth_oidc + change_stream + client_options + client_session + collation collection command_cursor cursor - bulk + database + driver_info + encryption + encryption_options errors - master_slave_connection - message mongo_client - mongo_replica_set_client + monitoring + operations pool - replica_set_connection - son_manipulator - cursor_manager + read_concern + read_preferences + results + server_api + server_description + topology_description uri_parser + write_concern + event_loggers diff --git a/doc/api/pymongo/master_slave_connection.rst b/doc/api/pymongo/master_slave_connection.rst deleted file mode 100644 index 72388b8930..0000000000 --- a/doc/api/pymongo/master_slave_connection.rst +++ /dev/null @@ -1,14 +0,0 @@ -:mod:`master_slave_connection` -- Master-slave connection to MongoDB -==================================================================== - -.. automodule:: pymongo.master_slave_connection - :synopsis: Master-slave connection to MongoDB - - .. 
autoclass:: pymongo.master_slave_connection.MasterSlaveConnection - :members: - - .. autoattribute:: safe - .. automethod:: get_lasterror_options - .. automethod:: set_lasterror_options - .. automethod:: unset_lasterror_options - diff --git a/doc/api/pymongo/message.rst b/doc/api/pymongo/message.rst deleted file mode 100644 index 0a28052fb6..0000000000 --- a/doc/api/pymongo/message.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`message` -- Tools for creating messages to be sent to MongoDB -=================================================================== - -.. automodule:: pymongo.message - :synopsis: Tools for creating messages to be sent to MongoDB - :members: diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index 7e7bf7ca4c..0409e7ef68 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -4,11 +4,9 @@ .. automodule:: pymongo.mongo_client :synopsis: Tools for connecting to MongoDB - .. autoclass:: pymongo.mongo_client.MongoClient([host='localhost'[, port=27017[, max_pool_size=100[, document_class=dict[, tz_aware=False[, **kwargs]]]]]]) + .. autoclass:: pymongo.mongo_client.MongoClient(host='localhost', port=27017, document_class=dict, tz_aware=False, connect=True, **kwargs) - .. automethod:: disconnect .. automethod:: close - .. automethod:: alive .. describe:: c[db_name] || c.db_name @@ -16,34 +14,27 @@ Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - .. autoattribute:: host - .. autoattribute:: port + .. autoattribute:: topology_description + .. autoattribute:: address + .. autoattribute:: primary + .. autoattribute:: secondaries + .. autoattribute:: arbiters .. autoattribute:: is_primary .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size .. autoattribute:: nodes - .. autoattribute:: auto_start_request - .. autoattribute:: document_class - .. autoattribute:: tz_aware - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: min_wire_version - .. autoattribute:: max_wire_version + .. autoattribute:: codec_options .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms .. autoattribute:: write_concern - .. autoattribute:: uuid_subtype - .. autoattribute:: is_locked - .. automethod:: database_names + .. autoattribute:: read_concern + .. autoattribute:: options + .. automethod:: start_session + .. automethod:: list_databases + .. automethod:: list_database_names .. automethod:: drop_database - .. automethod:: copy_database(from_name, to_name[, from_host=None[, username=None[, password=None]]]) .. automethod:: get_default_database + .. automethod:: get_database .. automethod:: server_info - .. automethod:: start_request - .. automethod:: end_request - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager - .. automethod:: fsync - .. automethod:: unlock + .. automethod:: watch + .. automethod:: bulk_write + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/mongo_replica_set_client.rst b/doc/api/pymongo/mongo_replica_set_client.rst deleted file mode 100644 index da28a30eee..0000000000 --- a/doc/api/pymongo/mongo_replica_set_client.rst +++ /dev/null @@ -1,42 +0,0 @@ -:mod:`mongo_replica_set_client` -- Tools for connecting to a MongoDB replica set -================================================================================ - -.. 
automodule:: pymongo.mongo_replica_set_client - :synopsis: Tools for connecting to a MongoDB replica set - - .. autoclass:: pymongo.mongo_replica_set_client.MongoReplicaSetClient([hosts_or_uri[, max_pool_size=100[, document_class=dict[, tz_aware=False[, **kwargs]]]]]) - - .. automethod:: disconnect - .. automethod:: close - .. automethod:: alive - - .. describe:: c[db_name] || c.db_name - - Get the `db_name` :class:`~pymongo.database.Database` on :class:`MongoReplicaSetClient` `c`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - - .. autoattribute:: seeds - .. autoattribute:: hosts - .. autoattribute:: primary - .. autoattribute:: secondaries - .. autoattribute:: arbiters - .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size - .. autoattribute:: document_class - .. autoattribute:: tz_aware - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: min_wire_version - .. autoattribute:: max_wire_version - .. autoattribute:: auto_start_request - .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms - .. autoattribute:: write_concern - .. autoattribute:: uuid_subtype - .. automethod:: database_names - .. automethod:: drop_database - .. automethod:: copy_database(from_name, to_name[, from_host=None[, username=None[, password=None]]]) - .. automethod:: get_default_database - .. automethod:: close_cursor diff --git a/doc/api/pymongo/monitoring.rst b/doc/api/pymongo/monitoring.rst new file mode 100644 index 0000000000..6ee48173aa --- /dev/null +++ b/doc/api/pymongo/monitoring.rst @@ -0,0 +1,99 @@ +:mod:`monitoring` -- Tools for monitoring driver events. +======================================================== + +.. automodule:: pymongo.monitoring + :synopsis: Tools for monitoring driver events. + + .. autofunction:: register(listener) + .. autoclass:: CommandListener + :members: + :inherited-members: + .. autoclass:: ServerListener + :members: + :inherited-members: + .. autoclass:: ServerHeartbeatListener + :members: + :inherited-members: + .. autoclass:: TopologyListener + :members: + :inherited-members: + .. autoclass:: ConnectionPoolListener + :members: + :inherited-members: + + .. autoclass:: CommandStartedEvent + :members: + :inherited-members: + .. autoclass:: CommandSucceededEvent + :members: + :inherited-members: + .. autoclass:: CommandFailedEvent + :members: + :inherited-members: + .. autoclass:: ServerDescriptionChangedEvent + :members: + :inherited-members: + .. autoclass:: ServerOpeningEvent + :members: + :inherited-members: + .. autoclass:: ServerClosedEvent + :members: + :inherited-members: + .. autoclass:: TopologyDescriptionChangedEvent + :members: + :inherited-members: + .. autoclass:: TopologyOpenedEvent + :members: + :inherited-members: + .. autoclass:: TopologyClosedEvent + :members: + :inherited-members: + .. autoclass:: ServerHeartbeatStartedEvent + :members: + :inherited-members: + .. autoclass:: ServerHeartbeatSucceededEvent + :members: + :inherited-members: + .. autoclass:: ServerHeartbeatFailedEvent + :members: + :inherited-members: + + .. autoclass:: PoolCreatedEvent + :members: + :inherited-members: + .. autoclass:: PoolClearedEvent + :members: + :inherited-members: + .. autoclass:: PoolClosedEvent + :members: + :inherited-members: + + .. autoclass:: ConnectionCreatedEvent + :members: + :inherited-members: + .. autoclass:: ConnectionReadyEvent + :members: + :inherited-members: + + .. 
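
The listener classes documented above are meant to be subclassed and then registered, either globally via register(listener) or per-client. A minimal command logger sketch (logging configuration omitted):

import logging

from pymongo import MongoClient, monitoring

class CommandLogger(monitoring.CommandListener):
    def started(self, event: monitoring.CommandStartedEvent) -> None:
        logging.info("command %s started", event.command_name)

    def succeeded(self, event: monitoring.CommandSucceededEvent) -> None:
        logging.info("command %s took %d us", event.command_name, event.duration_micros)

    def failed(self, event: monitoring.CommandFailedEvent) -> None:
        logging.info("command %s failed", event.command_name)

client = MongoClient(event_listeners=[CommandLogger()])
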
autoclass:: ConnectionClosedReason + :members: + + .. autoclass:: ConnectionClosedEvent + :members: + :inherited-members: + .. autoclass:: ConnectionCheckOutStartedEvent + :members: + :inherited-members: + + .. autoclass:: ConnectionCheckOutFailedReason + :members: + + .. autoclass:: ConnectionCheckOutFailedEvent + :members: + :inherited-members: + .. autoclass:: ConnectionCheckedOutEvent + :members: + :inherited-members: + .. autoclass:: ConnectionCheckedInEvent + :members: + :inherited-members: diff --git a/doc/api/pymongo/operations.rst b/doc/api/pymongo/operations.rst new file mode 100644 index 0000000000..e4ab1b9a86 --- /dev/null +++ b/doc/api/pymongo/operations.rst @@ -0,0 +1,6 @@ +:mod:`operations` -- Operation class definitions +================================================ + +.. automodule:: pymongo.operations + :synopsis: Operation class definitions + :members: diff --git a/doc/api/pymongo/pool.rst b/doc/api/pymongo/pool.rst index 4e37de4a35..78274e8f8b 100644 --- a/doc/api/pymongo/pool.rst +++ b/doc/api/pymongo/pool.rst @@ -2,5 +2,6 @@ ============================================================== .. automodule:: pymongo.pool - :synopsis: Pool module for use with a MongoDB client. - :members: + + .. autoclass:: pymongo.pool.PoolOptions() + :members: diff --git a/doc/api/pymongo/read_concern.rst b/doc/api/pymongo/read_concern.rst new file mode 100644 index 0000000000..378058abab --- /dev/null +++ b/doc/api/pymongo/read_concern.rst @@ -0,0 +1,7 @@ +:mod:`read_concern` -- Tools for working with read concern. +=========================================================== + +.. automodule:: pymongo.read_concern + :synopsis: Tools for working with read concern. + :members: + :inherited-members: diff --git a/doc/api/pymongo/read_preferences.rst b/doc/api/pymongo/read_preferences.rst new file mode 100644 index 0000000000..7605264394 --- /dev/null +++ b/doc/api/pymongo/read_preferences.rst @@ -0,0 +1,31 @@ +:mod:`read_preferences` -- Utilities for choosing which member of a replica set to read from. +============================================================================================= + +.. automodule:: pymongo.read_preferences + :synopsis: Utilities for choosing which member of a replica set to read from. + + .. autoclass:: pymongo.read_preferences.Primary + + .. max_staleness, min_wire_version, mongos_mode, and tag_sets don't + make sense for Primary. + + .. autoattribute:: document + .. autoattribute:: mode + .. autoattribute:: name + + .. autoclass:: pymongo.read_preferences.PrimaryPreferred + :inherited-members: + .. autoclass:: pymongo.read_preferences.Secondary + :inherited-members: + .. autoclass:: pymongo.read_preferences.SecondaryPreferred + :inherited-members: + .. autoclass:: pymongo.read_preferences.Nearest + :inherited-members: + + .. autoclass:: ReadPreference + + .. autoattribute:: PRIMARY + .. autoattribute:: PRIMARY_PREFERRED + .. autoattribute:: SECONDARY + .. autoattribute:: SECONDARY_PREFERRED + .. autoattribute:: NEAREST diff --git a/doc/api/pymongo/replica_set_connection.rst b/doc/api/pymongo/replica_set_connection.rst deleted file mode 100644 index 6a1305eb5f..0000000000 --- a/doc/api/pymongo/replica_set_connection.rst +++ /dev/null @@ -1,45 +0,0 @@ -:mod:`replica_set_connection` -- Tools for connecting to a MongoDB replica set -============================================================================== - -.. automodule:: pymongo.replica_set_connection - :synopsis: Tools for connecting to a MongoDB replica set - - .. 
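
A short sketch of the read preference classes documented above; the replica-set URI and the tag values are illustrative assumptions:

from pymongo import MongoClient
from pymongo.read_preferences import SecondaryPreferred

client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0")
things = client.demo.get_collection(
    "things",
    # Prefer a secondary tagged dc=ny; the empty document is the fallback.
    read_preference=SecondaryPreferred(tag_sets=[{"dc": "ny"}, {}]),
)
print(things.find_one())
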
autoclass:: pymongo.replica_set_connection.ReplicaSetConnection([hosts_or_uri[, max_pool_size=None[, document_class=dict[, tz_aware=False[, **kwargs]]]]]) - - .. automethod:: disconnect - .. automethod:: close - .. automethod:: alive - - .. describe:: c[db_name] || c.db_name - - Get the `db_name` :class:`~pymongo.database.Database` on :class:`ReplicaSetConnection` `c`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - - .. autoattribute:: seeds - .. autoattribute:: hosts - .. autoattribute:: primary - .. autoattribute:: secondaries - .. autoattribute:: arbiters - .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size - .. autoattribute:: document_class - .. autoattribute:: tz_aware - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: min_wire_version - .. autoattribute:: max_wire_version - .. autoattribute:: auto_start_request - .. autoattribute:: read_preference - .. autoattribute:: tag_sets - .. autoattribute:: secondary_acceptable_latency_ms - .. autoattribute:: write_concern - .. autoattribute:: safe - .. automethod:: database_names - .. automethod:: drop_database - .. automethod:: copy_database(from_name, to_name[, from_host=None[, username=None[, password=None]]]) - .. automethod:: get_default_database - .. automethod:: close_cursor - .. automethod:: get_lasterror_options - .. automethod:: set_lasterror_options - .. automethod:: unset_lasterror_options diff --git a/doc/api/pymongo/results.rst b/doc/api/pymongo/results.rst new file mode 100644 index 0000000000..765e9fb25b --- /dev/null +++ b/doc/api/pymongo/results.rst @@ -0,0 +1,7 @@ +:mod:`results` -- Result class definitions +========================================== + +.. automodule:: pymongo.results + :synopsis: Result class definitions + :members: + :inherited-members: diff --git a/doc/api/pymongo/server_api.rst b/doc/api/pymongo/server_api.rst new file mode 100644 index 0000000000..de74411aa4 --- /dev/null +++ b/doc/api/pymongo/server_api.rst @@ -0,0 +1,11 @@ +:mod:`server_api` -- Support for MongoDB Stable API +====================================================== + +.. automodule:: pymongo.server_api + :synopsis: Support for MongoDB Stable API + + .. autoclass:: pymongo.server_api.ServerApi + :members: + + .. autoclass:: pymongo.server_api.ServerApiVersion + :members: diff --git a/doc/api/pymongo/server_description.rst b/doc/api/pymongo/server_description.rst new file mode 100644 index 0000000000..fc6b55ec74 --- /dev/null +++ b/doc/api/pymongo/server_description.rst @@ -0,0 +1,9 @@ +:orphan: + +:mod:`server_description` -- An object representation of a server the driver is connected to. +============================================================================================= + +.. automodule:: pymongo.server_description + + .. autoclass:: pymongo.server_description.ServerDescription() + :members: diff --git a/doc/api/pymongo/son_manipulator.rst b/doc/api/pymongo/son_manipulator.rst deleted file mode 100644 index 87503e6f83..0000000000 --- a/doc/api/pymongo/son_manipulator.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`son_manipulator` -- Manipulators that can edit SON documents as they are saved or retrieved -================================================================================================= - -.. 
automodule:: pymongo.son_manipulator
-   :synopsis: Manipulators that can edit SON documents as they are saved or retrieved
-   :members:
diff --git a/doc/api/pymongo/topology_description.rst b/doc/api/pymongo/topology_description.rst
new file mode 100644
index 0000000000..24353db2a9
--- /dev/null
+++ b/doc/api/pymongo/topology_description.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+:mod:`topology_description` -- An object representation of a deployment of MongoDB servers.
+===========================================================================================
+
+.. automodule:: pymongo.topology_description
+
+   .. autoclass:: pymongo.topology_description.TopologyDescription()
+      :members:
diff --git a/doc/api/pymongo/write_concern.rst b/doc/api/pymongo/write_concern.rst
new file mode 100644
index 0000000000..5c7b4b39f6
--- /dev/null
+++ b/doc/api/pymongo/write_concern.rst
@@ -0,0 +1,6 @@
+:mod:`write_concern` -- Tools for specifying write concern
+==========================================================
+
+.. automodule:: pymongo.write_concern
+   :synopsis: Tools for specifying write concern.
+   :members:
diff --git a/doc/changelog.rst b/doc/changelog.rst
index 8e90d1afc1..f3eb4f6f23 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,14 +1,3309 @@
 Changelog
 =========

-Changes in Version 2.7
-----------------------
+Changes in Version 4.16.0 (XXXX/XX/XX)
+--------------------------------------
+
+PyMongo 4.16 brings a number of changes including:
+
+.. warning:: PyMongo 4.16 drops support for Python 3.9: Python 3.10+ is now required.
+
+- Dropped support for Python 3.9.
+- Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages, since
+  including them may leak sensitive user data.
+  Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`.
+- PyMongo now requires ``dnspython>=2.6.1``, since ``dnspython`` 1.0 is no longer maintained and is incompatible with
+  Python 3.10+. The minimum version is ``2.6.1`` to account for `CVE-2023-29483 `_.
+- Removed support for Eventlet.
+  Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency.
+
+Changes in Version 4.15.3 (2025/10/07)
+--------------------------------------
+
+Version 4.15.3 is a bug fix release.
+
+- Fixed a memory leak when raising :class:`bson.errors.InvalidDocument` with C extensions.
+- Fixed the return type of the :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct`,
+  :meth:`~pymongo.synchronous.collection.Collection.distinct`, :meth:`pymongo.asynchronous.cursor.AsyncCursor.distinct`,
+  and :meth:`pymongo.cursor.Cursor.distinct` methods.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.15.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.15.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47293
+
+Changes in Version 4.15.2 (2025/10/01)
+--------------------------------------
+
+Version 4.15.2 is a bug fix release.
+
+- Added wheels for Python 3.14 and 3.14t that were missing from the 4.15.0 release. Dropped the 3.13t wheel.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.15.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.15.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47186
+
+Changes in Version 4.15.1 (2025/09/16)
+--------------------------------------
+
+Version 4.15.1 is a bug fix release.
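
To illustrate the 4.16 InvalidDocument change noted above, a minimal sketch; the `document` attribute is per that note and requires PyMongo 4.16+:

from bson import encode
from bson.errors import InvalidDocument

try:
    encode({"bad": object()})  # object() has no BSON representation
except InvalidDocument as exc:
    print(exc)           # the message no longer embeds the offending document
    print(exc.document)  # the invalid document itself (PyMongo 4.16+)
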
+ +- Fixed a bug in :meth:`~pymongo.synchronous.encryption.ClientEncryption.encrypt` + and :meth:`~pymongo.asynchronous.encryption.AsyncClientEncryption.encrypt` + that would cause a ``TypeError`` when using ``pymongocrypt<1.16`` by passing + an unsupported ``type_opts`` parameter even if Queryable Encryption text + queries beta was not used. + +- Fixed a bug in ``AsyncMongoClient`` that caused a ``ServerSelectionTimeoutError`` + when used with ``uvicorn``, ``FastAPI``, or ``uvloop``. + +Issues Resolved +............... + +See the `PyMongo 4.15.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=46486 + +Changes in Version 4.15.0 (2025/09/10) +-------------------------------------- + +PyMongo 4.15 brings a number of changes including: + +- Added :class:`~pymongo.encryption_options.TextOpts`, + :attr:`~pymongo.encryption.Algorithm.TEXTPREVIEW`, + :attr:`~pymongo.encryption.QueryType.PREFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUFFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUBSTRINGPREVIEW`, + as part of the experimental Queryable Encryption text queries beta. + ``pymongocrypt>=1.16`` is required for text query support. +- Added :class:`bson.decimal128.DecimalEncoder` and + :class:`bson.decimal128.DecimalDecoder` + to support encoding and decoding of BSON Decimal128 values to + decimal.Decimal values using the TypeRegistry API. +- Added support for Windows ``arm64`` wheels. + +Changes in Version 4.14.1 (2025/08/19) +-------------------------------------- + +Version 4.14.1 is a bug fix release. + +- Fixed a bug in ``MongoClient.append_metadata()`` and + ``AsyncMongoClient.append_metadata()`` + that allowed duplicate ``DriverInfo.name`` to be appended to the metadata. + +Issues Resolved +............... + +See the `PyMongo 4.14.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=45256 + +Changes in Version 4.14.0 (2025/08/06) +-------------------------------------- + +.. warning:: PyMongo 4.14 drops support for MongoDB 4.0. PyMongo now supports + MongoDB 4.2+. + +PyMongo 4.14 brings a number of changes including: + +- Dropped support for MongoDB 4.0. +- Added preliminary support for Python 3.14 and 3.14 with free-threading. We do + not yet support the following with Python 3.14: + + - Subinterpreters (``concurrent.interpreters``) + - Free-threading with Encryption + - mod_wsgi + +- Removed experimental support for free-threading support in Python 3.13. +- Added :attr:`bson.codec_options.TypeRegistry.codecs` and + :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties + to allow users to directly access the type codecs and fallback encoder for a + given :class:`bson.codec_options.TypeRegistry`. +- Added + :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and + :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated + MongoClients to send client metadata on-demand +- Improved performance of selecting a server with the Primary selector. +- Introduces a minor breaking change. When encoding + :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the + 'padding' metadata field is < 0 or > 7, or non-zero for any type other than + PACKED_BIT. 
+- Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` return value to be + type ``dict`` instead of ``_CaseInsensitiveDictionary``. + +Issues Resolved +............... + +See the `PyMongo 4.14 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43041 + +Changes in Version 4.13.2 (2025/06/17) +-------------------------------------- + +Version 4.13.2 is a bug fix release. + +- Fixed a bug where ``AsyncMongoClient`` would block the event loop while creating new connections, + potentially significantly increasing latency for ongoing operations. + +Issues Resolved +............... + +See the `PyMongo 4.13.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43937 + + +Changes in Version 4.13.1 (2025/06/10) +-------------------------------------- + +Version 4.13.1 is a bug fix release. + +- Fixed a bug that could raise ``ServerSelectionTimeoutError`` when using timeouts with ``AsyncMongoClient``. +- Fixed a bug that could raise ``NetworkTimeout`` errors on Windows. + +Issues Resolved +............... + +See the `PyMongo 4.13.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43924 + +Changes in Version 4.13.0 (2025/05/14) +-------------------------------------- + +PyMongo 4.13 brings a number of changes including: + +- The asynchronous API is now stable and no longer in beta. + See the :mod:`pymongo.asynchronous` docs + or the `migration guide `_ for more information. +- Fixed a bug where :class:`pymongo.write_concern.WriteConcern` repr was not eval-able + when using ``w="majority"``. +- When padding is set, ignored bits in a BSON BinaryVector of PACKED_BIT dtype should be set to zero. + When encoding, this is enforced and is a breaking change. + It is not yet enforced when decoding, so reading from the database will not fail, however a warning will be triggered. + From PyMongo 5.0, this rule will be enforced for both encoding and decoding. + +Issues Resolved +............... + +See the `PyMongo 4.13 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42509 + +Changes in Version 4.12.1 (2025/04/29) +-------------------------------------- + +Version 4.12.1 is a bug fix release. + +- Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. +- Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. +- Fixed a bug that caused direct use of ``pymongo.uri_parser`` to raise an ``AttributeError``. +- Fixed a bug where clients created with connect=False and a "mongodb+srv://" connection string + could cause public ``pymongo.MongoClient`` and ``pymongo.AsyncMongoClient`` attributes (topology_description, + nodes, address, primary, secondaries, arbiters) to incorrectly return a Database, leading to type + errors such as: "NotImplementedError: Database objects do not implement truth value testing or bool()". 
+- Removed Eventlet testing against Python versions newer than 3.9, since
+  Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency.
+- Fixed a bug where MongoDB cluster topology changes could cause asynchronous operations to take much longer to complete
+  due to holding the Topology lock while closing stale connections.
+- Fixed a bug that would cause AsyncMongoClient to attempt to use PyOpenSSL when available, resulting in errors such as
+  "pymongo.errors.ServerSelectionTimeoutError: 'SSLContext' object has no attribute 'wrap_bio'".
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.12.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43094
+
+Changes in Version 4.12.0 (2025/04/08)
+--------------------------------------
+
+.. warning:: Driver support for MongoDB 4.0 reached end of life in April 2025.
+   PyMongo 4.12 will be the last release to support MongoDB 4.0.
+
+PyMongo 4.12 brings a number of changes including:
+
+- Added support for configuring the DEK cache lifetime via the ``key_expiration_ms`` argument to
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`.
+- Added support for $lookup in CSFLE and QE on MongoDB 8.1+.
+- pymongocrypt>=1.13 is now required for `In-Use Encryption `_ support.
+- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.rename_by_name` and :meth:`gridfs.grid_file.GridFSBucket.rename_by_name`
+  for more performant renaming of a file with multiple revisions.
+- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name`
+  for more performant deletion of a file with multiple revisions.
+- AsyncMongoClient no longer performs DNS resolution for "mongodb+srv://" connection strings on creation.
+  To avoid blocking the asyncio loop, the resolution is now deferred until the client is first connected.
+- Added index hinting support to the
+  :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` and
+  :meth:`~pymongo.collection.Collection.distinct` commands.
+- Deprecated the ``hedge`` parameter for
+  :class:`~pymongo.read_preferences.PrimaryPreferred`,
+  :class:`~pymongo.read_preferences.Secondary`,
+  :class:`~pymongo.read_preferences.SecondaryPreferred`, and
+  :class:`~pymongo.read_preferences.Nearest`. Support for ``hedge`` will be removed in PyMongo 5.0.
+- Removed PyOpenSSL support from the asynchronous API due to limitations of the CPython asyncio.Protocol SSL implementation.
+- Allow valid SRV hostnames with fewer than 3 parts.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.12 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.12 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=41916
+
+Changes in Version 4.11.2 (2025/03/05)
+--------------------------------------
+
+Version 4.11.2 is a bug fix release.
+
+- Fixed a bug where :meth:`~pymongo.database.Database.command` would fail when attempting to run the bulkWrite command.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.11.2 release notes in JIRA`_ for the list of resolved issues in this release.
+
+.. _PyMongo 4.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42506
+
+Changes in Version 4.11.1 (2025/02/10)
+--------------------------------------
+
+- Fixed support for prebuilt ``ppc64le`` and ``s390x`` wheels.
+
+Changes in Version 4.11.0 (2025/01/28)
+--------------------------------------
+
+.. warning:: PyMongo 4.11 drops support for Python 3.8 and PyPy 3.9: Python 3.9+ or PyPy 3.10+ is now required.
+.. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+.
+   Driver support for MongoDB 3.6 reached end of life in April 2024.
+.. warning:: Driver support for MongoDB 4.0 reaches end of life in April 2025.
+   A future minor release of PyMongo will raise the minimum supported MongoDB Server version from 4.0 to 4.2.
+   This is in accordance with `MongoDB Software Lifecycle Schedules <https://www.mongodb.com/legal/support-policy/lifecycles>`_.
+   **Support for MongoDB Server 4.0 will be dropped in a future release!**
+.. warning:: This version does not include wheels for ``ppc64le`` or ``s390x`` architectures, see `PYTHON-5058`_ for more information.
+
+PyMongo 4.11 brings a number of changes including:
+
+- Dropped support for Python 3.8 and PyPy 3.9.
+- Dropped support for MongoDB 3.6.
+- Dropped support for the MONGODB-CR authentication mechanism, which is no longer supported by MongoDB 4.0+.
+- pymongocrypt>=1.12 is now required for `In-Use Encryption `_ support.
+- Added support for free-threaded Python with the GIL disabled. For more information see:
+  `Free-threaded CPython `_.
+  We do not yet support free-threaded Python on Windows (`PYTHON-5027`_) or with In-Use Encryption (`PYTHON-5024`_).
+- :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and
+  :attr:`~pymongo.mongo_client.MongoClient.address` now correctly block when called on unconnected clients
+  until either connection succeeds or a server selection timeout error is raised.
+- Added :func:`repr` support to :class:`pymongo.operations.IndexModel`.
+- Added :func:`repr` support to :class:`pymongo.operations.SearchIndexModel`.
+- Added a ``sort`` parameter to
+  :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.replace_one`,
+  :class:`~pymongo.operations.UpdateOne`, and
+  :class:`~pymongo.operations.UpdateMany` (see the example after this list).
+- :meth:`~pymongo.mongo_client.MongoClient.bulk_write` and
+  :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` now raise an error
+  when ``ordered=True`` or ``verboseResults=True`` are used with unacknowledged writes.
+  These are unavoidable breaking changes.
+- Fixed a bug in :const:`bson.json_util.dumps` where a :class:`bson.datetime_ms.DatetimeMS` would
+  be incorrectly encoded as ``'{"$date": "X"}'`` instead of ``'{"$date": X}'`` when using the
+  legacy MongoDB Extended JSON datetime representation.
+- Fixed a bug where :const:`bson.json_util.loads` would raise an ``IndexError`` when parsing an invalid
+  ``"$date"`` instead of a ``ValueError``.
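+
+A minimal sketch of the new ``sort`` parameter (the collection, filter, and
+field names here are illustrative)::
+
+  >>> coll.update_one(
+  ...     {"status": "pending"},
+  ...     {"$set": {"status": "active"}},
+  ...     sort=[("priority", -1)],  # update the highest-priority matching document
+  ... )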
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.11 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784
+.. _PYTHON-5027: https://jira.mongodb.org/browse/PYTHON-5027
+.. _PYTHON-5024: https://jira.mongodb.org/browse/PYTHON-5024
+.. _PYTHON-5058: https://jira.mongodb.org/browse/PYTHON-5058
+
+Changes in Version 4.10.1 (2024/10/01)
+--------------------------------------
+
+Version 4.10.1 is a bug fix release.
+
+- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``.
+- Fixed Binary BSON subtype (9) support on big-endian operating systems (such as zSeries).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.10.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40788
+
+
+Changes in Version 4.10.0 (2024/09/30)
+--------------------------------------
+
+- Added provisional **(BETA)** support for a new Binary BSON subtype (9) used for efficient storage and retrieval of vectors:
+  densely packed arrays of numbers, all of the same type.
+  This includes the new methods :meth:`~bson.binary.Binary.from_vector` and :meth:`~bson.binary.Binary.as_vector`.
+- Added C extension use to client metadata, for example: ``{"driver": {"name": "PyMongo|c", "version": "4.10.0"}, ...}``
+- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock.
+- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.10 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553
+
+Changes in Version 4.9.2 (2024/10/02)
+-------------------------------------
+
+- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock.
+- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured.
+- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.9.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40732
+
+
+Changes in Version 4.9.1 (2024/09/18)
+-------------------------------------
+
+- Added missing documentation noting that the async API is in beta.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.9.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40720
+
+
+Changes in Version 4.9 (2024/09/18)
+-----------------------------------
+
+.. warning:: Driver support for MongoDB 3.6 reached end of life in April 2024.
+   PyMongo 4.9 will be the last release to support MongoDB 3.6.
+
+.. warning:: PyMongo 4.9 refactors a large portion of internal APIs to support the new asynchronous API beta.
+   As a result, versions of Motor older than 3.6 are not compatible with PyMongo 4.9.
+   Existing users of these versions must either upgrade to Motor 3.6 and PyMongo 4.9,
+   or cap their PyMongo version to ``< 4.9``.
+   Any applications that use private APIs may also break as a result of these internal changes.
+
+PyMongo 4.9 brings a number of improvements including:
+
+- Added support for MongoDB 8.0.
+- Added support for Python 3.13.
+- A new beta asynchronous API with full asyncio support.
+  This new asynchronous API is a work-in-progress that may change during the beta period before the full release.
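+
+  A minimal sketch of the new API (assuming a locally running server; names
+  are illustrative)::
+
+    import asyncio
+
+    from pymongo import AsyncMongoClient
+
+    async def main():
+        client = AsyncMongoClient()
+        # The same helpers as the synchronous API, but awaitable.
+        await client.test.coll.insert_one({"x": 1})
+        print(await client.test.coll.find_one({"x": 1}))
+        await client.close()
+
+    asyncio.run(main())
+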
+- Added support for In-Use Encryption range queries with MongoDB 8.0.
+  Added :attr:`~pymongo.encryption.Algorithm.RANGE`.
+  ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`.
+- Added support for the "delegated" option for the KMIP ``master_key`` in
+  :meth:`~pymongo.encryption.ClientEncryption.create_data_key`.
+- pymongocrypt>=1.10 is now required for `In-Use Encryption `_ support.
+- Added :meth:`~pymongo.cursor.Cursor.to_list` to :class:`~pymongo.cursor.Cursor`,
+  :class:`~pymongo.command_cursor.CommandCursor`,
+  :class:`~pymongo.asynchronous.cursor.AsyncCursor`,
+  and :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`
+  as an asynchronous-friendly alternative to ``list(cursor)``.
+- Added :meth:`~pymongo.mongo_client.MongoClient.bulk_write` to :class:`~pymongo.mongo_client.MongoClient`
+  and :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient`,
+  enabling users to perform insert, update, and delete operations
+  against mixed namespaces in a minimized number of round trips.
+  Please see `Client Bulk Write `_ for more information.
+- Added support for the ``namespace`` parameter to the
+  :class:`~pymongo.operations.InsertOne`,
+  :class:`~pymongo.operations.ReplaceOne`,
+  :class:`~pymongo.operations.UpdateOne`,
+  :class:`~pymongo.operations.UpdateMany`,
+  :class:`~pymongo.operations.DeleteOne`, and
+  :class:`~pymongo.operations.DeleteMany` operations, so
+  they can be used in the new :meth:`~pymongo.mongo_client.MongoClient.bulk_write`.
+- Added :func:`repr` support to :class:`bson.tz_util.FixedOffset`.
+- Fixed a bug where PyMongo would raise ``InvalidBSON: unhashable type: 'tzfile'``
+  when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or
+  :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a timezone from dateutil.
+- Fixed a bug where PyMongo would raise ``InvalidBSON: date value out of range``
+  when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or
+  :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a non-UTC timezone.
+- Added a warning to unclosed MongoClient instances
+  telling users to explicitly close clients when finished with them to avoid leaking resources.
+  For example:
+
+  .. code-block::
+
+    sys:1: ResourceWarning: Unclosed MongoClient opened at:
+     File "/Users//my_file.py", line 8, in <module>
+      client = MongoClient()
+    Call MongoClient.close() to safely shut down your client and free up resources.
+
+- The default value for ``connect`` in ``MongoClient`` is changed to ``False`` when running on
+  function-as-a-service (FaaS) platforms like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions.
+  On some FaaS systems, there is a ``fork()`` operation at function
+  startup. By delaying the connection to the first operation, we avoid a deadlock. See
+  `multiple forks `_ for more information.
+
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.9 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940
+
+
+Changes in Version 4.8.0 (2024/06/26)
+-------------------------------------
+
+.. warning:: PyMongo 4.8 drops support for Python 3.7 and PyPy 3.8: Python 3.8+ or PyPy 3.9+ is now required.
+
+PyMongo 4.8 brings a number of improvements including:
+
+- The handshake metadata for "os.name" on Windows has been simplified to "Windows" to improve import time.
+- The repr of ``bson.binary.Binary`` is now redacted when the subtype is ``SENSITIVE_SUBTYPE`` (8).
+- Added Secure Software Development Life Cycle automation to the release process.
+  GitHub Releases now include a Software Bill of Materials, and signature
+  files corresponding to the distribution files released on PyPI.
+- Fixed a bug in change streams where both ``startAtOperationTime`` and ``resumeToken``
+  could be added to a retry attempt, which caused the retry to fail.
+- Fall back to the stdlib ``ssl`` module when the ``pyopenssl`` import fails with an ``AttributeError``.
+- Improved performance of MongoClient operations, especially when many operations are being run concurrently.
+
+Unavoidable breaking changes
+............................
+
+- Since we are now using ``hatch`` as our build backend, we no longer have a usable ``setup.py`` file
+  and require installation using ``pip``. Attempts to invoke the ``setup.py`` file will raise an exception.
+  Additionally, ``pip`` >= 21.3 is now required for editable installs.
+- We no longer support the ``srv`` extra, since ``dnspython`` is included as a dependency in PyMongo 4.7+.
+  Instead of ``pip install pymongo[srv]``, use ``pip install pymongo``.
+- We no longer support the ``tls`` extra, which was only valid for Python 2.
+  Instead of ``pip install pymongo[tls]``, use ``pip install pymongo``.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.8 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37057
+
+Changes in Version 4.7.3 (2024/06/04)
+-------------------------------------
+
+Version 4.7.3 has further fixes for lazily loading modules.
+
+- Use deferred imports instead of importlib lazy module loading.
+- Improve import time on Windows.
+- Reduce the verbosity of the "Waiting for suitable server to become available" log message from info to debug.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.7.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.7.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39865
+
+Changes in Version 4.7.2 (2024/05/07)
+-------------------------------------
+
+Version 4.7.2 fixes a bug introduced in 4.7.0:
+
+- Fixed a bug where PyMongo could not be used with the Nuitka compiler.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.7.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39710
+
+
+Changes in Version 4.7.1 (2024/04/30)
+-------------------------------------
+
+Version 4.7.1 fixes a bug introduced in 4.7.0 and clarifies related documentation:
+
+- Fixed a bug where PyMongo would cause an ``AttributeError`` if ``dns.resolver`` was imported and referenced
+  after PyMongo was imported.
+- Clarified the behavior of the ``TOKEN_RESOURCE`` auth mechanism property for ``MONGODB-OIDC``.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.7.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39680
+
+Changes in Version 4.7.0 (2024/04/24)
+-------------------------------------
+
+PyMongo 4.7 brings a number of improvements including:
+
+- Added support for ``MONGODB-OIDC`` authentication. The MONGODB-OIDC mechanism authenticates
+  using an OpenID Connect (OIDC) access token.
+  The driver supports OIDC for workload identity, defined as an identity you assign to a software workload
+  (such as an application, service, script, or container) to authenticate and access other services and resources.
+  Please see `Authentication `_ for more information.
+- Added support for Python's `native logging library `_,
+  enabling developers to customize the verbosity of log messages for their applications.
+  Please see `Logging `_ for more information.
+- Significantly improved the performance of encoding BSON documents to JSON.
+- Added support for named KMS providers for client side field level encryption.
+  The previously supported KMS providers were only: aws, azure, gcp, kmip, and local.
+  KMS providers can now have a name suffix (e.g. ``local:myname``).
+  Named KMS providers enable more than one of each KMS provider type to be configured.
+  See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts`.
+  Note that named KMS providers require pymongocrypt >=1.9 and libmongocrypt >=1.9.
+- Added the :class:`pymongo.hello.Hello.connection_id`,
+  :attr:`pymongo.monitoring.CommandStartedEvent.server_connection_id`,
+  :attr:`pymongo.monitoring.CommandSucceededEvent.server_connection_id`, and
+  :attr:`pymongo.monitoring.CommandFailedEvent.server_connection_id` properties.
+- Fixed a bug where inflating a :class:`~bson.raw_bson.RawBSONDocument` containing a :class:`~bson.code.Code` would cause an error.
+- :meth:`~pymongo.encryption.ClientEncryption.encrypt` and
+  :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression` now allow ``key_id``
+  to be passed in as a :class:`uuid.UUID`.
+- Fixed a bug where :class:`~bson.int64.Int64` instances could not always be encoded by `orjson`_. The following now
+  works::
+
+    >>> import orjson
+    >>> from bson import json_util
+    >>> from bson.int64 import Int64
+    >>> orjson.dumps({'a': Int64(1)}, default=json_util.default, option=orjson.OPT_PASSTHROUGH_SUBCLASS)
+
+.. _orjson: https://github.com/ijl/orjson
+
+- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown"
+  could be written to stderr when a MongoClient's thread starts as the Python interpreter is shutting down.
+- Added a warning when connecting to DocumentDB and CosmosDB clusters.
+  For more information regarding feature compatibility and support please visit
+  `mongodb.com/supportability/documentdb `_ and
+  `mongodb.com/supportability/cosmosdb `_.
+- Added the :attr:`pymongo.monitoring.ConnectionCheckedOutEvent.duration`,
+  :attr:`pymongo.monitoring.ConnectionCheckOutFailedEvent.duration`, and
+  :attr:`pymongo.monitoring.ConnectionReadyEvent.duration` properties.
+- Added the ``type`` and ``kwargs`` arguments to :class:`~pymongo.operations.SearchIndexModel` to enable
+  creating vector search indexes in MongoDB Atlas.
+- Fixed a bug where ``read_concern`` and ``write_concern`` were improperly added to
+  :meth:`~pymongo.collection.Collection.list_search_indexes` queries.
+- Deprecated :attr:`pymongo.write_concern.WriteConcern.wtimeout` and :attr:`pymongo.mongo_client.MongoClient.wTimeoutMS`.
+  Use :meth:`~pymongo.timeout` instead.
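+
+  For example, a minimal sketch of the replacement API (assuming a local
+  server; the 5-second budget is illustrative)::
+
+    import pymongo
+
+    client = pymongo.MongoClient()
+    # All operations in this block share one 5-second deadline.
+    with pymongo.timeout(5):
+        client.admin.command("ping")
+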
+
+.. warning:: PyMongo depends on ``dnspython``, which released version 2.6.1 with a fix for
+   `CVE-2023-29483 `_. We do not explicitly require
+   that version, but we strongly recommend that you install at least that version in your environment.
+
+Unavoidable breaking changes
+............................
+
+- Replaced the usage of :class:`bson.son.SON` with ``dict`` on all internal classes and commands;
+  :attr:`options.pool_options.metadata` is now of type ``dict`` as opposed to :class:`bson.son.SON`.
+  Here are some examples of how this changes expected output, as well as how to convert from :class:`dict` to :class:`bson.son.SON`::
+
+    # Before
+    >>> from pymongo import MongoClient
+    >>> client = MongoClient()
+    >>> client.options.pool_options.metadata
+    SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')])
+
+    # After
+    >>> client.options.pool_options.metadata
+    {'driver': {'name': 'PyMongo', 'version': '4.7.0.dev0'}, 'os': {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}, 'platform': 'CPython 3.11.6.final.0'}
+
+    # To convert from dict to SON
+    # This will only convert the first layer of the dictionary
+    >>> from bson.son import SON
+    >>> data_as_dict = client.options.pool_options.metadata
+    >>> SON(data_as_dict)
+    SON([('driver', {'name': 'PyMongo', 'version': '4.7.0.dev0'}), ('os', {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}), ('platform', 'CPython 3.11.6.final.0')])
+
+    # To convert from dict to SON on a nested dictionary
+    >>> from typing import Any
+    >>> def dict_to_SON(data_as_dict: dict[Any, Any]):
+    ...     data_as_SON = SON()
+    ...     for key, value in data_as_dict.items():
+    ...         data_as_SON[key] = dict_to_SON(value) if isinstance(value, dict) else value
+    ...     return data_as_SON
+    >>>
+    >>> dict_to_SON(data_as_dict)
+    SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')])
+
+- PyMongo now uses `lazy imports `_ for external dependencies.
+  If you are relying on any kind of monkey-patching of the standard library, you may need to explicitly import those external libraries in addition
+  to ``pymongo`` before applying the patch. Note that we test with ``gevent`` and ``eventlet`` patching, and those continue to work.
+
+- The "aws" extra now requires a minimum version of ``1.1.0`` for ``pymongo_auth_aws``.
+
+Changes in Version 4.6.3 (2024/03/27)
+-------------------------------------
+
+PyMongo 4.6.3 fixes the following bug:
+
+- Fixed a potential memory access violation when decoding invalid BSON.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.6.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=38360
+
+Changes in Version 4.6.2 (2024/02/21)
+-------------------------------------
+
+PyMongo 4.6.2 fixes the following bug:
+
+- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown"
+  could be written to stderr when a MongoClient's thread starts as the Python interpreter is shutting down.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.6.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37906
+
+Changes in Version 4.6.1 (2023/11/29)
+-------------------------------------
+
+PyMongo 4.6.1 fixes the following bug:
+
+- Ensured that retryable read ``OperationFailure`` errors re-raise the exception when the error code is 0 or ``None``.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.6.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37138
+
+Changes in Version 4.6.0 (2023/11/01)
+-------------------------------------
+
+PyMongo 4.6 brings a number of improvements including:
+
+- Added the ``serverMonitoringMode`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`.
+- Improved client performance and reduced connection requirements in Function-as-a-service (FaaS)
+  environments like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions.
+- Added the :attr:`pymongo.monitoring.CommandSucceededEvent.database_name` property.
+- Added the :attr:`pymongo.monitoring.CommandFailedEvent.database_name` property.
+- Allowed passing a ``dict`` to sort/create_index/hint.
+- Added :func:`repr` support to the write result classes:
+  :class:`~pymongo.results.BulkWriteResult`,
+  :class:`~pymongo.results.DeleteResult`,
+  :class:`~pymongo.results.InsertManyResult`,
+  :class:`~pymongo.results.InsertOneResult`,
+  :class:`~pymongo.results.UpdateResult`, and
+  :class:`~pymongo.encryption.RewrapManyDataKeyResult`. For example:
+
+  >>> client.t.t.insert_one({})
+  InsertOneResult(ObjectId('65319acdd55bb3a27ab5502b'), acknowledged=True)
+  >>> client.t.t.insert_many([{} for _ in range(3)])
+  InsertManyResult([ObjectId('6532f85e826f2b6125d6ce39'), ObjectId('6532f85e826f2b6125d6ce3a'), ObjectId('6532f85e826f2b6125d6ce3b')], acknowledged=True)
+
+- :meth:`~pymongo.uri_parser.parse_uri` now considers the delimiting slash (``/``)
+  between hosts and connection options optional. For example,
+  "mongodb://example.com?tls=true" is now a valid URI.
+- Fixed a bug where PyMongo would incorrectly promote all cursors to exhaust cursors
+  when connected to load balanced MongoDB clusters or Serverless clusters.
+- Added the `network compression `_ documentation page.
+- Added more timeout information to network errors.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.6 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36542
+
+Changes in Version 4.5.0 (2023/08/22)
+-------------------------------------
+
+PyMongo 4.5 brings a number of improvements including:
+
+- Added new helper methods for Atlas Search Index (requires MongoDB Server 7.0+):
+  :meth:`~pymongo.collection.Collection.list_search_indexes`,
+  :meth:`~pymongo.collection.Collection.create_search_index`,
+  :meth:`~pymongo.collection.Collection.create_search_indexes`,
+  :meth:`~pymongo.collection.Collection.drop_search_index`, and
+  :meth:`~pymongo.collection.Collection.update_search_index`.
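+
+  For example, a minimal sketch (the index name and definition are
+  illustrative)::
+
+    >>> coll.create_search_index(
+    ...     {"name": "my_search_index", "definition": {"mappings": {"dynamic": True}}}
+    ... )
+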
+- Added :meth:`~pymongo.database.Database.cursor_command`
+  and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support
+  executing an arbitrary command that returns a cursor.
+- ``cryptography`` 2.5 or later is now required for `OCSP `_ support.
+- Improved bson encoding and decoding performance by up to 134% (`PYTHON-3729`_, `PYTHON-3797`_, `PYTHON-3816`_, `PYTHON-3817`_, `PYTHON-3820`_, `PYTHON-3824`_, and `PYTHON-3846`_).
+
+.. warning:: PyMongo no longer supports PyPy3 versions older than 3.8. Users
+   must upgrade to PyPy3.8+.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.5 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=35492
+
+.. _PYTHON-3729: https://jira.mongodb.org/browse/PYTHON-3729
+.. _PYTHON-3797: https://jira.mongodb.org/browse/PYTHON-3797
+.. _PYTHON-3816: https://jira.mongodb.org/browse/PYTHON-3816
+.. _PYTHON-3817: https://jira.mongodb.org/browse/PYTHON-3817
+.. _PYTHON-3820: https://jira.mongodb.org/browse/PYTHON-3820
+.. _PYTHON-3824: https://jira.mongodb.org/browse/PYTHON-3824
+.. _PYTHON-3846: https://jira.mongodb.org/browse/PYTHON-3846
+
+Changes in Version 4.4.1 (2023/07/13)
+-------------------------------------
+
+Version 4.4.1 fixes the following bugs:
+
+- Fixed a bug where pymongo would raise a ``ConfigurationError: Invalid SRV host``
+  error when connecting to a "mongodb+srv://" URI that included capital letters
+  in the SRV hosts returned from DNS (`PYTHON-3800`_).
+- Fixed a minor reference counting bug in the C extension (`PYTHON-3798`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.4.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3798: https://jira.mongodb.org/browse/PYTHON-3798
+.. _PYTHON-3800: https://jira.mongodb.org/browse/PYTHON-3800
+.. _PyMongo 4.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36329
+
+Changes in Version 4.4.0 (2023/06/21)
+-------------------------------------
+
+PyMongo 4.4 brings a number of improvements including:
+
+- Added support for MongoDB 7.0.
+- Added support for Python 3.11.
+- Added support for passing a list containing (key, direction) pairs
+  or keys to :meth:`~pymongo.collection.Collection.create_index`.
+- Improved bson encoding performance (`PYTHON-3717`_ and `PYTHON-3718`_).
+- Improved support for Pyright, to improve typing support for IDEs like Visual Studio Code
+  or Visual Studio.
+- Improved support for type-checking with MyPy "strict" mode (``--strict``).
+- Added :meth:`~pymongo.encryption.ClientEncryption.create_encrypted_collection`,
+  :class:`~pymongo.errors.EncryptedCollectionError`,
+  :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression`,
+  :class:`~pymongo.encryption_options.RangeOpts`,
+  and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental
+  Queryable Encryption beta.
+- pymongocrypt 1.6.0 or later is now required for `In-Use Encryption `_ support. MongoDB
+  Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking
+  advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and
+  PyMongo 4.4+.
+- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and
+  :meth:`datetime.datetime.utcfromtimestamp`. ``utcnow`` and ``utcfromtimestamp`` are deprecated
+  in Python 3.12, for reasons explained `in this Github issue`_. Users should
+  use :meth:`datetime.datetime.now(tz=timezone.utc)` and
+  :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead.
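+
+  For example::
+
+    >>> from datetime import datetime, timezone
+    >>> datetime.now(tz=timezone.utc)               # instead of datetime.utcnow()
+    >>> datetime.fromtimestamp(0, tz=timezone.utc)  # instead of datetime.utcfromtimestamp(0)
+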
+
+.. _in this Github issue: https://github.com/python/cpython/issues/103857
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.4 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354
+
+.. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717
+.. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718
+
+Changes in Version 4.3.3 (2022/11/17)
+-------------------------------------
+
+Version 4.3.3 documents support for the following:
+
+- `CSFLE on-demand credentials `_ for cloud KMS providers.
+- Authentication support for `EKS Clusters `_.
+- Added the `timeout `_ example page to improve the documentation
+  for :func:`pymongo.timeout`.
+
+Bug Fixes
+.........
+
+- Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream`
+  and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks
+  instead of line by line (`PYTHON-3502`_).
+- Improved the performance of :meth:`gridfs.grid_file.GridOut.read` and
+  :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.3.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3502: https://jira.mongodb.org/browse/PYTHON-3502
+.. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508
+.. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709
+
+Changes in Version 4.3.2 (2022/10/18)
+-------------------------------------
+
+Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a
+version handling error and a necessary documentation update.
+
+`dnspython `_ is now a required
+dependency. This change makes PyMongo easier to install for use with "mongodb+srv://"
+connection strings and `MongoDB Atlas `_.
+
+PyMongo 4.3 brings a number of improvements including:
+
+- Added support for decoding BSON datetimes outside of the range supported
+  by Python's :class:`~datetime.datetime` builtin. See
+  `handling out of range datetimes `_ for examples, as well as
+  :class:`bson.datetime_ms.DatetimeMS`,
+  :class:`bson.codec_options.DatetimeConversion`, and
+  :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion``
+  parameter for more details (`PYTHON-1824`_). See the sketch after this list.
+- PyMongo now resets its locks and other shared state in the child process
+  after an :py:func:`os.fork` to reduce the frequency of deadlocks. Note that
+  deadlocks are still possible because libraries that PyMongo depends on, like
+  OpenSSL, cannot be made fork() safe in multithreaded applications
+  (`PYTHON-2484`_). For more info see `multiple forks `_.
+- When used with MongoDB 6.0+, :class:`~pymongo.change_stream.ChangeStream` s
+  now allow for new types of events (such as DDL and C2C replication events)
+  to be recorded with the new parameter ``show_expanded_events``
+  that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`.
+- PyMongo now internally caches AWS credentials that it fetches from AWS
+  endpoints, to avoid rate limitations. The cache is cleared when the
+  credentials expire or an error is encountered.
+- When using the ``MONGODB-AWS`` authentication mechanism with the
+  ``aws`` extra, the behavior of credential fetching has changed with
+  ``pymongo_auth_aws>=1.1.0``. Please see `Authentication `_ for
+  more information.
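+
+A minimal sketch of the new out-of-range datetime handling (the sample value
+is illustrative)::
+
+  >>> from bson import decode, encode
+  >>> from bson.codec_options import CodecOptions, DatetimeConversion
+  >>> from bson.datetime_ms import DatetimeMS
+  >>> opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
+  >>> raw = encode({"when": DatetimeMS(-(2**62))})  # far outside datetime.datetime's range
+  >>> decode(raw, codec_options=opts)["when"]       # round-trips as a DatetimeMS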
+
+Bug fixes
+.........
+
+- Fixed a bug where :class:`~pymongo.change_stream.ChangeStream`
+  would allow an app to retry calling ``next()`` or ``try_next()`` even
+  after non-resumable errors (`PYTHON-3389`_).
+- Fixed a bug where the client could be unable to discover the new primary
+  after a simultaneous replica set election and reconfig (`PYTHON-2970`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824
+.. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484
+.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970
+.. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389
+.. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425
+
+Changes in Version 4.2.0 (2022/07/20)
+-------------------------------------
+
+.. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required.
+
+PyMongo 4.2 brings a number of improvements including:
+
+- Support for MongoDB 6.0.
+- Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking
+  changes may be made before the final release. See `automatic queryable client-side encryption `_ for example usage.
+- Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout
+  to an entire block of pymongo operations. See `timeout `_ for examples.
+- Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`.
+- Added the :attr:`pymongo.errors.PyMongoError.timeout` property, which is ``True`` when
+  the error was caused by a timeout.
+- Added the ``check_exists`` argument to :meth:`~pymongo.database.Database.create_collection`,
+  which, when ``True`` (the default), runs an additional ``listCollections`` command to verify that the
+  collection does not already exist.
+- Added the following key management APIs to :class:`~pymongo.encryption.ClientEncryption`:
+
+  - :meth:`~pymongo.encryption.ClientEncryption.get_key`
+  - :meth:`~pymongo.encryption.ClientEncryption.get_keys`
+  - :meth:`~pymongo.encryption.ClientEncryption.delete_key`
+  - :meth:`~pymongo.encryption.ClientEncryption.add_key_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.get_key_by_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.remove_key_alt_name`
+  - :meth:`~pymongo.encryption.ClientEncryption.rewrap_many_data_key`
+  - :class:`~pymongo.encryption.RewrapManyDataKeyResult`
+
+- Support for the ``crypt_shared`` library to replace ``mongocryptd`` using the new
+  ``crypt_shared_lib_path`` and ``crypt_shared_lib_required`` arguments to
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`.
+
+Bug fixes
+.........
+
+- Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count`
+  would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_).
+- Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex`
+  objects. :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData` (`PYTHON-3048`_).
+- Fixed a bug that caused ``AutoReconnect("connection pool paused")`` errors in the child
+  process after fork (`PYTHON-3257`_).
+- Fixed a bug where :meth:`~pymongo.collection.Collection.count_documents` and
+  :meth:`~pymongo.collection.Collection.distinct` would fail in a transaction with
+  ``directConnection=True`` (`PYTHON-3333`_).
+- GridFS no longer uploads an incomplete files collection document after encountering an
+  error in the middle of an upload. This results in fewer
+  :class:`~gridfs.errors.CorruptGridFile` errors (`PYTHON-1552`_).
+- Renamed PyMongo's internal C extension methods to avoid crashing due to name conflicts
+  with mpi4py and other shared libraries (`PYTHON-2110`_).
+- Fixed a tight CPU loop for network I/O when using PyOpenSSL (`PYTHON-3187`_).
+
+Unavoidable breaking changes
+............................
+
+- pymongocrypt 1.3.0 or later is now required for client side field level
+  encryption support.
+- :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses
+  the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB,
+  the count command was not included in V1 of the `Stable API `_.
+  Users of the Stable API with estimated_document_count are recommended to upgrade
+  their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict`
+  to ``False`` to avoid encountering errors (`PYTHON-3167`_).
+- Removed generic typing from :class:`~pymongo.client_session.ClientSession` to improve
+  support for Pyright (`PYTHON-3283`_).
+- Added ``__all__`` to the bson, pymongo, and gridfs packages. This could be a breaking
+  change for apps that relied on ``from bson import *`` to import APIs not present in
+  ``__all__`` (`PYTHON-3311`_).
+
+.. _count: https://mongodb.com/docs/manual/reference/command/count/
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048
+.. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885
+.. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167
+.. _PYTHON-3257: https://jira.mongodb.org/browse/PYTHON-3257
+.. _PYTHON-3333: https://jira.mongodb.org/browse/PYTHON-3333
+.. _PYTHON-1552: https://jira.mongodb.org/browse/PYTHON-1552
+.. _PYTHON-2110: https://jira.mongodb.org/browse/PYTHON-2110
+.. _PYTHON-3283: https://jira.mongodb.org/browse/PYTHON-3283
+.. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311
+.. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187
+.. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196
+
+Changes in Version 4.1.1 (2022/04/13)
+-------------------------------------
+
+Version 4.1.1 fixes a number of bugs:
+
+- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a
+  ``codec_options`` argument (`PYTHON-3222`_).
+- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options``
+  as a keyword argument (`PYTHON-3222`_). See the sketch after this list.
+- Fixed an oversight where type markers (py.typed files) were not included
+  in our release distributions (`PYTHON-3214`_).
+- Fixed a bug where pymongo would raise a "NameError: name sys is not defined"
+  exception when attempting to parse a "mongodb+srv://" URI when the dnspython
+  dependency was not installed (`PYTHON-3198`_).
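+
+A minimal sketch of the now-working keyword form::
+
+  >>> from bson import decode_all, encode
+  >>> from bson.codec_options import CodecOptions
+  >>> data = encode({"x": 1})
+  >>> decode_all(data, codec_options=CodecOptions(document_class=dict))
+  [{'x': 1}]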
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3198: https://jira.mongodb.org/browse/PYTHON-3198
+.. _PYTHON-3214: https://jira.mongodb.org/browse/PYTHON-3214
+.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222
+.. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290
+
+Changes in Version 4.1 (2021/12/07)
+-----------------------------------
+
+.. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1; Python 3.6.2+ is now required.
+
+PyMongo 4.1 brings a number of improvements including:
+
+- Type Hinting support (formerly provided by `pymongo-stubs`_). See `Type Hints `_ for more information.
+- Added support for the ``comment`` parameter to all helpers. For example, see
+  :meth:`~pymongo.collection.Collection.insert_one`.
+- Added support for the ``let`` parameter to
+  :meth:`~pymongo.collection.Collection.update_one`,
+  :meth:`~pymongo.collection.Collection.update_many`,
+  :meth:`~pymongo.collection.Collection.delete_one`,
+  :meth:`~pymongo.collection.Collection.delete_many`,
+  :meth:`~pymongo.collection.Collection.replace_one`,
+  :meth:`~pymongo.collection.Collection.aggregate`,
+  :meth:`~pymongo.collection.Collection.find_one_and_delete`,
+  :meth:`~pymongo.collection.Collection.find_one_and_replace`,
+  :meth:`~pymongo.collection.Collection.find_one_and_update`,
+  :meth:`~pymongo.collection.Collection.find`,
+  :meth:`~pymongo.collection.Collection.find_one`,
+  and :meth:`~pymongo.collection.Collection.bulk_write`.
+  ``let`` is a map of parameter names and values.
+  Parameters can then be accessed as variables in an aggregate expression
+  context.
+- :meth:`~pymongo.collection.Collection.aggregate` now supports
+  $merge and $out executing on secondaries on MongoDB >=5.0.
+  aggregate() now always obeys the collection's :attr:`read_preference` on
+  MongoDB >= 5.0.
+- :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to
+  conform to the behavior of :meth:`io.IOBase.seek`.
+- Improved reuse of implicit sessions (`PYTHON-2956`_).
+
+Bug fixes
+.........
+
+- Fixed a bug that would cause SDAM heartbeat timeouts and connection churn on
+  AWS Lambda and other FaaS environments (`PYTHON-3186`_).
+- Fixed a bug where :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.database.Database`, and :class:`~pymongo.collection.Collection`
+  mistakenly implemented :class:`typing.Iterable` (`PYTHON-3084`_).
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619
+.. _PYTHON-2956: https://jira.mongodb.org/browse/PYTHON-2956
+.. _PYTHON-3084: https://jira.mongodb.org/browse/PYTHON-3084
+.. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186
+.. _pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs
+
+Changes in Version 4.0.2 (2022/03/03)
+-------------------------------------
+
+- No changes
+
+Changes in Version 4.0.1 (2021/12/07)
+-------------------------------------
+
+- No changes
+
+Changes in Version 4.0 (2021/11/29)
+-----------------------------------
+
+.. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5.
+
+.. warning:: PyMongo 4.0 drops support for MongoDB 2.6, 3.0, 3.2, and 3.4.
+
+.. warning:: PyMongo 4.0 changes the default value of the ``directConnection`` URI option and
+   keyword argument to :class:`~pymongo.mongo_client.MongoClient`
+   to ``False`` instead of ``None``, allowing for the automatic
+   discovery of replica sets. 
This means that if you + want a direct connection to a single server you must pass + ``directConnection=True`` as a URI option or keyword argument. + For more details, see the relevant section of the PyMongo 4.x migration + guide: :ref:`pymongo4-migration-direct-connection`. + +PyMongo 4.0 brings a number of improvements as well as some backward breaking +changes. For example, all APIs deprecated in PyMongo 3.X have been removed. +Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` +before upgrading from PyMongo 3.x. + +Breaking Changes in 4.0 +....................... + +- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6.2+ is now required. +- The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, + :class:`~bson.json_util.JSONOptions`, and + :class:`~pymongo.mongo_client.MongoClient` has been changed from + :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a + :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. + See `UUID representations `_ for details. +- Removed the ``waitQueueMultiple`` keyword argument to + :class:`~pymongo.mongo_client.MongoClient` and removed + :exc:`pymongo.errors.ExceededMaxWaiters`. +- Removed the ``socketKeepAlive`` keyword argument to + :class:`~pymongo.mongo_client.MongoClient`. +- Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, + :meth:`pymongo.mongo_client.MongoClient.unlock`, and + :attr:`pymongo.mongo_client.MongoClient.is_locked`. +- Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_message_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.event_listeners`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_pool_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`. +- Removed :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`. +- Removed :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`. +- Removed :attr:`pymongo.mongo_client.MongoClient.retry_writes`. +- Removed :attr:`pymongo.mongo_client.MongoClient.retry_reads`. +- Removed :meth:`pymongo.database.Database.eval`, + :data:`pymongo.database.Database.system_js` and + :class:`pymongo.database.SystemJS`. +- Removed :meth:`pymongo.database.Database.collection_names`. +- Removed :meth:`pymongo.database.Database.current_op`. +- Removed :meth:`pymongo.database.Database.authenticate` and + :meth:`pymongo.database.Database.logout`. +- Removed :meth:`pymongo.database.Database.error`, + :meth:`pymongo.database.Database.last_status`, + :meth:`pymongo.database.Database.previous_error`, + :meth:`pymongo.database.Database.reset_error_history`. +- Removed :meth:`pymongo.database.Database.add_user` and + :meth:`pymongo.database.Database.remove_user`. +- Removed support for database profiler helpers + :meth:`~pymongo.database.Database.profiling_level`, + :meth:`~pymongo.database.Database.set_profiling_level`, + and :meth:`~pymongo.database.Database.profiling_info`. Instead, users + should run the `profile command`_ with the + :meth:`~pymongo.database.Database.command` helper directly. +- Removed :attr:`pymongo.OFF`, :attr:`pymongo.SLOW_ONLY`, and + :attr:`pymongo.ALL`. +- Removed :meth:`pymongo.collection.Collection.parallel_scan`. 
+- Removed :meth:`pymongo.collection.Collection.ensure_index`. +- Removed :meth:`pymongo.collection.Collection.reindex`. +- Removed :meth:`pymongo.collection.Collection.save`. +- Removed :meth:`pymongo.collection.Collection.insert`. +- Removed :meth:`pymongo.collection.Collection.update`. +- Removed :meth:`pymongo.collection.Collection.remove`. +- Removed :meth:`pymongo.collection.Collection.find_and_modify`. +- Removed :meth:`pymongo.collection.Collection.count`. +- Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`, + :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`, and + :class:`pymongo.bulk.BulkOperationBuilder`. Use + :meth:`pymongo.collection.Collection.bulk_write` instead. +- Removed :meth:`pymongo.collection.Collection.group`. +- Removed :meth:`pymongo.collection.Collection.map_reduce` and + :meth:`pymongo.collection.Collection.inline_map_reduce`. +- Removed the ``useCursor`` option for + :meth:`~pymongo.collection.Collection.aggregate`. +- Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use + :meth:`pymongo.cursor.Cursor.close` instead. +- Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. +- Removed :class:`pymongo.cursor_manager.CursorManager` and + :mod:`pymongo.cursor_manager`. +- Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. +- Removed :meth:`pymongo.cursor.Cursor.count`. +- Removed :mod:`pymongo.thread_util`. +- Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +- Removed :class:`~pymongo.ismaster.IsMaster`. + Use :class:`~pymongo.hello.Hello` instead. +- Removed :mod:`pymongo.son_manipulator`, + :class:`pymongo.son_manipulator.SONManipulator`, + :class:`pymongo.son_manipulator.ObjectIdInjector`, + :class:`pymongo.son_manipulator.ObjectIdShuffler`, + :class:`pymongo.son_manipulator.AutoReference`, + :class:`pymongo.son_manipulator.NamespaceInjector`, + :meth:`pymongo.database.Database.add_son_manipulator`, + :attr:`pymongo.database.Database.outgoing_copying_manipulators`, + :attr:`pymongo.database.Database.outgoing_manipulators`, + :attr:`pymongo.database.Database.incoming_copying_manipulators`, and + :attr:`pymongo.database.Database.incoming_manipulators`. +- Removed the ``manipulate`` and ``modifiers`` parameters from + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + :meth:`~pymongo.collection.Collection.find_raw_batches`, and + :meth:`~pymongo.cursor.Cursor`. +- Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`, + :meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`, + :meth:`pymongo.message.query`, and :meth:`pymongo.message.update`. +- Removed :exc:`pymongo.errors.NotMasterError`. + Use :exc:`pymongo.errors.NotPrimaryError` instead. +- Removed :exc:`pymongo.errors.CertificateError`. +- Removed :attr:`pymongo.GEOHAYSTACK`. +- Removed :class:`bson.binary.UUIDLegacy`. +- Removed :const:`bson.json_util.STRICT_JSON_OPTIONS`. Use + :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or + :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead. +- Changed the default JSON encoding representation from legacy to relaxed. + The json_mode parameter for :const:`bson.json_util.dumps` now defaults to + :const:`~bson.json_util.RELAXED_JSON_OPTIONS`. +- Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef` + to match the behavior outlined in the `DBRef specification`_ version 1.0. 
+  Specifically, PyMongo now only decodes a subdocument into a
+  :class:`~bson.dbref.DBRef` if and only if it contains both ``$ref`` and
+  ``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the
+  correct type. Otherwise the document is returned as normal. Previously, any
+  subdocument containing a ``$ref`` field would be decoded as a
+  :class:`~bson.dbref.DBRef`.
+- The "tls" install extra is no longer necessary or supported and will be
+  ignored by pip.
+- The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions`
+  now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads` now
+  decodes datetime as naive by default. See :ref:`tz_aware_default_change` for more info.
+- The ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient`
+  defaults to ``False`` instead of ``None``, allowing for the automatic
+  discovery of replica sets. This means that if you
+  want a direct connection to a single server you must pass
+  ``directConnection=True`` as a URI option or keyword argument.
+- The ``hint`` option is now required when using ``min`` or ``max`` queries
+  with :meth:`~pymongo.collection.Collection.find`.
+- ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class.
+- When providing a "mongodb+srv://" URI to the
+  :class:`~pymongo.mongo_client.MongoClient` constructor, you can now use the
+  ``srvServiceName`` URI option to specify your own SRV service name.
+- :meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather
+  than a list.
+- Removed :meth:`bson.son.SON.iteritems`.
+- :class:`~pymongo.collection.Collection` and :class:`~pymongo.database.Database`
+  now raise an error upon evaluating as a Boolean; use the
+  syntax ``if collection is not None:`` or ``if database is not None:`` as
+  opposed to the previous syntax, which was simply ``if collection:`` or ``if database:``.
+  You must now explicitly compare with None.
+- :class:`~pymongo.mongo_client.MongoClient` cannot execute any operations
+  after being closed. The previous behavior would simply reconnect. However,
+  now you must create a new instance.
+- Classes :class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`,
+  :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`,
+  :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` all implement
+  ``__slots__`` now. This means that their attributes are fixed, and new
+  attributes cannot be added to them at runtime.
+- Empty projections (e.g. {} or []) for
+  :meth:`~pymongo.collection.Collection.find` and
+  :meth:`~pymongo.collection.Collection.find_one`
+  are passed to the server as-is rather than the previous behavior, which
+  substituted in a projection of ``{"_id": 1}``. This means that an empty
+  projection will now return the entire document, not just the ``"_id"`` field.
+- :class:`~pymongo.mongo_client.MongoClient` now raises a
+  :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed
+  into the ``hosts`` argument.
+- :class:`~pymongo.mongo_client.MongoClient` now raises an
+  :exc:`~pymongo.errors.InvalidURI` exception
+  when it encounters unescaped percent signs in the username and password when
+  parsing MongoDB URIs.
+- Comparing two :class:`~pymongo.mongo_client.MongoClient` instances now
+  uses a set of immutable properties rather than
+  :attr:`~pymongo.mongo_client.MongoClient.address`, which can change.
+- Removed the ``disable_md5`` parameter for :class:`~gridfs.GridFSBucket` and
+  :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details.
+- pymongocrypt 1.2.0 or later is now required for client side field level
+  encryption support.
+
+Notable improvements
+....................
+
+- Enhanced connection pooling to create connections more efficiently and
+  avoid connection storms.
+- Added the ``maxConnecting`` URI and
+  :class:`~pymongo.mongo_client.MongoClient` keyword argument.
+- :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword
+  argument ``srvMaxHosts`` that limits the number of mongos-like hosts a client
+  will connect to. More specifically, when a mongodb+srv:// connection string
+  resolves to more than ``srvMaxHosts`` number of hosts, the client will randomly
+  choose an ``srvMaxHosts`` sized subset of hosts.
+- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access
+  to a client's configuration options.
+- Support for the "kmip" KMS provider for client side field level encryption.
+  See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+  and :mod:`~pymongo.encryption`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463
+.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md
+
+Changes in Version 3.13.0 (2022/11/01)
+--------------------------------------
+
+Version 3.13 provides an upgrade path to PyMongo 4.x. Most of the API changes
+from PyMongo 4.0 have been backported in a backward compatible way, allowing
+applications to be written against PyMongo >= 3.13, rather than PyMongo 3.x or
+PyMongo 4.x. See the `PyMongo 4 Migration Guide`_ for detailed examples.
+PyMongo 3.13 drops support for Python 3.4.
+
+Notable improvements
+....................
+
+- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access
+  to a client's configuration options.
+
+Bug fixes
+.........
+
+- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a
+  ``codec_options`` argument (`PYTHON-3222`_).
+- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options``
+  as a keyword argument (`PYTHON-3222`_).
+
+Deprecations
+............
+
+- Deprecated :meth:`~pymongo.collection.Collection.map_reduce` and
+  :meth:`~pymongo.collection.Collection.inline_map_reduce`.
+  Use :meth:`~pymongo.collection.Collection.aggregate` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.event_listeners`.
+  Use :attr:`~pymongo.mongo_client.options.event_listeners` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_pool_size`.
+  Use :attr:`~pymongo.mongo_client.options.pool_options.max_pool_size` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`.
+  Use :attr:`~pymongo.mongo_client.options.pool_options.max_idle_time_seconds` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`.
+  Use :attr:`~pymongo.mongo_client.options.local_threshold_ms` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`.
+  Use :attr:`~pymongo.mongo_client.options.server_selection_timeout` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_writes`.
+  Use :attr:`~pymongo.mongo_client.options.retry_writes` instead.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_reads`.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_bson_size`,
+  :attr:`pymongo.mongo_client.MongoClient.max_message_size`, and
+  :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers
+  were incorrect in ``loadBalanced=true`` mode and ambiguous in clusters
+  with mixed versions. Use the `hello command`_ to get the authoritative
+  value from the remote server instead. Code like this::
+
+    max_bson_size = client.max_bson_size
+    max_message_size = client.max_message_size
+    max_write_batch_size = client.max_write_batch_size
+
+can be changed to this::
+
+    doc = client.admin.command('hello')
+    max_bson_size = doc['maxBsonObjectSize']
+    max_message_size = doc['maxMessageSizeBytes']
+    max_write_batch_size = doc['maxWriteBatchSize']
+
+.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/
+
+See the `PyMongo 3.13.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 4 Migration Guide: https://pymongo.readthedocs.io/en/stable/migrate-to-pymongo4.html
+.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222
+.. _PyMongo 3.13.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31570
+
+Changes in Version 3.12.3 (2021/12/07)
+--------------------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.3 fixes a bug that prevented :meth:`bson.json_util.loads` from
+decoding a document with a non-string "$regex" field (`PYTHON-3028`_).
+
+See the `PyMongo 3.12.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-3028: https://jira.mongodb.org/browse/PYTHON-3028
+.. _PyMongo 3.12.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32505
+
+Changes in Version 3.12.2 (2021/11/29)
+--------------------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.2 fixes a number of bugs:
+
+- Fixed a bug that prevented PyMongo from retrying bulk writes
+  after a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2984`_).
+- Fixed a bug that could cause the driver to hang during automatic
+  client side field level encryption (`PYTHON-3017`_).
+
+See the `PyMongo 3.12.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2984: https://jira.mongodb.org/browse/PYTHON-2984
+.. _PYTHON-3017: https://jira.mongodb.org/browse/PYTHON-3017
+.. _PyMongo 3.12.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32310
+
+Changes in Version 3.12.1 (2021/10/19)
+--------------------------------------
+
+Issues Resolved
+...............
+
+Version 3.12.1 fixes a number of bugs:
+
+- Fixed a bug that caused a multi-document transaction to fail when the first
+  operation was a large bulk write (>48MB) that required splitting a batched
+  write command (`PYTHON-2915`_).
+- Fixed a bug that caused the ``tlsDisableOCSPEndpointCheck`` URI option to
+  be applied incorrectly (`PYTHON-2866`_).
+
+See the `PyMongo 3.12.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2915: https://jira.mongodb.org/browse/PYTHON-2915
+.. _PYTHON-2866: https://jira.mongodb.org/browse/PYTHON-2866
+.. _PyMongo 3.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31527
+
+Changes in Version 3.12.0 (2021/07/13)
+--------------------------------------
+
+.. warning:: PyMongo 3.12.0 deprecates support for Python 2.7, 3.4 and 3.5.
+   These Python versions will not be supported by PyMongo 4.
+
+.. warning:: PyMongo now allows insertion of documents with keys that include
+   dots ('.') or start with dollar signs ('$').
+
+- pymongocrypt 1.1.0 or later is now required for client side field level
+  encryption support.
+- Iterating over :class:`gridfs.grid_file.GridOut` now moves through
+  the file line by line instead of chunk by chunk, and does not
+  restart at the top for subsequent iterations on the same object.
+  Call ``seek(0)`` to reset the iterator.
+
+Notable improvements
+....................
+
+- Added support for MongoDB 5.0.
+- Support for MongoDB Stable API, see :class:`~pymongo.server_api.ServerApi`.
+- Support for snapshot reads on secondaries (see `snapshot reads `_).
+- Support for Azure and GCP KMS providers for client side field level
+  encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`,
+  and :mod:`~pymongo.encryption`.
+- Support AWS authentication with temporary credentials when connecting to KMS
+  in client side field level encryption.
+- Support for connecting to load balanced MongoDB clusters via the new
+  ``loadBalanced`` URI option.
+- Support for creating time series collections via the ``timeseries`` and
+  ``expireAfterSeconds`` arguments to
+  :meth:`~pymongo.database.Database.create_collection`.
+- Added :attr:`pymongo.mongo_client.MongoClient.topology_description`.
+- Added hash support to :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.database.Database` and
+  :class:`~pymongo.collection.Collection` (`PYTHON-2466`_).
+- Improved the error message returned by
+  :meth:`~pymongo.collection.Collection.insert_many` when supplied with an
+  argument of incorrect type (`PYTHON-1690`_).
+- Added session and read concern support to
+  :meth:`~pymongo.collection.Collection.find_raw_batches`
+  and :meth:`~pymongo.collection.Collection.aggregate_raw_batches`.
+
+Bug fixes
+.........
+
+- Fixed a bug that could cause the driver to deadlock during automatic
+  client side field level encryption (`PYTHON-2472`_).
+- Fixed a potential deadlock when garbage collecting an unclosed exhaust
+  :class:`~pymongo.cursor.Cursor`.
+- Fixed a bug where using gevent.Timeout to time out an operation could
+  lead to a deadlock.
+- Fixed the following bug with Atlas Data Lake. When closing cursors,
+  PyMongo now sends killCursors with the namespace returned in the cursor's
+  initial command response.
+- Fixed a bug in :class:`~pymongo.cursor.RawBatchCursor` that caused it to
+  return an empty bytestring when the cursor contained no results. It now
+  raises :exc:`StopIteration` instead.
+
+Deprecations
+............
+
+- Deprecated support for Python 2.7, 3.4 and 3.5.
+- Deprecated support for database profiler helpers
+  :meth:`~pymongo.database.Database.profiling_level`,
+  :meth:`~pymongo.database.Database.set_profiling_level`,
+  and :meth:`~pymongo.database.Database.profiling_info`. Instead, users
+  should run the `profile command`_ with the
+  :meth:`~pymongo.database.Database.command` helper directly.
+- Deprecated :exc:`~pymongo.errors.NotMasterError`. Users should
+  use :exc:`~pymongo.errors.NotPrimaryError` instead.
+- Deprecated :class:`~pymongo.ismaster.IsMaster` and :mod:`~pymongo.ismaster`
+  which will be removed in PyMongo 4.0 and are replaced by
+  :class:`~pymongo.hello.Hello` and :mod:`~pymongo.hello` which provide the
+  same API.
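+
+  As a hedged sketch of the replacement API, the same information can be
+  obtained by running the ``hello`` command directly (``client`` is assumed
+  to be a connected :class:`~pymongo.mongo_client.MongoClient` on a server
+  that supports ``hello``)::
+
+      # The hello response reports the primary via isWritablePrimary.
+      doc = client.admin.command('hello')
+      is_primary = doc['isWritablePrimary']
+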
+- Deprecated the :mod:`pymongo.message` module.
+- Deprecated the ``ssl_keyfile`` and ``ssl_certfile`` URI options in favor
+  of ``tlsCertificateKeyFile`` (see `TLS `_).
+
+.. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466
+.. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690
+.. _PYTHON-2472: https://jira.mongodb.org/browse/PYTHON-2472
+.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.12.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594
+
+Changes in Version 3.11.3 (2021/02/02)
+--------------------------------------
+
+Issues Resolved
+...............
+
+Version 3.11.3 fixes a bug that prevented PyMongo from retrying writes after
+a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2452`_).
+
+See the `PyMongo 3.11.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2452: https://jira.mongodb.org/browse/PYTHON-2452
+.. _PyMongo 3.11.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30355
+
+Changes in Version 3.11.2 (2020/12/02)
+--------------------------------------
+
+Issues Resolved
+...............
+
+Version 3.11.2 includes a number of bugfixes. Highlights include:
+
+- Fixed a memory leak caused by failing SDAM monitor checks on Python 3 (`PYTHON-2433`_).
+- Fixed a regression that changed the string representation of
+  :exc:`~pymongo.errors.BulkWriteError` (`PYTHON-2438`_).
+- Fixed a bug that made it impossible to use
+  :meth:`bson.codec_options.CodecOptions.with_options` and
+  :meth:`~bson.json_util.JSONOptions.with_options` on some early versions of
+  Python 3.4 and Python 3.5 due to a bug in the standard library implementation
+  of :meth:`collections.namedtuple._asdict` (`PYTHON-2440`_).
+- Fixed a bug that resulted in a :exc:`TypeError` exception when a PyOpenSSL
+  socket was configured with a timeout of ``None`` (`PYTHON-2443`_).
+
+See the `PyMongo 3.11.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PYTHON-2433: https://jira.mongodb.org/browse/PYTHON-2433
+.. _PYTHON-2438: https://jira.mongodb.org/browse/PYTHON-2438
+.. _PYTHON-2440: https://jira.mongodb.org/browse/PYTHON-2440
+.. _PYTHON-2443: https://jira.mongodb.org/browse/PYTHON-2443
+.. _PyMongo 3.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30315
+
+Changes in Version 3.11.1 (2020/11/17)
+--------------------------------------
+
+Version 3.11.1 adds support for Python 3.9 and includes a number of bugfixes.
+Highlights include:
+
+- Support for Python 3.9.
+- Initial support for Azure and GCP KMS providers for client side field level
+  encryption is in beta. See the docstring for
+  :class:`~pymongo.mongo_client.MongoClient`,
+  :class:`~pymongo.encryption_options.AutoEncryptionOpts`,
+  and :mod:`~pymongo.encryption`. **Note: Backwards-breaking changes may be
+  made before the final release.**
+- Fixed a bug where the :class:`bson.json_util.JSONOptions` API did not match
+  the :class:`bson.codec_options.CodecOptions` API due to the absence of
+  a :meth:`bson.json_util.JSONOptions.with_options` method. This method has now
+  been added.
+- Fixed a bug which made it impossible to serialize
+  :class:`~pymongo.errors.BulkWriteError` instances using :mod:`pickle`.
+- Fixed a bug wherein PyMongo did not always discard an implicit session after
+  encountering a network error.
+- Fixed a bug where connections created in the background were not
+  authenticated.
+- Fixed a memory leak in the :mod:`bson` module when using a
+  :class:`~bson.codec_options.TypeRegistry`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.11.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.11.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29997
+
+Changes in Version 3.11.0 (2020/07/30)
+--------------------------------------
+
+Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes.
+Highlights include:
+
+- Support for `OCSP `_ (Online Certificate Status Protocol).
+- Support for `PyOpenSSL `_ as an
+  alternative TLS implementation. PyOpenSSL is required for `OCSP `_
+  support. It will also be installed when using the "tls" extra if the
+  version of Python in use is older than 2.7.9.
+- Support for the `MONGODB-AWS `_ authentication mechanism.
+- Support for the ``directConnection`` URI option and kwarg to
+  :class:`~pymongo.mongo_client.MongoClient`.
+- Support for speculative authentication attempts in connection handshakes
+  which reduces the number of network roundtrips needed to authenticate new
+  connections on MongoDB 4.4+.
+- Support for creating collections in multi-document transactions with
+  :meth:`~pymongo.database.Database.create_collection` on MongoDB 4.4+.
+- Added index hinting support to the
+  :meth:`~pymongo.collection.Collection.replace_one`,
+  :meth:`~pymongo.collection.Collection.update_one`,
+  :meth:`~pymongo.collection.Collection.update_many`,
+  :meth:`~pymongo.collection.Collection.find_one_and_replace`,
+  :meth:`~pymongo.collection.Collection.find_one_and_update`,
+  :meth:`~pymongo.collection.Collection.delete_one`,
+  :meth:`~pymongo.collection.Collection.delete_many`, and
+  :meth:`~pymongo.collection.Collection.find_one_and_delete` commands.
+- Added index hinting support to the
+  :class:`~pymongo.operations.ReplaceOne`,
+  :class:`~pymongo.operations.UpdateOne`,
+  :class:`~pymongo.operations.UpdateMany`,
+  :class:`~pymongo.operations.DeleteOne`, and
+  :class:`~pymongo.operations.DeleteMany` bulk operations.
+- Added support for :data:`bson.binary.UuidRepresentation.UNSPECIFIED` and
+  ``MongoClient(uuidRepresentation='unspecified')`` which will become the
+  default UUID representation starting in PyMongo 4.0. See
+  `UUID representations `_ for details.
+- New methods :meth:`bson.binary.Binary.from_uuid` and
+  :meth:`bson.binary.Binary.as_uuid`.
+- Added the ``background`` parameter to
+  :meth:`pymongo.database.Database.validate_collection`. For a description
+  of this parameter see the MongoDB documentation for the `validate command`_.
+- Added the ``allow_disk_use`` parameter to
+  :meth:`pymongo.collection.Collection.find`.
+- Added the ``hedge`` parameter to
+  :class:`~pymongo.read_preferences.PrimaryPreferred`,
+  :class:`~pymongo.read_preferences.Secondary`,
+  :class:`~pymongo.read_preferences.SecondaryPreferred`, and
+  :class:`~pymongo.read_preferences.Nearest` to support disabling
+  (or explicitly enabling) hedged reads in MongoDB 4.4+.
+- Fixed a bug in change streams that could cause PyMongo to miss some change
+  documents when resuming a stream that was started without a resume token and
+  whose first batch did not contain any change documents.
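+
+  As a brief sketch of the resume pattern involved (the database and
+  collection names are illustrative)::
+
+      with client.db.collection.watch() as stream:
+          for change in stream:
+              token = stream.resume_token  # save for later resumption
+              break
+
+      # Resume after the last change seen by the previous stream.
+      with client.db.collection.watch(resume_after=token) as stream:
+          for change in stream:
+              print(change)
+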
+- Fixed a bug where using gevent.Timeout to time out an operation could
+  lead to a deadlock.
+
+Deprecations:
+
+- Deprecated the ``oplog_replay`` parameter to
+  :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the
+  server optimizes queries against the oplog collection without requiring
+  the user to set this flag.
+- Deprecated :meth:`pymongo.collection.Collection.reindex`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command
+  instead.
+- Deprecated :meth:`pymongo.mongo_client.MongoClient.fsync`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``fsync`` command
+  instead.
+- Deprecated :meth:`pymongo.mongo_client.MongoClient.unlock`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``fsyncUnlock`` command
+  instead. See the documentation for more information.
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.is_locked`. Use
+  :meth:`~pymongo.database.Database.command` to run the ``currentOp`` command
+  instead. See the documentation for more information.
+- Deprecated :class:`bson.binary.UUIDLegacy`. Use
+  :meth:`bson.binary.Binary.from_uuid` instead.
+
+Unavoidable breaking changes:
+
+- :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS` do not support
+  multi-document transactions. Running a GridFS operation in a transaction
+  now always raises the following error:
+  ``InvalidOperation: GridFS does not support multi-document transactions``
+
+.. _validate command: https://mongodb.com/docs/manual/reference/command/validate/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.11.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.11.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=24799
+
+Changes in Version 3.10.1 (2020/01/07)
+--------------------------------------
+
+Version 3.10.1 fixes the following issues discovered since the release of
+3.10.0:
+
+- Fix a TypeError logged to stderr that could be triggered during server
+  maintenance or during :meth:`pymongo.mongo_client.MongoClient.close`.
+- Avoid creating new connections during
+  :meth:`pymongo.mongo_client.MongoClient.close`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.10.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=25039
+
+Changes in Version 3.10.0 (2019/12/10)
+--------------------------------------
+
+Version 3.10 includes a number of improvements and bug fixes. Highlights
+include:
+
+- Support for Client-Side Field Level Encryption with MongoDB 4.2. See
+  `Client-Side Field Level Encryption `_ for examples.
+- Support for Python 3.8.
+- Added :attr:`pymongo.client_session.ClientSession.in_transaction`.
+- Do not hold the Topology lock while creating connections in a MongoClient's
+  background thread. This change fixes a bug where application operations would
+  block while the background thread ensures that all server pools have
+  minPoolSize connections.
+- Fix a UnicodeDecodeError bug when coercing a PyMongoError with a non-ascii
+  error message to unicode on Python 2.
+- Fix an edge case bug where PyMongo could exceed the server's
+  maxMessageSizeBytes when generating a compressed bulk write command.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.10 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. 
_PyMongo 3.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=23944 + +Changes in Version 3.9.0 (2019/08/13) +------------------------------------- + +Version 3.9 adds support for MongoDB 4.2. Highlights include: + +- Support for MongoDB 4.2 sharded transactions. Sharded transactions have + the same API as replica set transactions. See `Transactions `_. +- New method :meth:`pymongo.client_session.ClientSession.with_transaction` to + support conveniently running a transaction in a session with automatic + retries and at-most-once semantics. +- Initial support for client side field level encryption. See the docstring for + :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.encryption_options.AutoEncryptionOpts`, + and :mod:`~pymongo.encryption` for details. **Note: Support for client side + encryption is in beta. Backwards-breaking changes may be made before the + final release.** +- Added the ``max_commit_time_ms`` parameter to + :meth:`~pymongo.client_session.ClientSession.start_transaction`. +- Implement the `URI options specification`_ in the + :meth:`~pymongo.mongo_client.MongoClient` constructor. Consequently, there are + a number of changes in connection options: + + - The ``tlsInsecure`` option has been added. + - The ``tls`` option has been added. The older ``ssl`` option has been retained + as an alias to the new ``tls`` option. + - ``wTimeout`` has been deprecated in favor of ``wTimeoutMS``. + - ``wTimeoutMS`` now overrides ``wTimeout`` if the user provides both. + - ``j`` has been deprecated in favor of ``journal``. + - ``journal`` now overrides ``j`` if the user provides both. + - ``ssl_cert_reqs`` has been deprecated in favor of ``tlsAllowInvalidCertificates``. + Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` and ``ssl.CERT_REQUIRED``, the + new option expects a boolean value - ``True`` is equivalent to ``ssl.CERT_NONE``, + while ``False`` is equivalent to ``ssl.CERT_REQUIRED``. + - ``ssl_match_hostname`` has been deprecated in favor of ``tlsAllowInvalidHostnames``. + - ``ssl_ca_certs`` has been deprecated in favor of ``tlsCAFile``. + - ``ssl_certfile`` has been deprecated in favor of ``tlsCertificateKeyFile``. + - ``ssl_pem_passphrase`` has been deprecated in favor of ``tlsCertificateKeyFilePassword``. + - ``waitQueueMultiple`` has been deprecated without replacement. This option + was a poor solution for putting an upper bound on queuing since it didn't + affect queuing in other parts of the driver. +- The ``retryWrites`` URI option now defaults to ``True``. Supported write + operations that fail with a retryable error will automatically be retried one + time, with at-most-once semantics. +- Support for retryable reads and the ``retryReads`` URI option which is + enabled by default. See the :class:`~pymongo.mongo_client.MongoClient` + documentation for details. Now that supported operations are retried + automatically and transparently, users should consider adjusting any custom + retry logic to prevent an application from inadvertently retrying for too + long. +- Support zstandard for wire protocol compression. +- Support for periodically polling DNS SRV records to update the mongos proxy + list without having to change client configuration. +- New method :meth:`pymongo.database.Database.aggregate` to support running + database level aggregations. +- Support for publishing Connection Monitoring and Pooling events via the new + :class:`~pymongo.monitoring.ConnectionPoolListener` class. 
See
+  :mod:`~pymongo.monitoring` for an example.
+- :meth:`pymongo.collection.Collection.aggregate` and
+  :meth:`pymongo.database.Database.aggregate` now support the ``$merge`` pipeline
+  stage and use read preference
+  :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` if the ``$out`` or
+  ``$merge`` pipeline stages are used.
+- Support for specifying a pipeline or document in
+  :meth:`~pymongo.collection.Collection.update_one`,
+  :meth:`~pymongo.collection.Collection.update_many`,
+  :meth:`~pymongo.collection.Collection.find_one_and_update`,
+  :meth:`~pymongo.operations.UpdateOne`, and
+  :meth:`~pymongo.operations.UpdateMany`.
+- New BSON utility functions :func:`~bson.encode` and :func:`~bson.decode`.
+- :class:`~bson.binary.Binary` now supports any bytes-like type that implements
+  the buffer protocol.
+- Resume tokens can now be accessed from a ``ChangeStream`` cursor using the
+  :attr:`~pymongo.change_stream.ChangeStream.resume_token` attribute.
+- Connections now survive primary step-down when using MongoDB 4.2+.
+  Applications should expect less socket connection turnover during
+  replica set elections.
+
+Unavoidable breaking changes:
+
+- Applications that use MongoDB with the MMAPv1 storage engine must now
+  explicitly disable retryable writes via the connection string
+  (e.g. ``MongoClient("mongodb://my.mongodb.cluster/db?retryWrites=false")``) or
+  the :class:`~pymongo.mongo_client.MongoClient` constructor's keyword argument
+  (e.g. ``MongoClient("mongodb://my.mongodb.cluster/db", retryWrites=False)``)
+  to avoid running into :class:`~pymongo.errors.OperationFailure` exceptions
+  during write operations. The MMAPv1 storage engine is deprecated and does
+  not support retryable writes which are now turned on by default.
+- In order to ensure that the ``connectTimeoutMS`` URI option is honored when
+  connecting to clusters with a ``mongodb+srv://`` connection string, the
+  minimum required version of the optional ``dnspython`` dependency has been
+  bumped to 1.16.0. This is a breaking change for applications that use
+  PyMongo's SRV support with a version of ``dnspython`` older than 1.16.0.
+
+.. _URI options specification: https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.md
+
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.9 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21787
+
+Changes in Version 3.8.0 (2019/04/22)
+-------------------------------------
+
+.. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install
+   Python 2.7 or newer from Red Hat Software Collections.
+   CentOS 6 users should install Python 2.7 or newer from `SCL
+   `_.
+
+.. warning:: PyMongo no longer supports PyPy3 versions older than 3.5. Users
+   must upgrade to PyPy3.5+.
+
+- :class:`~bson.objectid.ObjectId` now implements the `ObjectID specification
+  version 0.2 `_.
+- For better performance and to better follow the GridFS spec,
+  :class:`~gridfs.grid_file.GridOut` now uses a single cursor to read all the
+  chunks in the file. Previously, each chunk in the file was queried
+  individually using :meth:`~pymongo.collection.Collection.find_one`.
+- :meth:`gridfs.grid_file.GridOut.read` now only checks for extra chunks after
+  reading the entire file. Previously, this method would check for extra
+  chunks on every call.
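+
+  A minimal read sketch (``db`` and ``file_id`` are assumed to already
+  exist)::
+
+      from gridfs import GridFSBucket
+
+      bucket = GridFSBucket(db)
+      grid_out = bucket.open_download_stream(file_id)
+      data = grid_out.read()  # all chunks are fetched with a single cursor
+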
+- :meth:`~pymongo.database.Database.current_op` now always uses the
+  ``Database``'s :attr:`~pymongo.database.Database.codec_options`
+  when decoding the command response. Previously, the codec_options
+  was only used when the MongoDB server version was <= 3.0.
+- Undeprecated :meth:`~pymongo.mongo_client.MongoClient.get_default_database`
+  and added the ``default`` parameter.
+- TLS Renegotiation is now disabled when possible.
+- Custom types can now be directly encoded to, and decoded from, MongoDB using
+  the :class:`~bson.codec_options.TypeCodec` and
+  :class:`~bson.codec_options.TypeRegistry` APIs. For more information, see
+  `Custom Types `_.
+- Attempting a multi-document transaction on a sharded cluster now raises a
+  :exc:`~pymongo.errors.ConfigurationError`.
+- :meth:`pymongo.cursor.Cursor.distinct` and
+  :meth:`pymongo.cursor.Cursor.count` now send the Cursor's
+  :meth:`~pymongo.cursor.Cursor.comment` as the "comment" top-level
+  command option instead of "$comment". Also, note that "comment" must be a
+  string.
+- Added the ``filter`` parameter to
+  :meth:`~pymongo.database.Database.list_collection_names`.
+- Changes can now be requested from a ``ChangeStream`` cursor without blocking
+  indefinitely using the new
+  :meth:`pymongo.change_stream.ChangeStream.try_next` method.
+- Fixed a reference leak bug when splitting a batched write command based on
+  maxWriteBatchSize or the max message size.
+- Deprecated running find queries that set :meth:`~pymongo.cursor.Cursor.min`
+  and/or :meth:`~pymongo.cursor.Cursor.max` but do not also set a
+  :meth:`~pymongo.cursor.Cursor.hint` of which index to use. The find command
+  is expected to require a :meth:`~pymongo.cursor.Cursor.hint` when using
+  min/max starting in MongoDB 4.2.
+- Documented support for the uuidRepresentation URI option, which has been
+  supported since PyMongo 2.7. Valid values are ``pythonLegacy`` (the default),
+  ``javaLegacy``, ``csharpLegacy`` and ``standard``. New applications should consider
+  setting this to ``standard`` for cross language compatibility.
+- :class:`~bson.raw_bson.RawBSONDocument` now validates that the ``bson_bytes``
+  passed in represents a single BSON document. Earlier versions would mistakenly
+  accept multiple BSON documents.
+- Iterating over a :class:`~bson.raw_bson.RawBSONDocument` now maintains the
+  same field order as the underlying raw BSON document.
+- Applications can now register a custom server selector. For more information
+  see `Customize Server Selection `_.
+- The connection pool now implements a LIFO policy.
+
+Unavoidable breaking changes:
+
+- In order to follow the ObjectID Spec version 0.2, an ObjectId's 3-byte
+  machine identifier and 2-byte process id have been replaced with a single
+  5-byte random value generated per process. This is a breaking change for any
+  application that attempts to interpret those bytes.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.8 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19904
+
+Changes in Version 3.7.2 (2018/10/10)
+-------------------------------------
+
+Version 3.7.2 fixes a few issues discovered since the release of 3.7.1.
+
+- Fixed a bug in retryable writes where a previous command's "txnNumber"
+  field could be sent leading to incorrect results.
+- Fixed a memory leak of a few bytes on some insert, update, or delete
+  commands when running against MongoDB 3.6+.
+- Fixed a bug that caused :meth:`pymongo.collection.Collection.ensure_index`
+  to only cache a single index per database.
+- Updated the documentation examples to use
+  :meth:`pymongo.collection.Collection.count_documents` instead of
+  :meth:`pymongo.collection.Collection.count` and
+  :meth:`pymongo.cursor.Cursor.count`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.7.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21519
+
+Changes in Version 3.7.1 (2018/07/16)
+-------------------------------------
+
+Version 3.7.1 fixes a few issues discovered since the release of 3.7.0.
+
+- Calling :meth:`~pymongo.database.Database.authenticate` more than once
+  with the same credentials results in OperationFailure.
+- Authentication fails when SCRAM-SHA-1 is used to authenticate users with
+  only MONGODB-CR credentials.
+- A millisecond rounding problem when decoding datetimes in the pure Python
+  BSON decoder on 32 bit systems and AWS lambda.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.7.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21096
+
+Changes in Version 3.7.0 (2018/06/26)
+-------------------------------------
+
+Version 3.7 adds support for MongoDB 4.0. Highlights include:
+
+- Support for single replica set multi-document ACID transactions.
+  See `transactions `_.
+- Support for wire protocol compression via the new ``compressors`` URI and
+  keyword argument to :meth:`~pymongo.mongo_client.MongoClient`. See
+  `network compression `_ for details.
+- Support for Python 3.7.
+- New count methods, :meth:`~pymongo.collection.Collection.count_documents`
+  and :meth:`~pymongo.collection.Collection.estimated_document_count`.
+  :meth:`~pymongo.collection.Collection.count_documents` is always
+  accurate when used with MongoDB 3.6+, or when used with older standalone
+  or replica set deployments. With older sharded clusters it is always
+  accurate when used with the Primary read preference. It can also be used in
+  a transaction, unlike the now deprecated
+  :meth:`pymongo.collection.Collection.count` and
+  :meth:`pymongo.cursor.Cursor.count` methods.
+- Support for watching changes on all collections in a database using the
+  new :meth:`pymongo.database.Database.watch` method.
+- Support for watching changes on all collections in all databases using the
+  new :meth:`pymongo.mongo_client.MongoClient.watch` method.
+- Support for watching changes starting at a user provided timestamp using the
+  new ``start_at_operation_time`` parameter for the ``watch()`` helpers.
+- Better support for using PyMongo in a FIPS 140-2 environment. Specifically,
+  the following features and changes allow PyMongo to function when MD5 support
+  is disabled in OpenSSL by the FIPS Object Module:
+
+  - Support for the `SCRAM-SHA-256 `_
+    authentication mechanism. The `GSSAPI `_,
+    `PLAIN `_, and `MONGODB-X509 `_
+    mechanisms can also be used to avoid issues with OpenSSL in FIPS
+    environments.
+  - MD5 checksums are now optional in GridFS. See the ``disable_md5`` option
+    of :class:`~gridfs.GridFS` and :class:`~gridfs.GridFSBucket`.
+  - :class:`~bson.objectid.ObjectId` machine bytes are now hashed using
+    `FNV-1a `_ instead of MD5.
+
+- The :meth:`~pymongo.database.Database.list_collection_names` and
+  :meth:`~pymongo.database.Database.collection_names` methods use
+  the nameOnly option when supported by MongoDB.
+- The :meth:`pymongo.collection.Collection.watch` method now returns an
+  instance of the :class:`~pymongo.change_stream.CollectionChangeStream`
+  class which is a subclass of :class:`~pymongo.change_stream.ChangeStream`.
+- SCRAM client and server keys are cached for improved performance, following
+  `RFC 5802 `_.
+- If not specified, the authSource for the `PLAIN `_
+  authentication mechanism defaults to $external.
+- wtimeoutMS is once again supported as a URI option.
+- When using unacknowledged write concern and connected to MongoDB server
+  version 3.6 or greater, the ``bypass_document_validation`` option is now
+  supported in the following write helpers:
+  :meth:`~pymongo.collection.Collection.insert_one`,
+  :meth:`~pymongo.collection.Collection.replace_one`,
+  :meth:`~pymongo.collection.Collection.update_one`,
+  :meth:`~pymongo.collection.Collection.update_many`.
+
+Deprecations:
+
+- Deprecated :meth:`pymongo.collection.Collection.count` and
+  :meth:`pymongo.cursor.Cursor.count`. These two methods use the ``count``
+  command and `may or may not be accurate
+  `_,
+  depending on the options used and connected MongoDB topology. Use
+  :meth:`~pymongo.collection.Collection.count_documents` instead.
+- Deprecated the snapshot option of :meth:`~pymongo.collection.Collection.find`
+  and :meth:`~pymongo.collection.Collection.find_one`. The option was
+  deprecated in MongoDB 3.6 and removed in MongoDB 4.0.
+- Deprecated the max_scan option of :meth:`~pymongo.collection.Collection.find`
+  and :meth:`~pymongo.collection.Collection.find_one`. The option was
+  deprecated in MongoDB 4.0. Use ``maxTimeMS`` instead.
+- Deprecated :meth:`~pymongo.mongo_client.MongoClient.close_cursor`. Use
+  :meth:`~pymongo.cursor.Cursor.close` instead.
+- Deprecated :meth:`~pymongo.mongo_client.MongoClient.database_names`. Use
+  :meth:`~pymongo.mongo_client.MongoClient.list_database_names` instead.
+- Deprecated :meth:`~pymongo.database.Database.collection_names`. Use
+  :meth:`~pymongo.database.Database.list_collection_names` instead.
+- Deprecated :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2
+  will remove the parallelCollectionScan command.
+
+Unavoidable breaking changes:
+
+- Commands that fail with server error codes 10107, 13435, 13436, 11600,
+  11602, 189, 91 (NotMaster, NotMasterNoSlaveOk, NotMasterOrSecondary,
+  InterruptedAtShutdown, InterruptedDueToReplStateChange,
+  PrimarySteppedDown, ShutdownInProgress respectively) now always raise
+  :class:`~pymongo.errors.NotMasterError` instead of
+  :class:`~pymongo.errors.OperationFailure`.
+- :meth:`~pymongo.collection.Collection.parallel_scan` no longer uses an
+  implicit session. Explicit sessions are still supported.
+- Unacknowledged writes (``w=0``) with an explicit ``session`` parameter now
+  raise a client side error. Since PyMongo does not wait for a response for an
+  unacknowledged write, two unacknowledged writes run serially by the client
+  may be executed simultaneously on the server. However, the server requires
+  that a single session not be used simultaneously by more than one operation.
+  Therefore explicit sessions cannot support unacknowledged writes.
+  Unacknowledged writes without a ``session`` parameter are still supported.
+
+
+Issues Resolved
+...............
+ +See the `PyMongo 3.7 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19287 + +Changes in Version 3.6.1 (2018/03/01) +------------------------------------- + +Version 3.6.1 fixes bugs reported since the release of 3.6.0: + +- Fix regression in PyMongo 3.5.0 that causes idle sockets to be closed almost + instantly when ``maxIdleTimeMS`` is set. Idle sockets are now closed after + ``maxIdleTimeMS`` milliseconds. +- :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms` now returns + milliseconds instead of seconds. +- Properly import and use the + `monotonic `_ + library for monotonic time when it is installed. +- :meth:`~pymongo.collection.Collection.aggregate` now ignores the + ``batchSize`` argument when running a pipeline with a ``$out`` stage. +- Always send handshake metadata for new connections. + +Issues Resolved +............... + +See the `PyMongo 3.6.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19438 + + +Changes in Version 3.6.0 (2017/08/23) +------------------------------------- + +Version 3.6 adds support for MongoDB 3.6, drops support for CPython 3.3 (PyPy3 +is still supported), and drops support for MongoDB versions older than 2.6. If +connecting to a MongoDB 2.4 server or older, PyMongo now throws a +:exc:`~pymongo.errors.ConfigurationError`. + +Highlights include: + +- Support for change streams. See the + :meth:`~pymongo.collection.Collection.watch` method for details. +- Support for array_filters in + :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, + :meth:`~pymongo.collection.Collection.find_one_and_update`, + :meth:`~pymongo.operations.UpdateOne`, and + :meth:`~pymongo.operations.UpdateMany`. +- New Session API, see :meth:`~pymongo.mongo_client.MongoClient.start_session`. +- New methods :meth:`~pymongo.collection.Collection.find_raw_batches` and + :meth:`~pymongo.collection.Collection.aggregate_raw_batches` for use with + external libraries that can parse raw batches of BSON data. +- New methods :meth:`~pymongo.mongo_client.MongoClient.list_databases` and + :meth:`~pymongo.mongo_client.MongoClient.list_database_names`. +- New methods :meth:`~pymongo.database.Database.list_collections` and + :meth:`~pymongo.database.Database.list_collection_names`. +- Support for mongodb+srv:// URIs. See + :class:`~pymongo.mongo_client.MongoClient` for details. +- Index management helpers + (:meth:`~pymongo.collection.Collection.create_index`, + :meth:`~pymongo.collection.Collection.create_indexes`, + :meth:`~pymongo.collection.Collection.drop_index`, + :meth:`~pymongo.collection.Collection.drop_indexes`, + :meth:`~pymongo.collection.Collection.reindex`) now support maxTimeMS. +- Support for retryable writes and the ``retryWrites`` URI option. See + :class:`~pymongo.mongo_client.MongoClient` for details. + +Deprecations: + +- The ``useCursor`` option for :meth:`~pymongo.collection.Collection.aggregate` + is deprecated. The option was only necessary when upgrading from MongoDB + 2.4 to MongoDB 2.6. MongoDB 2.4 is no longer supported. +- The :meth:`~pymongo.database.Database.add_user` and + :meth:`~pymongo.database.Database.remove_user` methods are deprecated. See + the method docstrings for alternatives. 
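+
+To illustrate the new session and change stream APIs described above, a
+minimal sketch (assumes a MongoDB 3.6+ replica set on localhost; the
+database and collection names are illustrative)::
+
+    from pymongo import MongoClient
+
+    client = MongoClient()
+    with client.db.coll.watch() as stream:
+        with client.start_session() as session:
+            client.db.coll.insert_one({"x": 1}, session=session)
+        change = next(stream)  # the insert performed above
+        print(change["operationType"])  # "insert"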
+ +Unavoidable breaking changes: + +- Starting in MongoDB 3.6, the deprecated methods + :meth:`~pymongo.database.Database.authenticate` and + :meth:`~pymongo.database.Database.logout` now invalidate all cursors created + prior. Instead of using these methods to change credentials, pass credentials + for one user to the :class:`~pymongo.mongo_client.MongoClient` at construction + time, and either grant access to several databases to one user account, or use + a distinct client object for each user. +- BSON binary subtype 4 is decoded using RFC-4122 byte order regardless + of the UUID representation. This is a change in behavior for applications + that use UUID representation :data:`bson.binary.JAVA_LEGACY` or + :data:`bson.binary.CSHARP_LEGACY` to decode BSON binary subtype 4. Other + UUID representations, :data:`bson.binary.PYTHON_LEGACY` (the default) and + :data:`bson.binary.STANDARD`, and the decoding of BSON binary subtype 3 + are unchanged. + + +Issues Resolved +............... + +See the `PyMongo 3.6 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18043 + +Changes in Version 3.5.1 (2017/08/23) +------------------------------------- + +Version 3.5.1 fixes bugs reported since the release of 3.5.0: + +- Work around socket.getsockopt issue with NetBSD. +- :meth:`pymongo.command_cursor.CommandCursor.close` now closes + the cursor synchronously instead of deferring to a background + thread. +- Fix documentation build warnings with Sphinx 1.6.x. + +Issues Resolved +............... + +See the `PyMongo 3.5.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18721 + +Changes in Version 3.5.0 (2017/08/08) +------------------------------------- + +Version 3.5 implements a number of improvements and bug fixes: + +Highlights include: + +- Username and password can be passed to + :class:`~pymongo.mongo_client.MongoClient` as keyword arguments. Before, the + only way to pass them was in the URI. +- Increased the performance of using :class:`~bson.raw_bson.RawBSONDocument`. +- Increased the performance of + :meth:`~pymongo.mongo_client.MongoClient.database_names` by using the + ``nameOnly`` option for listDatabases when available. +- Increased the performance of + :meth:`~pymongo.collection.Collection.bulk_write` by reducing the memory + overhead of :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.DeleteOne`, and + :class:`~pymongo.operations.DeleteMany`. +- Added the ``collation`` option to :class:`~pymongo.operations.DeleteOne`, + :class:`~pymongo.operations.DeleteMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.UpdateOne`, and + :class:`~pymongo.operations.UpdateMany`. +- Implemented the `MongoDB Extended JSON + `_ + specification. +- :class:`~bson.decimal128.Decimal128` now works when cdecimal is installed. +- PyMongo is now tested against a wider array of operating systems and CPU + architectures (including s390x, ARM64, and POWER8). + +Changes and Deprecations: + +- :meth:`~pymongo.collection.Collection.find` has new options ``return_key``, + ``show_record_id``, ``snapshot``, ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, + and ``comment``. Deprecated the option ``modifiers``. +- Deprecated :meth:`~pymongo.collection.Collection.group`. 
The group command
+  was deprecated in MongoDB 3.4 and is expected to be removed in MongoDB 3.6.
+  Applications should use :meth:`~pymongo.collection.Collection.aggregate`
+  with the ``$group`` pipeline stage instead.
+- Deprecated :meth:`~pymongo.database.Database.authenticate`. Authenticating
+  multiple users conflicts with support for logical sessions in MongoDB 3.6.
+  To authenticate as multiple users, create multiple instances of
+  :class:`~pymongo.mongo_client.MongoClient`.
+- Deprecated :meth:`~pymongo.database.Database.eval`. The eval command
+  was deprecated in MongoDB 3.0 and will be removed in a future server version.
+- Deprecated :class:`~pymongo.database.SystemJS`.
+- Deprecated :meth:`~pymongo.mongo_client.MongoClient.get_default_database`.
+  Applications should use
+  :meth:`~pymongo.mongo_client.MongoClient.get_database` without the ``name``
+  parameter instead.
+- Deprecated the MongoClient option ``socketKeepAlive``. It now defaults to
+  true and disabling it is not recommended; see `does TCP keepalive time
+  affect MongoDB Deployments? `_
+- Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`,
+  :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and
+  :class:`~pymongo.bulk.BulkOperationBuilder`. Use
+  :meth:`~pymongo.collection.Collection.bulk_write` instead.
+- Deprecated :const:`~bson.json_util.STRICT_JSON_OPTIONS`. Use
+  :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or
+  :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead.
+- If a custom :class:`~bson.codec_options.CodecOptions` is passed to
+  :class:`RawBSONDocument`, its ``document_class`` must be
+  :class:`RawBSONDocument`.
+- :meth:`~pymongo.collection.Collection.list_indexes` no longer raises
+  OperationFailure when the collection (or database) does not exist on
+  MongoDB >= 3.0. Instead, it returns an empty
+  :class:`~pymongo.command_cursor.CommandCursor` to make the behavior
+  consistent across all MongoDB versions.
+- In Python 3, :meth:`~bson.json_util.loads` now automatically decodes JSON
+  $binary with a subtype of 0 into :class:`bytes` instead of
+  :class:`~bson.binary.Binary`.
+- :meth:`~bson.json_util.loads` now raises ``TypeError`` or ``ValueError``
+  when parsing JSON type wrappers with values of the wrong type or any
+  extra keys.
+- :meth:`pymongo.cursor.Cursor.close` and
+  :meth:`pymongo.mongo_client.MongoClient.close`
+  now kill cursors synchronously instead of deferring to a background thread.
+- :meth:`~pymongo.uri_parser.parse_uri` now returns the original value
+  of the ``readPreference`` MongoDB URI option instead of the validated read
+  preference mode.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.5 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17590
+
+Changes in Version 3.4.0 (2016/11/29)
+-------------------------------------
+
+Version 3.4 implements the new server features introduced in MongoDB 3.4
+and a whole lot more:
+
+Highlights include:
+
+- Complete support for MongoDB 3.4:
+
+  - Unicode aware string comparison using `Collation `_.
+  - Support for the new :class:`~bson.decimal128.Decimal128` BSON type.
+  - A new maxStalenessSeconds read preference option.
+  - A username is no longer required for the MONGODB-X509 authentication
+    mechanism when connected to MongoDB >= 3.4.
+  - :meth:`~pymongo.collection.Collection.parallel_scan` supports maxTimeMS.
+  - :attr:`~pymongo.write_concern.WriteConcern` is automatically
+    applied by all helpers for commands that write to the database when
+    connected to MongoDB 3.4+. This change affects the following helpers:
+
+    - :meth:`~pymongo.mongo_client.MongoClient.drop_database`
+    - :meth:`~pymongo.database.Database.create_collection`
+    - :meth:`~pymongo.database.Database.drop_collection`
+    - :meth:`~pymongo.collection.Collection.aggregate` (when using $out)
+    - :meth:`~pymongo.collection.Collection.create_indexes`
+    - :meth:`~pymongo.collection.Collection.create_index`
+    - :meth:`~pymongo.collection.Collection.drop_indexes`
+    - :meth:`~pymongo.collection.Collection.drop_index`
+    - :meth:`~pymongo.collection.Collection.map_reduce` (when output is not
+      "inline")
+    - :meth:`~pymongo.collection.Collection.reindex`
+    - :meth:`~pymongo.collection.Collection.rename`
+
+- Improved support for logging server discovery and monitoring events. See
+  :mod:`~pymongo.monitoring` for examples.
+- Support for matching iPAddress subjectAltName values for TLS certificate
+  verification.
+- TLS compression is now explicitly disabled when possible.
+- The Server Name Indication (SNI) TLS extension is used when possible.
+- Finer control over JSON encoding/decoding with
+  :class:`~bson.json_util.JSONOptions`.
+- Allow :class:`~bson.code.Code` objects to have a scope of ``None``,
+  signifying no scope. Also allow encoding Code objects with an empty scope
+  (i.e. ``{}``).
+
+.. warning:: Starting in PyMongo 3.4, :attr:`bson.code.Code.scope` may return
+   ``None``, as the default scope is ``None`` instead of ``{}``.
+
+.. note:: PyMongo 3.4+ attempts to create sockets non-inheritable when possible
+   (i.e. it sets the close-on-exec flag on socket file descriptors). Support
+   is limited to a subset of POSIX operating systems (not including Windows) and
+   the flag usually cannot be set in a single atomic operation. CPython 3.4+
+   implements `PEP 446`_, creating all file descriptors non-inheritable by
+   default. Users that require this behavior are encouraged to upgrade to
+   CPython 3.4+.
+
+Since 3.4rc0, the max staleness option has been renamed from ``maxStalenessMS``
+to ``maxStalenessSeconds``, its smallest value has changed from twice
+``heartbeatFrequencyMS`` to 90 seconds, and its default value has changed from
+``None`` or 0 to -1.
+
+.. _PEP 446: https://www.python.org/dev/peps/pep-0446/
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.4 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16594
+
+Changes in Version 3.3.1 (2016/10/27)
+-------------------------------------
+
+Version 3.3.1 fixes a memory leak when decoding elements inside of a
+:class:`~bson.raw_bson.RawBSONDocument`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.3.1 release notes in Jira`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.3.1 release notes in Jira: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17636
+
+Changes in Version 3.3.0 (2016/07/12)
+-------------------------------------
+
+Version 3.3 adds the following major new features:
+
+- C extensions support on big endian systems.
+- Kerberos authentication support on Windows using `WinKerberos
+  `_.
+- A new ``ssl_crlfile`` option to support certificate revocation lists.
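+
+  For example (the URI and file path are illustrative)::
+
+      client = MongoClient('mongodb://example.com/',
+                           ssl=True,
+                           ssl_crlfile='/path/to/crl.pem')
+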
+- A new ``ssl_pem_passphrase`` option to support encrypted key files.
+- Support for publishing server discovery and monitoring events. See
+  :mod:`~pymongo.monitoring` for details.
+- New connection pool options ``minPoolSize`` and ``maxIdleTimeMS``.
+- New ``heartbeatFrequencyMS`` option controls the rate at which background
+  monitoring threads re-check servers. Default is once every 10 seconds.
+
+.. warning:: PyMongo 3.3 drops support for MongoDB versions older than 2.4.
+   It also drops support for Python 3.2 (PyPy3 continues to be supported).
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16005
+
+Changes in Version 3.2.2 (2016/03/15)
+-------------------------------------
+
+Version 3.2.2 fixes a few issues reported since the release of 3.2.1, including
+a fix for using the ``connect`` option in the MongoDB URI and support for setting
+the batch size for a query to 1 when using MongoDB 3.2+.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.2.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16538
+
+
+Changes in Version 3.2.1 (2016/02/02)
+-------------------------------------
+
+Version 3.2.1 fixes a few issues reported since the release of 3.2, including
+running the mapreduce command twice when calling the
+:meth:`~pymongo.collection.Collection.inline_map_reduce` method and a
+:exc:`TypeError` being raised when calling
+:meth:`~gridfs.GridFSBucket.download_to_stream`. This release also
+improves error messaging around BSON decoding.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.2.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16312
+
+Changes in Version 3.2 (2015/12/07)
+-----------------------------------
+
+Version 3.2 implements the new server features introduced in MongoDB 3.2.
+
+Highlights include:
+
+- Full support for MongoDB 3.2 including:
+
+  - Support for :class:`~pymongo.read_concern.ReadConcern`
+  - :class:`~pymongo.write_concern.WriteConcern` is now applied to
+    :meth:`~pymongo.collection.Collection.find_one_and_replace`,
+    :meth:`~pymongo.collection.Collection.find_one_and_update`, and
+    :meth:`~pymongo.collection.Collection.find_one_and_delete`.
+  - Support for the new ``bypassDocumentValidation`` option in write
+    helpers.
+
+- Support for reading and writing raw BSON with
+  :class:`~bson.raw_bson.RawBSONDocument`.
+
+.. note:: Certain :class:`~pymongo.mongo_client.MongoClient` properties now
+   block until a connection is established or raise
+   :exc:`~pymongo.errors.ServerSelectionTimeoutError` if no server is available.
+   See :class:`~pymongo.mongo_client.MongoClient` for details.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15612
+
+Changes in Version 3.1.1 (2015/11/17)
+-------------------------------------
+
+Version 3.1.1 fixes a few issues reported since the release of 3.1, including a
+regression in error handling for oversize command documents and interrupt
+handling issues in the C extensions.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.1.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16211
+
+Changes in Version 3.1 (2015/11/02)
+-----------------------------------
+
+Version 3.1 implements a few new features and fixes bugs reported since the
+release of 3.0.3.
+
+Highlights include:
+
+- Command monitoring support. See :mod:`~pymongo.monitoring` for details.
+- Configurable error handling for :exc:`UnicodeDecodeError`. See the
+  ``unicode_decode_error_handler`` option of
+  :class:`~bson.codec_options.CodecOptions`.
+- Optional automatic timezone conversion when decoding BSON datetime. See the
+  ``tzinfo`` option of :class:`~bson.codec_options.CodecOptions`.
+- An implementation of :class:`~gridfs.GridFSBucket` from the new GridFS spec.
+- Compliance with the new Connection String spec.
+- Reduced idle CPU usage in Python 2.
+
+Changes in internal classes
+...........................
+
+The private ``PeriodicExecutor`` class no longer takes a ``condition_class``
+option, and the private ``thread_util.Event`` class is removed.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14796
+
+Changes in Version 3.0.3 (2015/06/30)
+-------------------------------------
+
+Version 3.0.3 fixes issues reported since the release of 3.0.2, including a
+feature-breaking bug in the GSSAPI implementation.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.0.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.0.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15528
+
+Changes in Version 3.0.2 (2015/05/12)
+-------------------------------------
+
+Version 3.0.2 fixes issues reported since the release of 3.0.1, most
+importantly a bug that could route operations to replica set members
+that are not in primary or secondary state when using
+:class:`~pymongo.read_preferences.PrimaryPreferred` or
+:class:`~pymongo.read_preferences.Nearest`. It is a recommended upgrade for
+all users of PyMongo 3.0.x.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.0.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.0.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15430
+
+Changes in Version 3.0.1 (2015/04/21)
+-------------------------------------
+
+Version 3.0.1 fixes issues reported since the release of 3.0, most
+importantly a bug in GridFS.delete that could prevent file chunks from
+actually being deleted.
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.0.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.0.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15322
+
+Changes in Version 3.0 (2015/04/07)
+-----------------------------------
+
+PyMongo 3.0 is a partial rewrite of PyMongo bringing a large number of
+improvements:
+
+- A unified client class. MongoClient is the one and only client class for
+  connecting to a standalone mongod, replica set, or sharded cluster. Migrating
+  from a standalone, to a replica set, to a sharded cluster can be accomplished
+  with only a simple URI change.
+- MongoClient is much more responsive to configuration changes in your MongoDB
+  deployment. All connected servers are monitored in a non-blocking manner.
+  Slow to respond or down servers no longer block server discovery, reducing
+  application startup time and time to respond to new or reconfigured
+  servers and replica set failovers.
+- A unified CRUD API. All official MongoDB drivers now implement a standard
+  CRUD API allowing polyglot developers to move from language to language
+  with ease.
+- Single source support for Python 2.x and 3.x. PyMongo no longer relies on
+  2to3 to support Python 3.
+- A rewritten pure Python BSON implementation, improving performance
+  with PyPy and CPython deployments without support for C extensions.
+- Better support for greenlet-based async frameworks including eventlet.
+- Immutable client, database, and collection classes, avoiding a host of thread
+  safety issues in client applications.
+
+PyMongo 3.0 brings a large number of API changes. Be sure to read the changes
+listed below before upgrading from PyMongo 2.x.
+
+.. warning:: PyMongo no longer supports Python 2.4, 2.5, or 3.1. If you
+   must use PyMongo with these versions of Python the 2.x branch of PyMongo
+   will be minimally supported for some time.
+
+SONManipulator changes
+......................
+
+The :class:`~pymongo.son_manipulator.SONManipulator` API has limitations as a
+technique for transforming your data. Instead, it is more flexible and
+straightforward to transform outgoing documents in your own code before passing
+them to PyMongo, and transform incoming documents after receiving them from
+PyMongo.
+
+Thus the :meth:`~pymongo.database.Database.add_son_manipulator` method is
+deprecated. PyMongo 3's new CRUD API does **not** apply SON manipulators to
+documents passed to :meth:`~pymongo.collection.Collection.bulk_write`,
+:meth:`~pymongo.collection.Collection.insert_one`,
+:meth:`~pymongo.collection.Collection.insert_many`,
+:meth:`~pymongo.collection.Collection.update_one`, or
+:meth:`~pymongo.collection.Collection.update_many`. SON manipulators are **not**
+applied to documents returned by the new methods
+:meth:`~pymongo.collection.Collection.find_one_and_delete`,
+:meth:`~pymongo.collection.Collection.find_one_and_replace`, and
+:meth:`~pymongo.collection.Collection.find_one_and_update`.
+
+SSL/TLS changes
+...............
+
+When ``ssl`` is ``True`` the ``ssl_cert_reqs`` option now defaults to
+:attr:`ssl.CERT_REQUIRED` if not provided. PyMongo will attempt to load OS
+provided CA certificates to verify the server, raising
+:exc:`~pymongo.errors.ConfigurationError` if it cannot.
+
+Gevent Support
+..............
+
+In previous versions, PyMongo supported Gevent in two modes: you could call
+``gevent.monkey.patch_socket()`` and pass ``use_greenlets=True`` to
+:class:`~pymongo.mongo_client.MongoClient`, or you could simply call
+``gevent.monkey.patch_all()`` and omit the ``use_greenlets`` argument.
+
+In PyMongo 3.0, the ``use_greenlets`` option is gone. To use PyMongo with
+Gevent simply call ``gevent.monkey.patch_all()``.
+
+For more information,
+see `Gevent `_.
+
+:class:`~pymongo.mongo_client.MongoClient` changes
+..................................................
+
+:class:`~pymongo.mongo_client.MongoClient` is now the one and only
+client class for a standalone server, mongos, or replica set.
+It includes the functionality that had been split into
+``MongoReplicaSetClient``: it can connect to a replica set, discover all its
+members, and monitor the set for stepdowns, elections, and reconfigs.
+:class:`~pymongo.mongo_client.MongoClient` now also supports the full
+:class:`~pymongo.read_preferences.ReadPreference` API.
+
+The obsolete classes ``MasterSlaveConnection``, ``Connection``, and
+``ReplicaSetConnection`` are removed.
+
+The :class:`~pymongo.mongo_client.MongoClient` constructor no
+longer blocks while connecting to the server or servers, and it no
+longer raises :class:`~pymongo.errors.ConnectionFailure` if they
+are unavailable, nor :class:`~pymongo.errors.ConfigurationError`
+if the user's credentials are wrong. Instead, the constructor
+returns immediately and launches the connection process on
+background threads. The ``connect`` option is added to control whether
+these threads are started immediately, or when the client is first used.
+
+Therefore the ``alive`` method is removed since it no longer provides
+meaningful information; even if the client is disconnected, it may discover a
+server in time to fulfill the next operation.
+
+In PyMongo 2.x, :class:`~pymongo.mongo_client.MongoClient` accepted a list of
+standalone MongoDB servers and used the first it could connect to::
+
+    MongoClient(['host1.com:27017', 'host2.com:27017'])
+
+A list of multiple standalones is no longer supported; if multiple servers
+are listed they must be members of the same replica set, or mongoses in the
+same sharded cluster.
+
+The behavior for a list of mongoses is changed from "high availability" to
+"load balancing". Before, the client connected to the lowest-latency mongos in
+the list, and used it until a network error prompted it to re-evaluate all
+mongoses' latencies and reconnect to one of them. In PyMongo 3, the client
+monitors its network latency to all the mongoses continuously, and distributes
+operations evenly among those with the lowest latency.
+See `load balancing `_ for more information.
+
+The client methods ``start_request``, ``in_request``, and ``end_request``
+are removed, and so is the ``auto_start_request`` option. Requests were
+designed to make read-your-writes consistency more likely with the ``w=0``
+write concern. Additionally, a thread in a request used the same member for
+all secondary reads in a replica set. To ensure read-your-writes consistency
+in PyMongo 3.0, do not override the default write concern with ``w=0``, and
+do not override the default `read preference `_ of
+PRIMARY.
+
+Support for the ``slaveOk`` (or ``slave_okay``), ``safe``, and
+``network_timeout`` options has been removed. Use
+:attr:`~pymongo.read_preferences.ReadPreference.SECONDARY_PREFERRED` instead
+of ``slave_okay``. Accept the default write concern, acknowledged writes,
+instead of setting ``safe=True``. Use ``socketTimeoutMS`` in place of
+``network_timeout`` (note that ``network_timeout`` was in seconds, whereas
+``socketTimeoutMS`` is in milliseconds).
+
+The ``max_pool_size`` option has been removed. It is replaced by the
+``maxPoolSize`` MongoDB URI option.
``maxPoolSize`` is now a supported URI +option in PyMongo and can be passed as a keyword argument. + +The ``copy_database`` method is removed, see `Copy and Clone Databases `_ for alternatives. + +The ``disconnect`` method is removed. Use +:meth:`~pymongo.mongo_client.MongoClient.close` instead. + +The ``get_document_class`` method is removed. Use +:attr:`~pymongo.mongo_client.MongoClient.codec_options` instead. + +The ``get_lasterror_options``, ``set_lasterror_options``, and +``unset_lasterror_options`` methods are removed. Write concern options +can be passed to :class:`~pymongo.mongo_client.MongoClient` as keyword +arguments or MongoDB URI options. + +The :meth:`~pymongo.mongo_client.MongoClient.get_database` method is added for +getting a Database instance with its options configured differently than the +MongoClient's. + +The following read-only attributes have been added: + +- :attr:`~pymongo.mongo_client.MongoClient.codec_options` + +The following attributes are now read-only: + +- :attr:`~pymongo.mongo_client.MongoClient.read_preference` +- :attr:`~pymongo.mongo_client.MongoClient.write_concern` + +The following attributes have been removed: + +- :attr:`~pymongo.mongo_client.MongoClient.document_class` + (use :attr:`~pymongo.mongo_client.MongoClient.codec_options` instead) +- :attr:`~pymongo.mongo_client.MongoClient.host` + (use :attr:`~pymongo.mongo_client.MongoClient.address` instead) +- :attr:`~pymongo.mongo_client.MongoClient.min_wire_version` +- :attr:`~pymongo.mongo_client.MongoClient.max_wire_version` +- :attr:`~pymongo.mongo_client.MongoClient.port` + (use :attr:`~pymongo.mongo_client.MongoClient.address` instead) +- :attr:`~pymongo.mongo_client.MongoClient.safe` + (use :attr:`~pymongo.mongo_client.MongoClient.write_concern` instead) +- :attr:`~pymongo.mongo_client.MongoClient.slave_okay` + (use :attr:`~pymongo.mongo_client.MongoClient.read_preference` instead) +- :attr:`~pymongo.mongo_client.MongoClient.tag_sets` + (use :attr:`~pymongo.mongo_client.MongoClient.read_preference` instead) +- :attr:`~pymongo.mongo_client.MongoClient.tz_aware` + (use :attr:`~pymongo.mongo_client.MongoClient.codec_options` instead) + +The following attributes have been renamed: + +- :attr:`~pymongo.mongo_client.MongoClient.secondary_acceptable_latency_ms` is + now :attr:`~pymongo.mongo_client.MongoClient.local_threshold_ms` and is now + read-only. + +:class:`~pymongo.cursor.Cursor` changes +....................................... + +The ``conn_id`` property is renamed to :attr:`~pymongo.cursor.Cursor.address`. + +Cursor management changes +......................... + +:class:`~pymongo.cursor_manager.CursorManager` and +:meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager` are no longer +deprecated. If you subclass :class:`~pymongo.cursor_manager.CursorManager` +your implementation of :meth:`~pymongo.cursor_manager.CursorManager.close` +must now take a second parameter, ``address``. The ``BatchCursorManager`` class +is removed. + +The second parameter to :meth:`~pymongo.mongo_client.MongoClient.close_cursor` +is renamed from ``_conn_id`` to ``address``. +:meth:`~pymongo.mongo_client.MongoClient.kill_cursors` now accepts an ``address`` +parameter. + +:class:`~pymongo.database.Database` changes +........................................... + +The ``connection`` property is renamed to +:attr:`~pymongo.database.Database.client`. 
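+
+For example (a minimal sketch, assuming an existing ``client``)::
+
+    db = client.test
+    assert db.client is client  # formerly db.connection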
+
+The following read-only attributes have been added:
+
+- :attr:`~pymongo.database.Database.codec_options`
+
+The following attributes are now read-only:
+
+- :attr:`~pymongo.database.Database.read_preference`
+- :attr:`~pymongo.database.Database.write_concern`
+
+Use :meth:`~pymongo.mongo_client.MongoClient.get_database` for getting a
+Database instance with its options configured differently than the
+MongoClient's.
+
+The following attributes have been removed:
+
+- :attr:`~pymongo.database.Database.safe`
+- :attr:`~pymongo.database.Database.secondary_acceptable_latency_ms`
+- :attr:`~pymongo.database.Database.slave_okay`
+- :attr:`~pymongo.database.Database.tag_sets`
+
+The following methods have been added:
+
+- :meth:`~pymongo.database.Database.get_collection`
+
+The following methods have been changed:
+
+- :meth:`~pymongo.database.Database.command`. Support for ``as_class``,
+  ``uuid_subtype``, ``tag_sets``, and ``secondary_acceptable_latency_ms`` has
+  been removed. You can instead pass an instance of
+  :class:`~bson.codec_options.CodecOptions` as ``codec_options`` and an
+  instance of a read preference class from :mod:`~pymongo.read_preferences` as
+  ``read_preference``. The ``fields`` and ``compile_re`` options are also
+  removed. The ``fields`` option was undocumented and never really worked.
+  Regular expressions are always decoded to :class:`~bson.regex.Regex`.
+
+The following methods have been deprecated:
+
+- :meth:`~pymongo.database.Database.add_son_manipulator`
+
+The following methods have been removed:
+
+The ``get_lasterror_options``, ``set_lasterror_options``, and
+``unset_lasterror_options`` methods have been removed. Use
+:class:`~pymongo.write_concern.WriteConcern` with
+:meth:`~pymongo.mongo_client.MongoClient.get_database` instead.
+
+:class:`~pymongo.collection.Collection` changes
+...............................................
+
+The following read-only attributes have been added:
+
+- :attr:`~pymongo.collection.Collection.codec_options`
+
+The following attributes are now read-only:
+
+- :attr:`~pymongo.collection.Collection.read_preference`
+- :attr:`~pymongo.collection.Collection.write_concern`
+
+Use :meth:`~pymongo.database.Database.get_collection` or
+:meth:`~pymongo.collection.Collection.with_options` for getting a Collection
+instance with its options configured differently than the Database's.
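+
+A short sketch of the new pattern (database, collection, and option values
+are illustrative)::
+
+    from pymongo import MongoClient, ReadPreference
+    from pymongo.write_concern import WriteConcern
+
+    client = MongoClient()
+    db = client.get_database("test")
+    coll = db.get_collection(
+        "events", write_concern=WriteConcern(w="majority"))
+    # Derive a second handle for reads; the original is unchanged.
+    reporting = coll.with_options(
+        read_preference=ReadPreference.SECONDARY_PREFERRED)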
+ +The following attributes have been removed: + +- :attr:`~pymongo.collection.Collection.safe` +- :attr:`~pymongo.collection.Collection.secondary_acceptable_latency_ms` +- :attr:`~pymongo.collection.Collection.slave_okay` +- :attr:`~pymongo.collection.Collection.tag_sets` + +The following methods have been added: + +- :meth:`~pymongo.collection.Collection.bulk_write` +- :meth:`~pymongo.collection.Collection.insert_one` +- :meth:`~pymongo.collection.Collection.insert_many` +- :meth:`~pymongo.collection.Collection.update_one` +- :meth:`~pymongo.collection.Collection.update_many` +- :meth:`~pymongo.collection.Collection.replace_one` +- :meth:`~pymongo.collection.Collection.delete_one` +- :meth:`~pymongo.collection.Collection.delete_many` +- :meth:`~pymongo.collection.Collection.find_one_and_delete` +- :meth:`~pymongo.collection.Collection.find_one_and_replace` +- :meth:`~pymongo.collection.Collection.find_one_and_update` +- :meth:`~pymongo.collection.Collection.with_options` +- :meth:`~pymongo.collection.Collection.create_indexes` +- :meth:`~pymongo.collection.Collection.list_indexes` + +The following methods have changed: + +- :meth:`~pymongo.collection.Collection.aggregate` now **always** returns an + instance of :class:`~pymongo.command_cursor.CommandCursor`. See the + documentation for all options. +- :meth:`~pymongo.collection.Collection.count` now optionally takes a filter + argument, as well as other options supported by the count command. +- :meth:`~pymongo.collection.Collection.distinct` now optionally takes a filter + argument. +- :meth:`~pymongo.collection.Collection.create_index` no longer caches + indexes, therefore the ``cache_for`` parameter has been removed. It also + no longer supports the ``bucket_size`` and ``drop_dups`` aliases for ``bucketSize`` + and ``dropDups``. + +The following methods are deprecated: + +- :meth:`~pymongo.collection.Collection.save` +- :meth:`~pymongo.collection.Collection.insert` +- :meth:`~pymongo.collection.Collection.update` +- :meth:`~pymongo.collection.Collection.remove` +- :meth:`~pymongo.collection.Collection.find_and_modify` +- :meth:`~pymongo.collection.Collection.ensure_index` + +The following methods have been removed: + +The ``get_lasterror_options``, ``set_lasterror_options``, and +``unset_lasterror_options`` methods have been removed. Use +:class:`~pymongo.write_concern.WriteConcern` with +:meth:`~pymongo.collection.Collection.with_options` instead. + +Changes to :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one` +`````````````````````````````````````````````````````````````````````````````````````````````````````````` + +The following find/find_one options have been renamed: + +These renames only affect your code if you passed these as keyword arguments, +like find(fields=['fieldname']). If you passed only positional parameters these +changes are not significant for your application. 
+
+- spec -> filter
+- fields -> projection
+- partial -> allow_partial_results
+
+The following find/find_one options have been added:
+
+- cursor_type (see :class:`~pymongo.cursor.CursorType` for values)
+- oplog_replay
+- modifiers
+
+The following find/find_one options have been removed:
+
+- network_timeout (use :meth:`~pymongo.cursor.Cursor.max_time_ms` instead)
+- slave_okay (use one of the read preference classes from
+  :mod:`~pymongo.read_preferences` and
+  :meth:`~pymongo.collection.Collection.with_options` instead)
+- read_preference (use :meth:`~pymongo.collection.Collection.with_options`
+  instead)
+- tag_sets (use one of the read preference classes from
+  :mod:`~pymongo.read_preferences` and
+  :meth:`~pymongo.collection.Collection.with_options` instead)
+- secondary_acceptable_latency_ms (use the ``localThresholdMS`` URI option
+  instead)
+- max_scan (use the new ``modifiers`` option instead)
+- snapshot (use the new ``modifiers`` option instead)
+- tailable (use the new ``cursor_type`` option instead)
+- await_data (use the new ``cursor_type`` option instead)
+- exhaust (use the new ``cursor_type`` option instead)
+- as_class (use :meth:`~pymongo.collection.Collection.with_options` with
+  :class:`~bson.codec_options.CodecOptions` instead)
+- compile_re (BSON regular expressions are always decoded to
+  :class:`~bson.regex.Regex`)
+
+The following find/find_one options are deprecated:
+
+- manipulate
+
+The following rename needs special handling:
+
+- timeout -> no_cursor_timeout
+  The default for ``timeout`` was True. The default for ``no_cursor_timeout``
+  is False. If you were previously passing False for ``timeout`` you must pass
+  **True** for ``no_cursor_timeout`` to keep the previous behavior.
+
+:mod:`~pymongo.errors` changes
+..............................
+
+The exception classes ``UnsupportedOption`` and ``TimeoutError`` are deleted.
+
+:mod:`~gridfs` changes
+......................
+
+Since PyMongo 1.6, methods ``open`` and ``close`` of :class:`~gridfs.GridFS`
+raised an ``UnsupportedAPI`` exception, as did the entire ``GridFile`` class.
+The unsupported methods, the class, and the exception are all deleted.
+
+:mod:`~bson` changes
+....................
+
+The ``compile_re`` option is removed from all methods
+that accepted it in :mod:`~bson` and :mod:`~bson.json_util`. Additionally, it
+is removed from :meth:`~pymongo.collection.Collection.find`,
+:meth:`~pymongo.collection.Collection.find_one`,
+:meth:`~pymongo.collection.Collection.aggregate`,
+:meth:`~pymongo.database.Database.command`, and so on.
+PyMongo now always represents BSON regular expressions as
+:class:`~bson.regex.Regex` objects. This prevents errors for incompatible
+patterns; see `PYTHON-500`_. Use :meth:`~bson.regex.Regex.try_compile` to
+attempt to convert from a BSON regular expression to a Python regular
+expression object.
+
+PyMongo now decodes the int64 BSON type to :class:`~bson.int64.Int64`, a
+trivial wrapper around long (in Python 2.x) or int (in Python 3.x). This
+allows BSON int64 to be round-tripped without losing type information in
+Python 3. Note that if you store a Python long (or a Python int larger than
+4 bytes) it will be returned from PyMongo as :class:`~bson.int64.Int64`.
+
+The ``as_class``, ``tz_aware``, and ``uuid_subtype`` options are removed from
+all BSON encoding and decoding methods. Use
+:class:`~bson.codec_options.CodecOptions` to configure these options.
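+
+A minimal sketch of the replacement pattern (the document content is
+illustrative)::
+
+    import datetime
+
+    import bson
+    from bson.codec_options import CodecOptions
+    from bson.son import SON
+
+    opts = CodecOptions(document_class=SON, tz_aware=True)
+    raw = bson.BSON.encode({"when": datetime.datetime(2015, 4, 7)})
+    # Decodes to a SON instance with a timezone-aware (UTC) datetime.
+    doc = raw.decode(codec_options=opts)
+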
The APIs affected are:
+
+- :func:`~bson.decode_all`
+- :func:`~bson.decode_iter`
+- :func:`~bson.decode_file_iter`
+- :meth:`~bson.BSON.encode`
+- :meth:`~bson.BSON.decode`
+
+This is a breaking change for any application that uses the BSON API directly
+and changes any of the named parameter defaults. No changes are required for
+applications that use the default values for these options. The behavior
+remains the same.
+
+.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
+
+Issues Resolved
+...............
+
+See the `PyMongo 3.0 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 3.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12501
+
+Changes in Version 2.9.5 (2017/06/30)
+-------------------------------------
+
+Version 2.9.5 works around ssl module deprecations in Python 3.6, and expected
+future ssl module deprecations. It also fixes bugs found since the release of
+2.9.4.
+
+- Use ssl.SSLContext and ssl.PROTOCOL_TLS_CLIENT when available.
+- Fixed a C extension build issue when the interpreter was built with
+  -std=c99.
+- Fixed various build issues with MinGW32.
+- Fixed a write concern bug in :meth:`~pymongo.database.Database.add_user`
+  and :meth:`~pymongo.database.Database.remove_user` when connected to
+  MongoDB 3.2+.
+- Fixed various test failures related to changes in gevent, MongoDB, and our
+  CI test environment.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9.5 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17605
+
+Changes in Version 2.9.4 (2016/09/30)
+-------------------------------------
+
+Version 2.9.4 fixes issues reported since the release of 2.9.3.
+
+- Fixed __repr__ for closed instances of
+  :class:`~pymongo.mongo_client.MongoClient`.
+- Fixed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
+  handling of uuidRepresentation.
+- Fixed building and testing the documentation with Python 3.x.
+- New documentation for `TLS `_ and `Atlas `_.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9.4 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16885
+
+Changes in Version 2.9.3 (2016/03/15)
+-------------------------------------
+
+Version 2.9.3 fixes a few issues reported since the release of 2.9.2,
+including thread safety issues in
+:meth:`~pymongo.collection.Collection.ensure_index`,
+:meth:`~pymongo.collection.Collection.drop_index`, and
+:meth:`~pymongo.collection.Collection.drop_indexes`.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16539
+
+Changes in Version 2.9.2 (2016/02/16)
+-------------------------------------
+
+Version 2.9.2 restores Python 3.1 support, which was broken in PyMongo 2.8. It
+improves an error message when decoding BSON and fixes a couple of other
+issues, including :meth:`~pymongo.collection.Collection.aggregate` ignoring
+:attr:`~pymongo.collection.Collection.codec_options` and
+:meth:`~pymongo.database.Database.command` raising a superfluous
+``DeprecationWarning``.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9.2 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16303
+
+Changes in Version 2.9.1 (2015/11/17)
+-------------------------------------
+
+Version 2.9.1 fixes two interrupt handling issues in the C extensions and
+adapts a test case for a behavior change in MongoDB 3.2.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16208
+
+Changes in Version 2.9 (2015/09/30)
+-----------------------------------
+
+Version 2.9 provides an upgrade path to PyMongo 3.x. Most of the API changes
+from PyMongo 3.0 have been backported in a backward-compatible way, allowing
+applications to be written against PyMongo >= 2.9, rather than PyMongo 2.x or
+PyMongo 3.x. See the `PyMongo 3 Migration Guide
+`_ for
+detailed examples.
+
+.. note:: There are a number of new deprecations in this release for features
+   that were removed in PyMongo 3.0.
+
+   :class:`~pymongo.mongo_client.MongoClient`:
+     - :attr:`~pymongo.mongo_client.MongoClient.host`
+     - :attr:`~pymongo.mongo_client.MongoClient.port`
+     - :attr:`~pymongo.mongo_client.MongoClient.use_greenlets`
+     - :attr:`~pymongo.mongo_client.MongoClient.document_class`
+     - :attr:`~pymongo.mongo_client.MongoClient.tz_aware`
+     - :attr:`~pymongo.mongo_client.MongoClient.secondary_acceptable_latency_ms`
+     - :attr:`~pymongo.mongo_client.MongoClient.tag_sets`
+     - :attr:`~pymongo.mongo_client.MongoClient.uuid_subtype`
+     - :meth:`~pymongo.mongo_client.MongoClient.disconnect`
+     - :meth:`~pymongo.mongo_client.MongoClient.alive`
+
+   :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`:
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.use_greenlets`
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.document_class`
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.tz_aware`
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.secondary_acceptable_latency_ms`
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.tag_sets`
+     - :attr:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.uuid_subtype`
+     - :meth:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.alive`
+
+   :class:`~pymongo.database.Database`:
+     - :attr:`~pymongo.database.Database.secondary_acceptable_latency_ms`
+     - :attr:`~pymongo.database.Database.tag_sets`
+     - :attr:`~pymongo.database.Database.uuid_subtype`
+
+   :class:`~pymongo.collection.Collection`:
+     - :attr:`~pymongo.collection.Collection.secondary_acceptable_latency_ms`
+     - :attr:`~pymongo.collection.Collection.tag_sets`
+     - :attr:`~pymongo.collection.Collection.uuid_subtype`
+
+.. warning::
+   In previous versions of PyMongo, changing the value of
+   :attr:`~pymongo.mongo_client.MongoClient.document_class` changed
+   the behavior of all existing instances of
+   :class:`~pymongo.collection.Collection`::
+
+     >>> coll = client.test.test
+     >>> coll.find_one()
+     {u'_id': ObjectId('5579dc7cfba5220cc14d9a18')}
+     >>> from bson.son import SON
+     >>> client.document_class = SON
+     >>> coll.find_one()
+     SON([(u'_id', ObjectId('5579dc7cfba5220cc14d9a18'))])
+
+   The document_class setting is now configurable at the client,
+   database, collection, and per-operation level. This required breaking
+   the existing behavior.
To change the document class per operation in a forward-compatible
+   way use :meth:`~pymongo.collection.Collection.with_options`::
+
+     >>> coll.find_one()
+     {u'_id': ObjectId('5579dc7cfba5220cc14d9a18')}
+     >>> from bson.codec_options import CodecOptions
+     >>> coll.with_options(CodecOptions(SON)).find_one()
+     SON([(u'_id', ObjectId('5579dc7cfba5220cc14d9a18'))])
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.9 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14795
+
+Changes in Version 2.8.1 (2015/05/11)
+-------------------------------------
+
+Version 2.8.1 fixes a number of issues reported since the release of PyMongo
+2.8. It is a recommended upgrade for all users of PyMongo 2.x.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.8.1 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. _PyMongo 2.8.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15324
+
+Changes in Version 2.8 (2015/01/28)
+-----------------------------------
+
+Version 2.8 is a major release that provides full support for MongoDB 3.0 and
+fixes a number of bugs.
+
+Special thanks to Don Mitchell, Ximing, Can Zhang, Sergey Azovskov, and Heewa
+Barfchin for their contributions to this release.
+
+Highlights include:
+
+- Support for the SCRAM-SHA-1 authentication mechanism (new in MongoDB 3.0).
+- JSON decoder support for the new $numberLong and $undefined types.
+- JSON decoder support for the $date type as an ISO-8601 string.
+- Support passing an index name to :meth:`~pymongo.cursor.Cursor.hint`.
+- The :meth:`~pymongo.cursor.Cursor.count` method will use a hint if one
+  has been provided through :meth:`~pymongo.cursor.Cursor.hint`.
+- A new socketKeepAlive option for the connection pool.
+- New generator-based BSON decode functions, :func:`~bson.decode_iter`
+  and :func:`~bson.decode_file_iter`.
+- Internal changes to support alternative storage engines like WiredTiger.
+
+.. note:: There are a number of deprecations in this release for features that
+   will be removed in PyMongo 3.0. These include:
+
+   - :meth:`~pymongo.mongo_client.MongoClient.start_request`
+   - :meth:`~pymongo.mongo_client.MongoClient.in_request`
+   - :meth:`~pymongo.mongo_client.MongoClient.end_request`
+   - :meth:`~pymongo.mongo_client.MongoClient.copy_database`
+   - :meth:`~pymongo.database.Database.error`
+   - :meth:`~pymongo.database.Database.last_status`
+   - :meth:`~pymongo.database.Database.previous_error`
+   - :meth:`~pymongo.database.Database.reset_error_history`
+   - :class:`~pymongo.master_slave_connection.MasterSlaveConnection`
+
+   The JSON format for :class:`~bson.timestamp.Timestamp` has changed from
+   '{"t": <int>, "i": <int>}' to '{"$timestamp": {"t": <int>, "i": <int>}}'.
+   This new format will be decoded to an instance of
+   :class:`~bson.timestamp.Timestamp`. The old format will continue to be
+   decoded to a Python dict as before. Encoding to the old format is no
+   longer supported as it was never correct and loses type information.
+
+Issues Resolved
+...............
+
+See the `PyMongo 2.8 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+.. 
_PyMongo 2.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14223 + +Changes in Version 2.7.2 (2014/07/29) +------------------------------------- + +Version 2.7.2 includes fixes for upsert reporting in the bulk API for MongoDB +versions previous to 2.6, a regression in how son manipulators are applied in +:meth:`~pymongo.collection.Collection.insert`, a few obscure connection pool +semaphore leaks, and a few other minor issues. See the list of issues resolved +for full details. + +Issues Resolved +............... + +See the `PyMongo 2.7.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 2.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14005 + +Changes in Version 2.7.1 (2014/05/23) +------------------------------------- + +Version 2.7.1 fixes a number of issues reported since the release of 2.7, +most importantly a fix for creating indexes and manipulating users through +mongos versions older than 2.4.0. + +Issues Resolved +............... + +See the `PyMongo 2.7.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 2.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13823 + +Changes in Version 2.7 (2014/04/03) +----------------------------------- PyMongo 2.7 is a major release with a large number of new features and bug fixes. Highlights include: - Full support for MongoDB 2.6. -- A new :doc:`bulk write operations API `. +- A new `bulk write operations API `_. - Support for server side query timeouts using :meth:`~pymongo.cursor.Cursor.max_time_ms`. - Support for writing :meth:`~pymongo.collection.Collection.aggregate` @@ -19,9 +3314,9 @@ fixes. Highlights include: error details from the server. - A new GridFS :meth:`~gridfs.GridFS.find` method that returns a :class:`~gridfs.grid_file.GridOutCursor`. -- Greatly improved :doc:`support for mod_wsgi ` when using +- Greatly improved `support for mod_wsgi `_ when using PyMongo's C extensions. Read `Jesse's blog post - `_ for details. + `_ for details. - Improved C extension support for ARM little endian. Breaking changes @@ -36,10 +3331,10 @@ Issues Resolved See the `PyMongo 2.7 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.7 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12892 +.. _PyMongo 2.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12892 -Changes in Version 2.6.3 ------------------------- +Changes in Version 2.6.3 (2013/10/11) +------------------------------------- Version 2.6.3 fixes issues reported since the release of 2.6.2, most importantly a semaphore leak when a connection to the server fails. @@ -50,10 +3345,10 @@ Issues Resolved See the `PyMongo 2.6.3 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.6.3 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/13098 +.. _PyMongo 2.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13098 -Changes in Version 2.6.2 ------------------------- +Changes in Version 2.6.2 (2013/09/06) +------------------------------------- Version 2.6.2 fixes a :exc:`TypeError` problem when max_pool_size=None is used in Python 3. 
@@ -64,10 +3359,10 @@ Issues Resolved
 
 See the `PyMongo 2.6.2 release notes in JIRA`_ for the list of resolved issues
 in this release.
 
-.. _PyMongo 2.6.2 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12910
+.. _PyMongo 2.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12910
 
-Changes in Version 2.6.1
-------------------------
+Changes in Version 2.6.1 (2013/09/03)
+-------------------------------------
 
 Version 2.6.1 fixes a reference leak in
 the :meth:`~pymongo.collection.Collection.insert` method.
 
@@ -78,10 +3373,10 @@ Issues Resolved
 
 See the `PyMongo 2.6.1 release notes in JIRA`_ for the list of resolved issues
 in this release.
 
-.. _PyMongo 2.6.1 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12905
+.. _PyMongo 2.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12905
 
-Changes in Version 2.6
-----------------------
+Changes in Version 2.6 (2013/08/19)
+-----------------------------------
 
 Version 2.6 includes some frequently requested improvements and adds support
 for some early MongoDB 2.6 features.
 
@@ -94,24 +3389,24 @@ Important new features:
 
 - The ``max_pool_size`` option for :class:`~pymongo.mongo_client.MongoClient`
   and :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` now
   actually caps the number of sockets the pool will open concurrently.
-  Once the pool has reached :attr:`~pymongo.mongo_client.MongoClient.max_pool_size`
+  Once the pool has reached ``max_pool_size``
   operations will block waiting for a socket to become available. If
   ``waitQueueTimeoutMS`` is set, an operation that blocks waiting for a socket
   will raise :exc:`~pymongo.errors.ConnectionFailure` after the timeout. By
   default ``waitQueueTimeoutMS`` is not set.
-  See :ref:`connection-pooling` for more information.
+  See `connection pooling `_ for more information.
- The :meth:`~pymongo.collection.Collection.insert` method automatically splits
  large batches of documents into multiple insert messages based on
  :attr:`~pymongo.mongo_client.MongoClient.max_message_size`
- Support for the exhaust cursor flag. See
  :meth:`~pymongo.collection.Collection.find` for details and caveats.
- Support for the PLAIN and MONGODB-X509 authentication mechanisms.
-  See :doc:`the authentication docs ` for more
+  See `the authentication docs `_ for more
  information.
- Support aggregation output as a :class:`~pymongo.cursor.Cursor`. See
  :meth:`~pymongo.collection.Collection.aggregate` for details.
 
-.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, `max_pool_size`
+.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, ``max_pool_size``
   would limit only the idle sockets the pool would hold onto, not the number
   of open sockets. The default has also changed, from 10 to 100. If you pass
   a value for ``max_pool_size`` make sure it is large enough for
@@ -119,7 +3414,7 @@ Important new features:
   to having a ``max_pool_size`` larger than necessary. Err towards a larger
   value.) If your application accepts the default, continue to do so.
 
-  See :ref:`connection-pooling` for more information.
+  See `connection pooling `_ for more information.
 
 Issues Resolved
 ...............
 
@@ -127,10 +3422,10 @@ Issues Resolved
 
 See the `PyMongo 2.6 release notes in JIRA`_ for the list of resolved issues
 in this release.
 
-.. _PyMongo 2.6 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12380
+.. 
_PyMongo 2.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12380 -Changes in Version 2.5.2 ------------------------- +Changes in Version 2.5.2 (2013/06/01) +------------------------------------- Version 2.5.2 fixes a NULL pointer dereference issue when decoding an invalid :class:`~bson.dbref.DBRef`. @@ -141,10 +3436,10 @@ Issues Resolved See the `PyMongo 2.5.2 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.5.2 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12581 +.. _PyMongo 2.5.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12581 -Changes in Version 2.5.1 ------------------------- +Changes in Version 2.5.1 (2013/05/13) +------------------------------------- Version 2.5.1 is a minor release that fixes issues discovered after the release of 2.5. Most importantly, this release addresses some race @@ -156,16 +3451,16 @@ Issues Resolved See the `PyMongo 2.5.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.5.1 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12484 +.. _PyMongo 2.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12484 -Changes in Version 2.5 ----------------------- +Changes in Version 2.5 (2013/03/22) +----------------------------------- Version 2.5 includes changes to support new features in MongoDB 2.4. Important new features: -- Support for :ref:`GSSAPI (Kerberos) authentication `. +- Support for `GSSAPI (Kerberos) `_. - Support for SSL certificate validation with hostname matching. - Support for delegated and role based authentication. - New GEOSPHERE (2dsphere) and HASHED index constants. @@ -180,10 +3475,10 @@ Issues Resolved See the `PyMongo 2.5 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.5 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/11981 +.. _PyMongo 2.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11981 -Changes in Version 2.4.2 ------------------------- +Changes in Version 2.4.2 (2013/01/23) +------------------------------------- Version 2.4.2 is a minor release that fixes issues discovered after the release of 2.4.1. Most importantly, PyMongo will no longer select a replica @@ -195,10 +3490,10 @@ Issues Resolved See the `PyMongo 2.4.2 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.4.2 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12299 +.. _PyMongo 2.4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12299 -Changes in Version 2.4.1 ------------------------- +Changes in Version 2.4.1 (2012/12/06) +------------------------------------- Version 2.4.1 is a minor release that fixes issues discovered after the release of 2.4. Most importantly, this release fixes a regression using @@ -211,10 +3506,10 @@ Issues Resolved See the `PyMongo 2.4.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.4.1 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/12286 +.. 
_PyMongo 2.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12286 -Changes in Version 2.4 ----------------------- +Changes in Version 2.4 (2012/11/27) +----------------------------------- Version 2.4 includes a few important new features and a large number of bug fixes. @@ -236,7 +3531,7 @@ Important new features: - :class:`~pymongo.cursor.Cursor` can be copied with functions from the :mod:`copy` module. - The :meth:`~pymongo.database.Database.set_profiling_level` method now supports - a `slow_ms` option. + a ``slow_ms`` option. - The replica set monitor task (used by :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` and :class:`~pymongo.replica_set_connection.ReplicaSetConnection`) is a daemon thread @@ -261,10 +3556,10 @@ Issues Resolved See the `PyMongo 2.4 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.4 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/11485 +.. _PyMongo 2.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11485 -Changes in Version 2.3 ----------------------- +Changes in Version 2.3 (2012/08/29) +----------------------------------- Version 2.3 adds support for new features and behavior changes in MongoDB 2.2. @@ -272,12 +3567,11 @@ Version 2.3 adds support for new features and behavior changes in MongoDB Important New Features: - Support for expanded read preferences including directing reads to tagged - servers - See :ref:`secondary-reads` for more information. -- Support for mongos failover - - See :ref:`mongos-high-availability` for more information. + servers - See `secondary reads `_ for more information. +- Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter. @@ -285,7 +3579,7 @@ Important New Features: Starting with MongoDB 2.2 the getLastError command requires authentication when the server's `authentication features - `_ are enabled. + `_ are enabled. Changes to PyMongo were required to support this behavior change. Users of authentication must upgrade to PyMongo 2.3 (or newer) for "safe" write operations to function correctly. @@ -296,10 +3590,10 @@ Issues Resolved See the `PyMongo 2.3 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.3 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/11146 +.. _PyMongo 2.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11146 -Changes in Version 2.2.1 ------------------------- +Changes in Version 2.2.1 (2012/07/06) +------------------------------------- Version 2.2.1 is a minor release that fixes issues discovered after the release of 2.2. Most importantly, this release fixes an incompatibility @@ -312,10 +3606,10 @@ Issues Resolved See the `PyMongo 2.2.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.2.1 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/11185 +.. 
_PyMongo 2.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11185 -Changes in Version 2.2 ------------------------ +Changes in Version 2.2 (2012/04/30) +----------------------------------- Version 2.2 adds a few more frequently requested features and fixes a number of bugs. @@ -327,12 +3621,12 @@ to this release. Important New Features: -- Support for Python 3 - - See the :doc:`python3` for more information. +- Support for Python 3. + See `Python 3 `_ for more information. - Support for Gevent - - See :doc:`examples/gevent` for more information. -- Improved connection pooling - - See :doc:`examples/requests` for more information. + See `Gevent `_ for more information. +- Improved connection pooling. + See `PYTHON-287 `_. .. warning:: @@ -357,10 +3651,10 @@ Issues Resolved See the `PyMongo 2.2 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.2 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/10584 +.. _PyMongo 2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10584 -Changes in Version 2.1.1 ------------------------- +Changes in Version 2.1.1 (2012/01/04) +------------------------------------- Version 2.1.1 is a minor release that fixes a few issues discovered after the release of 2.1. You can now use @@ -377,10 +3671,10 @@ Issues Resolved See the `PyMongo 2.1.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 2.1.1 release notes in JIRA: https://jira.mongodb.org/browse/PYTHON/fixforversion/11081 +.. _PyMongo 2.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?version=11081&styleName=Html&projectId=10004 -Changes in Version 2.1 ------------------------- +Changes in Version 2.1 (2011/12/07) +----------------------------------- Version 2.1 adds a few frequently requested features and includes the usual round of bug fixes and improvements. @@ -422,8 +3716,8 @@ See the `PyMongo 2.1 release notes in JIRA`_ for the list of resolved issues in .. _PyMongo 2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10583 -Changes in Version 2.0.1 ------------------------- +Changes in Version 2.0.1 (2011/08/15) +------------------------------------- Version 2.0.1 fixes a regression in :class:`~gridfs.grid_file.GridIn` when writing pre-chunked strings. Thanks go to Alexey Borzenkov for reporting the @@ -435,8 +3729,8 @@ Issues Resolved - `PYTHON-271 `_: Regression in GridFS leads to serious loss of data. -Changes in Version 2.0 ----------------------- +Changes in Version 2.0 (2011/08/05) +----------------------------------- Version 2.0 adds a large number of features and fixes a number of issues. @@ -457,16 +3751,16 @@ Important New Features: independently at the connection, database, collection or query level. Each level will inherit settings from the previous level and each level can override the previous level's setting. -- PyMongo now supports the `await_data` and `partial` cursor flags. If the - `await_data` flag is set on a `tailable` cursor the server will block for - some extra time waiting for more data to return. The `partial` flag tells +- PyMongo now supports the ``await_data`` and ``partial`` cursor flags. If the + ``await_data`` flag is set on a ``tailable`` cursor the server will block for + some extra time waiting for more data to return. 
The ``partial`` flag tells
+  a mongos to return partial data for a query if not all shards are available.
-- :meth:`~pymongo.collection.Collection.map_reduce` will accept a `dict` or
-  instance of :class:`~bson.son.SON` as the `out` parameter.
+- :meth:`~pymongo.collection.Collection.map_reduce` will accept a ``dict`` or
+  instance of :class:`~bson.son.SON` as the ``out`` parameter.
- The URI parser has been moved into its own module and can be used directly
  by application code.
- AutoReconnect exception now provides information about the error that
-  actually occured instead of a generic failure message.
+  actually occurred instead of a generic failure message.
- A number of new helper methods have been added with options for setting and
  unsetting cursor flags, re-indexing a collection, fsync and locking a
  server, and getting the server's current operations.
@@ -474,9 +3768,9 @@ API changes:
 
 - If only one host:port pair is specified :class:`~pymongo.connection.Connection`
-  will make a direct connection to only that host. Please note that `slave_okay`
-  must be `True` in order to query from a secondary.
-- If more than one host:port pair is specified or the `replicaset` option is
+  will make a direct connection to only that host. Please note that ``slave_okay``
+  must be ``True`` in order to query from a secondary.
+- If more than one host:port pair is specified or the ``replicaset`` option is
   used PyMongo will treat the specified host:port pair(s) as a seed list and
   connect using replica set behavior.
 
@@ -493,15 +3787,15 @@ See the `PyMongo 2.0 release notes in JIRA`_ for the list of resolved issues in
 
 .. _PyMongo 2.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10274
 
-Changes in Version 1.11
------------------------
+Changes in Version 1.11 (2011/05/05)
+------------------------------------
 
 Version 1.11 adds a few new features and fixes a few more bugs.
 
 New Features:
 
 - Basic IPv6 support: pymongo prefers IPv4 but will try IPv6. You can
-  also specify an IPv6 address literal in the `host` parameter or a
+  also specify an IPv6 address literal in the ``host`` parameter or a
   MongoDB URI provided it is enclosed in '[' and ']'.
- max_pool_size option: previously pymongo had a hard coded pool size
  of 10 connections. With this change you can specify a different pool
@@ -519,10 +3813,10 @@ API changes:
 
 - :meth:`~pymongo.database.Database.validate_collection` now returns a
   dict instead of a string. This change was required to deal with an
   API change on the server. This method also now takes the optional
-  `scandata` and `full` parameters. See the documentation for more
+  ``scandata`` and ``full`` parameters. See the documentation for more
   details.
 
-.. warning:: The `pool_size`, `auto_start_request`, and `timeout` parameters
+.. warning:: The ``pool_size``, ``auto_start_request``, and ``timeout`` parameters
   for :class:`~pymongo.connection.Connection` have been completely
  removed in this release. They were deprecated in pymongo-1.4 and
  have had no effect since then. Please make sure that your code
@@ -547,8 +3841,8 @@ Issues resolved
 
 - `PYTHON-138 `_:
  Find method for GridFS
 
-Changes in Version 1.10.1
--------------------------
+Changes in Version 1.10.1 (2011/04/07)
+--------------------------------------
 
 Version 1.10.1 is primarily a bugfix release. It fixes a regression in
 version 1.10 that broke pickling of ObjectIds.
A number of other bugs @@ -564,9 +3858,9 @@ There are two behavior changes to be aware of: Previously the read would be sent to one randomly chosen slave and :class:`~pymongo.errors.AutoReconnect` was immediately raised in case of a connection failure. -- A Python `long` is now always BSON encoded as an int64. Previously the - encoding was based only on the value of the field and a `long` with a - value less than `2147483648` or greater than `-2147483649` would always +- A Python ``long`` is now always BSON encoded as an int64. Previously the + encoding was based only on the value of the field and a ``long`` with a + value less than ``2147483648`` or greater than ``-2147483649`` would always be BSON encoded as an int32. Issues resolved @@ -591,12 +3885,12 @@ Issues resolved - `PYTHON-186 `_: When storing integers, type is selected according to value instead of type - `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone + as_class option is not propagated by Cursor.clone - `PYTHON-113 `_: Redunducy in MasterSlaveConnection -Changes in Version 1.10 ------------------------ +Changes in Version 1.10 (2011/03/30) +------------------------------------ Version 1.10 includes changes to support new features in MongoDB 1.8.x. Highlights include a modified map/reduce API including an inline map/reduce @@ -611,7 +3905,7 @@ server for the maximum BSON document size it supports. collections for map/reduce results. An output collection name must be provided and the output will replace any existing output collection with the same name. :meth:`~pymongo.collection.Collection.map_reduce` now - requires the `out` parameter. + requires the ``out`` parameter. Issues resolved ............... @@ -634,14 +3928,14 @@ Issues resolved - PYTHON-169: Support deepcopy of DBRef. - PYTHON-167: Duplicate of PYTHON-166. - PYTHON-166: Fixes a concurrency issue. -- PYTHON-158: Add code and err string to `db assertion` messages. +- PYTHON-158: Add code and err string to ``db assertion`` messages. -Changes in Version 1.9 ----------------------- +Changes in Version 1.9 (2010/09/28) +----------------------------------- Version 1.9 adds a new package to the PyMongo distribution, :mod:`bson`. :mod:`bson` contains all of the `BSON -`_ encoding and decoding logic, and the BSON +`_ encoding and decoding logic, and the BSON types that were formerly in the :mod:`pymongo` package. The following modules have been renamed: @@ -711,15 +4005,15 @@ rather than :class:`pymongo.errors.PyMongoError`. :class:`~pymongo.connection.Connection` has been idle for a while. - added :meth:`~pymongo.database.SystemJS.list` to :class:`~pymongo.database.SystemJS`. -- added `file_document` argument to :meth:`~gridfs.grid_file.GridOut` +- added ``file_document`` argument to :meth:`~gridfs.grid_file.GridOut` to allow initializing from an existing file document. - raise :class:`~pymongo.errors.TimeoutError` even if the ``getLastError`` command was run manually and not through "safe" mode. - added :class:`uuid` support to :mod:`~bson.json_util`. -Changes in Version 1.8.1 ------------------------- +Changes in Version 1.8.1 (2010/08/13) +------------------------------------- - fixed a typo in the C extension that could cause safe-mode operations to report a failure (:class:`SystemError`) even when none @@ -727,17 +4021,17 @@ Changes in Version 1.8.1 - added a :meth:`__ne__` implementation to any class where we define :meth:`__eq__`. 
-Changes in Version 1.8 ----------------------- +Changes in Version 1.8 (2010/08/05) +----------------------------------- Version 1.8 adds support for connecting to replica sets, specifying -per-operation values for `w` and `wtimeout`, and decoding to +per-operation values for ``w`` and ``wtimeout``, and decoding to timezone-aware datetimes. - fixed a reference leak in the C extension when decoding a :class:`~bson.dbref.DBRef`. -- added support for `w`, `wtimeout`, and `fsync` (and any other - options for `getLastError`) to "safe mode" operations. +- added support for ``w``, ``wtimeout``, and ``fsync`` (and any other + options for ``getLastError``) to "safe mode" operations. - added :attr:`~pymongo.connection.Connection.nodes` property. - added a maximum pool size of 10 sockets. - added support for replica sets. @@ -755,9 +4049,9 @@ timezone-aware datetimes. :class:`~bson.max_key.MaxKey` and :class:`~bson.timestamp.Timestamp` to :mod:`~bson.json_util`. - added support for decoding datetimes as aware (UTC) - it is highly - recommended to enable this by setting the `tz_aware` parameter to + recommended to enable this by setting the ``tz_aware`` parameter to :meth:`~pymongo.connection.Connection` to ``True``. -- added `network_timeout` option for individual calls to +- added ``network_timeout`` option for individual calls to :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one`. - added :meth:`~gridfs.GridFS.exists` to check if a file exists in @@ -768,13 +4062,13 @@ timezone-aware datetimes. :class:`~pymongo.errors.OperationFailure` exceptions. - fixed serialization of int and float subclasses in the C extension. -Changes in Version 1.7 ----------------------- +Changes in Version 1.7 (2010/06/17) +----------------------------------- Version 1.7 is a recommended upgrade for all PyMongo users. The full release notes are below, and some more in depth discussion of the highlights is `here -`_. +`_. - no longer attempt to build the C extension on big-endian systems. - added :class:`~bson.min_key.MinKey` and @@ -788,27 +4082,27 @@ highlights is `here support for querying unique status and other index information. - added :attr:`~pymongo.connection.Connection.document_class`, to specify class for returned documents. -- added `as_class` argument for +- added ``as_class`` argument for :meth:`~pymongo.collection.Collection.find`, and in the BSON decoder. - added support for creating :class:`~bson.timestamp.Timestamp` instances using a :class:`~datetime.datetime`. -- allow `dropTarget` argument for +- allow ``dropTarget`` argument for :class:`~pymongo.collection.Collection.rename`. - handle aware :class:`~datetime.datetime` instances, by converting to UTC. - added support for :class:`~pymongo.cursor.Cursor.max_scan`. - raise :class:`~gridfs.errors.FileExists` exception when creating a duplicate GridFS file. -- use `y2038 `_ for time handling in +- use `y2038 `_ for time handling in the C extension - eliminates 2038 problems when extension is installed. 
-- added `sort` parameter to +- added ``sort`` parameter to :meth:`~pymongo.collection.Collection.find` - finalized deprecation of changes from versions **<= 1.4** - take any non-:class:`dict` as an ``"_id"`` query for :meth:`~pymongo.collection.Collection.find_one` or :meth:`~pymongo.collection.Collection.remove` -- added ability to pass a :class:`dict` for `fields` argument to +- added ability to pass a :class:`dict` for ``fields`` argument to :meth:`~pymongo.collection.Collection.find` (supports ``"$slice"`` and field negation) - simplified code to find master, since paired setups don't always have @@ -818,14 +4112,14 @@ highlights is `here - don't transparently map ``"filename"`` key to :attr:`name` attribute for GridFS. -Changes in Version 1.6 ----------------------- +Changes in Version 1.6 (2010/04/14) +----------------------------------- The biggest change in version 1.6 is a complete re-implementation of :mod:`gridfs` with a lot of improvements over the old implementation. There are many details and examples of using the new API in `this blog post -`_. The +`_. The old API has been removed in this version, so existing code will need to be modified before upgrading to 1.6. @@ -840,17 +4134,17 @@ to be modified before upgrading to 1.6. on non-existent collections. - disallow empty bulk inserts. -Changes in Version 1.5.2 ------------------------- +Changes in Version 1.5.2 (2010/03/31) +------------------------------------- - fixed response handling to ignore unknown response flags in queries. - handle server versions containing '-pre-'. -Changes in Version 1.5.1 ------------------------- +Changes in Version 1.5.1 (2010/03/17) +------------------------------------- - added :data:`~gridfs.grid_file.GridFile._id` property for :class:`~gridfs.grid_file.GridFile` instances. - fix for making a :class:`~pymongo.connection.Connection` (with - `slave_okay` set) directly to a slave in a replica pair. + ``slave_okay`` set) directly to a slave in a replica pair. - accept kwargs for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index` to support all @@ -859,10 +4153,10 @@ Changes in Version 1.5.1 - improvements to Python code caching in C extension - should improve behavior on mod_wsgi. -Changes in Version 1.5 ----------------------- +Changes in Version 1.5 (2010/03/10) +----------------------------------- - added subtype constants to :mod:`~bson.binary` module. -- DEPRECATED `options` argument to +- DEPRECATED ``options`` argument to :meth:`~pymongo.collection.Collection` and :meth:`~pymongo.database.Database.create_collection` in favor of kwargs. @@ -872,7 +4166,7 @@ Changes in Version 1.5 might have more data to return (useful for tailable cursors). - added :class:`~bson.timestamp.Timestamp` to better support dealing with internal MongoDB timestamps. -- added `name` argument for +- added ``name`` argument for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index`. - fixed connection pooling w/ fork @@ -894,8 +4188,8 @@ Changes in Version 1.5 - added :class:`~gridfs.errors.GridFSError` as base class for :mod:`gridfs` exceptions. -Changes in Version 1.4 ----------------------- +Changes in Version 1.4 (2010/01/17) +----------------------------------- Perhaps the most important change in version 1.4 is that we have decided to **no longer support Python 2.3**. The most immediate reason @@ -924,7 +4218,7 @@ Other changes: for example. 
- added :class:`~pymongo.errors.DuplicateKeyError` for calls to :meth:`~pymongo.collection.Collection.insert` or - :meth:`~pymongo.collection.Collection.update` with `safe` set to + :meth:`~pymongo.collection.Collection.update` with ``safe`` set to ``True``. - removed :mod:`~pymongo.thread_util`. - added :meth:`~pymongo.database.Database.add_user` and @@ -936,8 +4230,8 @@ Other changes: - clean up all cases where :class:`~pymongo.errors.ConnectionFailure` is raised. - simplification of connection pooling - makes driver ~2x faster for - simple benchmarks. see :ref:`connection-pooling` for more information. -- DEPRECATED `pool_size`, `auto_start_request` and `timeout` + simple benchmarks. see `connection pooling `_ for more information. +- DEPRECATED ``pool_size``, ``auto_start_request`` and ``timeout`` parameters to :class:`~pymongo.connection.Connection`. DEPRECATED :meth:`~pymongo.connection.Connection.start_request`. - use :meth:`socket.sendall`. @@ -948,7 +4242,7 @@ Other changes: - deprecate :meth:`~pymongo.database.Database._command` in favor of :meth:`~pymongo.database.Database.command`. - send all commands without wrapping as ``{"query": ...}``. -- support string as `key` argument to +- support string as ``key`` argument to :meth:`~pymongo.collection.Collection.group` (keyf) and run all groups as commands. - support for equality testing for :class:`~bson.code.Code` @@ -956,8 +4250,8 @@ Other changes: - allow the NULL byte in strings and disallow it in key names or regex patterns -Changes in Version 1.3 ----------------------- +Changes in Version 1.3 (2009/12/16) +----------------------------------- - DEPRECATED running :meth:`~pymongo.collection.Collection.group` as :meth:`~pymongo.database.Database.eval`, also changed default for :meth:`~pymongo.collection.Collection.group` to running as a command @@ -982,8 +4276,8 @@ Changes in Version 1.3 usual, as it carries some performance implications. 
- added :meth:`~pymongo.connection.Connection.disconnect` -Changes in Version 1.2.1 ------------------------- +Changes in Version 1.2.1 (2009/12/10) +------------------------------------- - added :doc:`changelog` to docs - added ``setup.py doc --test`` to run doctests for tutorial, examples - moved most examples to Sphinx docs (and remove from *examples/* @@ -994,16 +4288,16 @@ Changes in Version 1.2.1 characters - allow :class:`unicode` instances for :class:`~bson.objectid.ObjectId` init -Changes in Version 1.2 ----------------------- -- `spec` parameter for :meth:`~pymongo.collection.Collection.remove` is +Changes in Version 1.2 (2009/12/09) +----------------------------------- +- ``spec`` parameter for :meth:`~pymongo.collection.Collection.remove` is now optional to allow for deleting all documents in a :class:`~pymongo.collection.Collection` - always wrap queries with ``{query: ...}`` even when no special options - get around some issues with queries on fields named ``query`` - enforce 4MB document limit on the client side - added :meth:`~pymongo.collection.Collection.map_reduce` helper - see - :doc:`example ` + `Aggregation `_ - added :meth:`~pymongo.cursor.Cursor.distinct` method on :class:`~pymongo.cursor.Cursor` instances to allow distinct with queries @@ -1021,29 +4315,29 @@ Changes in Version 1.2 - some minor fixes for installation process - added support for datetime and regex in :mod:`~bson.json_util` -Changes in Version 1.1.2 ------------------------- +Changes in Version 1.1.2 (2009/11/23) +------------------------------------- - improvements to :meth:`~pymongo.collection.Collection.insert` speed (using C for insert message creation) - use random number for request_id - fix some race conditions with :class:`~pymongo.errors.AutoReconnect` -Changes in Version 1.1.1 ------------------------- -- added `multi` parameter for +Changes in Version 1.1.1 (2009/11/14) +------------------------------------- +- added ``multi`` parameter for :meth:`~pymongo.collection.Collection.update` - fix unicode regex patterns with C extension - added :meth:`~pymongo.collection.Collection.distinct` -- added `database` support for :class:`~bson.dbref.DBRef` +- added ``database`` support for :class:`~bson.dbref.DBRef` - added :mod:`~bson.json_util` with helpers for encoding / decoding special types to JSON - DEPRECATED :meth:`pymongo.cursor.Cursor.__len__` in favor of - :meth:`~pymongo.cursor.Cursor.count` with `with_limit_and_skip` set + :meth:`~pymongo.cursor.Cursor.count` with ``with_limit_and_skip`` set to ``True`` due to performance regression - switch documentation to Sphinx -Changes in Version 1.1 ----------------------- +Changes in Version 1.1 (2009/10/21) +----------------------------------- - added :meth:`__hash__` for :class:`~bson.dbref.DBRef` and :class:`~bson.objectid.ObjectId` - bulk :meth:`~pymongo.collection.Collection.insert` works with any @@ -1051,35 +4345,35 @@ Changes in Version 1.1 - fix :class:`~bson.objectid.ObjectId` generation when using :mod:`multiprocessing` - added :attr:`~pymongo.cursor.Cursor.collection` -- added `network_timeout` parameter for +- added ``network_timeout`` parameter for :meth:`~pymongo.connection.Connection` -- DEPRECATED `slave_okay` parameter for individual queries -- fix for `safe` mode when multi-threaded -- added `safe` parameter for :meth:`~pymongo.collection.Collection.remove` -- added `tailable` parameter for :meth:`~pymongo.collection.Collection.find` +- DEPRECATED ``slave_okay`` parameter for individual queries +- fix for ``safe`` mode when 
multi-threaded +- added ``safe`` parameter for :meth:`~pymongo.collection.Collection.remove` +- added ``tailable`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 1.0 ----------------------- +Changes in Version 1.0 (2009/09/30) +----------------------------------- - fixes for :class:`~pymongo.master_slave_connection.MasterSlaveConnection` -- added `finalize` parameter for :meth:`~pymongo.collection.Collection.group` +- added ``finalize`` parameter for :meth:`~pymongo.collection.Collection.group` - improvements to :meth:`~pymongo.collection.Collection.insert` speed - improvements to :mod:`gridfs` speed - added :meth:`~pymongo.cursor.Cursor.__getitem__` and :meth:`~pymongo.cursor.Cursor.__len__` for :class:`~pymongo.cursor.Cursor` instances -Changes in Version 0.16 ------------------------ +Changes in Version 0.16 (2009/09/16) +------------------------------------ - support for encoding/decoding :class:`uuid.UUID` instances - fix for :meth:`~pymongo.cursor.Cursor.explain` with limits -Changes in Version 0.15.2 -------------------------- +Changes in Version 0.15.2 (2009/09/09) +-------------------------------------- - documentation changes only -Changes in Version 0.15.1 -------------------------- +Changes in Version 0.15.1 (2009/09/02) +-------------------------------------- - various performance improvements - API CHANGE no longer need to specify direction for :meth:`~pymongo.collection.Collection.create_index` and @@ -1088,34 +4382,34 @@ Changes in Version 0.15.1 - support for encoding :class:`tuple` instances as :class:`list` instances -Changes in Version 0.15 ------------------------ +Changes in Version 0.15 (2009/08/26) +------------------------------------ - fix string representation of :class:`~bson.objectid.ObjectId` instances -- added `timeout` parameter for +- added ``timeout`` parameter for :meth:`~pymongo.collection.Collection.find` -- allow scope for `reduce` function in +- allow scope for ``reduce`` function in :meth:`~pymongo.collection.Collection.group` -Changes in Version 0.14.2 -------------------------- +Changes in Version 0.14.2 (2009/08/24) +-------------------------------------- - minor bugfixes -Changes in Version 0.14.1 -------------------------- +Changes in Version 0.14.1 (2009/08/21) +-------------------------------------- - :meth:`~gridfs.grid_file.GridFile.seek` and :meth:`~gridfs.grid_file.GridFile.tell` for (read mode) :class:`~gridfs.grid_file.GridFile` instances -Changes in Version 0.14 ------------------------ +Changes in Version 0.14 (2009/08/19) +------------------------------------ - support for long in :class:`~bson.BSON` - added :meth:`~pymongo.collection.Collection.rename` -- added `snapshot` parameter for +- added ``snapshot`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 0.13 ------------------------ +Changes in Version 0.13 (2009/07/29) +------------------------------------ - better :class:`~pymongo.master_slave_connection.MasterSlaveConnection` support @@ -1125,38 +4419,38 @@ Changes in Version 0.13 - DEPRECATED passing an index name to :meth:`~pymongo.cursor.Cursor.hint` -Changes in Version 0.12 ------------------------ +Changes in Version 0.12 (2009/07/08) +------------------------------------ - improved :class:`~bson.objectid.ObjectId` generation - added :class:`~pymongo.errors.AutoReconnect` exception for when reconnection is possible - make :mod:`gridfs` thread-safe - fix for :mod:`gridfs` with non :class:`~bson.objectid.ObjectId` ``_id`` -Changes in Version 0.11.3 
-------------------------- +Changes in Version 0.11.3 (2009/06/18) +-------------------------------------- - don't allow NULL bytes in string encoder - fixes for Python 2.3 -Changes in Version 0.11.2 -------------------------- +Changes in Version 0.11.2 (2009/06/08) +-------------------------------------- - PEP 8 - updates for :meth:`~pymongo.collection.Collection.group` - VS build -Changes in Version 0.11.1 -------------------------- +Changes in Version 0.11.1 (2009/06/04) +-------------------------------------- - fix for connection pooling under Python 2.5 -Changes in Version 0.11 ------------------------ +Changes in Version 0.11 (2009/06/03) +------------------------------------ - better build failure detection - driver support for selecting fields in sub-documents - disallow insertion of invalid key names -- added `timeout` parameter for :meth:`~pymongo.connection.Connection` +- added ``timeout`` parameter for :meth:`~pymongo.connection.Connection` -Changes in Version 0.10.3 -------------------------- +Changes in Version 0.10.3 (2009/05/27) +-------------------------------------- - fix bug with large :meth:`~pymongo.cursor.Cursor.limit` - better exception when modules get reloaded out from underneath the C extension @@ -1164,30 +4458,23 @@ Changes in Version 0.10.3 :class:`~pymongo.collection.Collection` or :class:`~pymongo.database.Database` instance -Changes in Version 0.10.2 -------------------------- +Changes in Version 0.10.2 (2009/05/22) +-------------------------------------- - support subclasses of :class:`dict` in C encoder -Changes in Version 0.10.1 -------------------------- +Changes in Version 0.10.1 (2009/05/18) +-------------------------------------- - alias :class:`~pymongo.connection.Connection` as :attr:`pymongo.Connection` - raise an exception rather than silently overflowing in encoder -Changes in Version 0.10 ------------------------ +Changes in Version 0.10 (2009/05/14) +------------------------------------ - added :meth:`~pymongo.collection.Collection.ensure_index` -Changes in Version 0.9.7 ------------------------- +Changes in Version 0.9.7 (2009/05/13) +------------------------------------- - allow sub-collections of *$cmd* as valid :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` - add ``--no_ext`` command line option to *setup.py* - -.. toctree:: - :hidden: - - python3 - examples/gevent - examples/requests diff --git a/doc/conf.py b/doc/conf.py index b321a21690..063429cd98 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,33 +1,50 @@ -# -*- coding: utf-8 -*- # # PyMongo documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. +from __future__ import annotations -import sys, os -sys.path[0:0] = [os.path.abspath('..')] +import sys +from pathlib import Path -import pymongo +sys.path[0:0] = [Path("..").resolve()] + +import pymongo # noqa: E402 # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
-              'sphinx.ext.todo', 'doc.mongo_extensions']
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.doctest",
+    "sphinx.ext.coverage",
+    "sphinx.ext.todo",
+    "sphinx.ext.intersphinx",
+]
+
+
+# Add optional extensions
+try:
+    import sphinxcontrib.shellcheck  # noqa: F401
+
+    extensions += ["sphinxcontrib.shellcheck"]
+except ImportError:
+    pass
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 # General information about the project.
-project = u'PyMongo'
-copyright = u'2008 - 2014, MongoDB, Inc.'
+project = "PyMongo"
+copyright = "MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc"
+html_show_sphinx = False
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -43,117 +60,151 @@
 # List of directories, relative to source directory, that shouldn't be searched
 # for source files.
-exclude_trees = ['_build']
+exclude_trees = ["_build"]
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
 add_module_names = True
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
+
+# Options for link checking
+# The anchors on the rendered markdown page are created after the fact,
+# so those links result in a 404.
+# wiki.centos.org has been flaky.
+# sourceforge.net is giving a 403 error, but is still accessible from the browser.
+# Links to release notes in jira give 401 error: unauthorized. PYTHON-5585
linkcheck_ignore = [
+    "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check",
+    "https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback",
+    "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source",
+    r"https://wiki.centos.org/[\w/]*",
+    r"https://sourceforge.net/",
+    r"https://jira\.mongodb\.org/secure/ReleaseNote\.jspa.*",
+]
+
+# Allow for flaky links.
+linkcheck_retries = 3 # -- Options for extensions ---------------------------------------------------- -autoclass_content = 'init' +autoclass_content = "init" + +autodoc_typehints = "description" -doctest_path = os.path.abspath('..') +doctest_path = [Path("..").resolve()] -doctest_test_doctest_blocks = False +doctest_test_doctest_blocks = "" doctest_global_setup = """ from pymongo.mongo_client import MongoClient client = MongoClient() client.drop_database("doctest_test") db = client.doctest_test +server_major_version = int(client.server_info()['version'].split()[-1][0]) """ # -- Options for HTML output --------------------------------------------------- -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' -html_theme_options = {'collapsiblesidebar': True} +try: + import furo # noqa: F401 + + html_theme = "furo" +except ImportError: + # Theme gratefully vendored from CPython source. + html_theme = "pydoctheme" + html_theme_path = ["."] + html_theme_options = {"collapsiblesidebar": True, "googletag": False} + + # Additional static files. + html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyMongo' + release.replace('.', '_') +htmlhelp_basename = "PyMongo" + release.replace(".", "_") # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ - ('index', 'PyMongo.tex', u'PyMongo Documentation', - u'Michael Dirolf', 'manual'), + ("index", "PyMongo.tex", "PyMongo Documentation", "Michael Dirolf", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True + + +intersphinx_mapping = { + "gevent": ("https://www.gevent.org/", None), + "py": ("https://docs.python.org/3/", None), +} diff --git a/doc/contributors.rst b/doc/contributors.rst index 1a3bc67674..08296e9595 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -69,3 +69,41 @@ The following is a list of people who have contributed to - Yuchen Ying (yegle) - Kyle Erf (3rf) - Luke Lovett (lovett89) +- Jaroslav Semančík (girogiro) +- Don Mitchell (dmitchell) +- Ximing (armnotstrong) +- Can Zhang (cannium) +- Sergey Azovskov (last-g) +- Heewa Barfchin (heewa) +- Anna Herlihy (aherlihy) +- Len Buckens (buckensl) +- ultrabug +- Shane Harvey (ShaneHarvey) +- Cao Siyang (caosiyang) +- Zhecong Kwok (gzcf) +- TaoBeier(tao12345666333) +- Jagrut Trivedi(Jagrut) +- Shrey Batra(shreybatra) +- Felipe Rodrigues(fbidu) +- Terence Honles (terencehonles) +- Paul Fisher (thetorpedodog) +- Julius Park (juliusgeo) +- Khanh Nguyen (KN99HN) +- Henri Froese (henrifroese) +- Ishmum Jawad Khan (ishmum123) +- Arie Bovenberg (ariebovenberg) +- Ben Warner (bcwarner) +- Jean-Christophe Fillion-Robin (jcfr) +- Sean Cheah (thalassemia) +- Dainis Gorbunovs (DainisGorbunovs) +- Iris Ho (sleepyStick) +- Stephan Hof (stephan-hof) +- Casey Clements (caseyclements) +- Ivan Lukyanchikov (ilukyanchikov) +- Terry Patterson +- Romain Morotti +- Navjot Singh (navjots18) +- Jib Adegunloye (Jibola) +- Jeffrey A. Clark (aclark4life) +- Steven Silvester (blink1073) +- Noah Stapp (NoahStapp) diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst deleted file mode 100644 index 56294bcf74..0000000000 --- a/doc/examples/aggregation.rst +++ /dev/null @@ -1,192 +0,0 @@ -Aggregation Examples -==================== - -There are several methods of performing aggregations in MongoDB. These -examples cover the new aggregation framework, using map reduce and using the -group method. - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('aggregation_example') - -Setup ------ -To start, we'll insert some example data which we can perform -aggregations on: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> db = MongoClient().aggregation_example - >>> db.things.insert({"x": 1, "tags": ["dog", "cat"]}) - ObjectId('...') - >>> db.things.insert({"x": 2, "tags": ["cat"]}) - ObjectId('...') - >>> db.things.insert({"x": 2, "tags": ["mouse", "cat", "dog"]}) - ObjectId('...') - >>> db.things.insert({"x": 3, "tags": []}) - ObjectId('...') - -Aggregation Framework ---------------------- - -This example shows how to use the -:meth:`~pymongo.collection.Collection.aggregate` method to use the aggregation -framework. 
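For comparison with the 2.x-era walkthrough that follows, here is a minimal sketch of the same tag-count pipeline as it would look on PyMongo 3 or later (assuming ``insert_many`` and an ``aggregate`` method that returns a cursor; database and collection names match the setup above):

    from pymongo import MongoClient

    db = MongoClient().aggregation_example
    db.things.insert_many(
        [
            {"x": 1, "tags": ["dog", "cat"]},
            {"x": 2, "tags": ["cat"]},
            {"x": 2, "tags": ["mouse", "cat", "dog"]},
            {"x": 3, "tags": []},
        ]
    )
    # aggregate() takes the whole pipeline as a single list and returns a
    # cursor of result documents; on Python 3.7+ plain dicts preserve
    # insertion order, so SON is no longer needed for $sort.
    pipeline = [
        {"$unwind": "$tags"},
        {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
        {"$sort": {"count": -1, "_id": -1}},
    ]
    for doc in db.things.aggregate(pipeline):
        print(doc)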
We'll perform a simple aggregation to count the number of
-occurrences for each tag in the ``tags`` array, across the entire collection.
-To achieve this we need to pass in three operations to the pipeline.
-First, we need to unwind the ``tags`` array, then group by the tags and
-sum them up, and finally we sort by count.
-
-As Python dictionaries don't maintain order, you should use :class:`~bson.son.SON`
-or :class:`collections.OrderedDict` where explicit ordering is required,
-e.g. ``"$sort"``:
-
-.. note::
-
- aggregate requires server version **>= 2.1.0**. The PyMongo
- :meth:`~pymongo.collection.Collection.aggregate` helper requires
- PyMongo version **>= 2.3**.
-
-.. doctest::
-
- >>> from bson.son import SON
- >>> db.things.aggregate([
- ... {"$unwind": "$tags"},
- ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
- ... {"$sort": SON([("count", -1), ("_id", -1)])}
- ... ])
- ...
- {u'ok': 1.0, u'result': [{u'count': 3, u'_id': u'cat'}, {u'count': 2, u'_id': u'dog'}, {u'count': 1, u'_id': u'mouse'}]}
-
-
-As well as simple aggregations the aggregation framework provides projection
-capabilities to reshape the returned data. Using projections and aggregation,
-you can add computed fields, create new virtual sub-objects, and extract
-sub-fields into the top-level of results.
-
-.. seealso:: The full documentation for MongoDB's `aggregation framework
- `_
-
-Map/Reduce
-----------
-
-Another option for aggregation is to use the map reduce framework. Here we
-will define **map** and **reduce** functions to also count the number of
-occurrences for each tag in the ``tags`` array, across the entire collection.
-
-Our **map** function just emits a single `(key, 1)` pair for each tag in
-the array:
-
-.. doctest::
-
- >>> from bson.code import Code
- >>> mapper = Code("""
- ... function () {
- ... this.tags.forEach(function(z) {
- ... emit(z, 1);
- ... });
- ... }
- ... """)
-
-The **reduce** function sums over all of the emitted values for a given key:
-
-.. doctest::
-
- >>> reducer = Code("""
- ... function (key, values) {
- ... var total = 0;
- ... for (var i = 0; i < values.length; i++) {
- ... total += values[i];
- ... }
- ... return total;
- ... }
- ... """)
-
-.. note:: We can't just return ``values.length`` as the **reduce** function
- might be called iteratively on the results of other reduce steps.
-
-Finally, we call :meth:`~pymongo.collection.Collection.map_reduce` and
-iterate over the result collection:
-
-.. doctest::
-
- >>> result = db.things.map_reduce(mapper, reducer, "myresults")
- >>> for doc in result.find():
- ... print doc
- ...
- {u'_id': u'cat', u'value': 3.0}
- {u'_id': u'dog', u'value': 2.0}
- {u'_id': u'mouse', u'value': 1.0}
-
-Advanced Map/Reduce
--------------------
-
-PyMongo's API supports all of the features of MongoDB's map/reduce engine.
-One interesting feature is the ability to get more detailed results when
-desired, by passing `full_response=True` to
-:meth:`~pymongo.collection.Collection.map_reduce`. This returns the full
-response to the map/reduce command, rather than just the result collection:
-
-.. doctest::
-
- >>> db.things.map_reduce(mapper, reducer, "myresults", full_response=True)
- {u'counts': {u'input': 4, u'reduce': 2, u'emit': 6, u'output': 3}, u'timeMillis': ..., u'ok': ..., u'result': u'...'}
-
-All of the optional map/reduce parameters are also supported, simply pass them
-as keyword arguments. In this example we use the `query` parameter to limit the
-documents that will be mapped over:
-
-.. doctest::
-
- >>> result = db.things.map_reduce(mapper, reducer, "myresults", query={"x": {"$lt": 2}})
- >>> for doc in result.find():
- ... print doc
- ...
- {u'_id': u'cat', u'value': 1.0}
- {u'_id': u'dog', u'value': 1.0}
-
-With MongoDB 1.8.0 or newer you can use :class:`~bson.son.SON` or
-:class:`collections.OrderedDict` to specify a different database to store the
-result collection:
-
-.. doctest::
-
- >>> from bson.son import SON
- >>> db.things.map_reduce(mapper, reducer, out=SON([("replace", "results"), ("db", "outdb")]), full_response=True)
- {u'counts': {u'input': 4, u'reduce': 2, u'emit': 6, u'output': 3}, u'timeMillis': ..., u'ok': ..., u'result': {u'db': ..., u'collection': ...}}
-
-.. seealso:: The full list of options for MongoDB's `map reduce engine `_
-
-Group
------
-
-The :meth:`~pymongo.collection.Collection.group` method provides some of the
-same functionality as SQL's GROUP BY. Simpler than a map reduce, you need to
-provide a key to group by, an initial value for the aggregation and a
-reduce function.
-
-.. note:: Doesn't work with sharded MongoDB configurations; use aggregation or
- map/reduce instead of group().
-
-Here we are doing a simple group and count of the occurrences of ``x`` values:
-
-.. doctest::
-
- >>> reducer = Code("""
- ... function(obj, prev){
- ... prev.count++;
- ... }
- ... """)
- ...
- >>> from bson.son import SON
- >>> results = db.things.group(key={"x":1}, condition={}, initial={"count": 0}, reduce=reducer)
- >>> for doc in results:
- ... print doc
- {u'count': 1.0, u'x': 1.0}
- {u'count': 2.0, u'x': 2.0}
- {u'count': 1.0, u'x': 3.0}
-
-.. seealso:: The full list of options for MongoDB's `group method `_
diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst
deleted file mode 100644
index 7c381d16e9..0000000000
--- a/doc/examples/authentication.rst
+++ /dev/null
@@ -1,180 +0,0 @@
-Authentication Examples
-=======================
-
-MongoDB supports several different authentication mechanisms. These examples
-cover all authentication methods currently supported by PyMongo, documenting
-Python module and MongoDB version dependencies.
-
-MONGODB-CR
-----------
-MONGODB-CR is the default authentication mechanism supported by a MongoDB
-cluster configured for authentication. Authentication is per-database and
-credentials can be specified through the MongoDB URI or passed to the
-:meth:`~pymongo.database.Database.authenticate` method::
-
- >>> from pymongo import MongoClient
- >>> client = MongoClient('example.com')
- >>> client.the_database.authenticate('user', 'password')
- True
- >>>
- >>> uri = "mongodb://user:password@example.com/the_database"
- >>> client = MongoClient(uri)
- >>>
-
-When using MongoDB's delegated authentication features, a separate
-authentication source can be specified (using PyMongo 2.5 or newer)::
-
- >>> from pymongo import MongoClient
- >>> client = MongoClient('example.com')
- >>> client.the_database.authenticate('user',
- ... 'password',
- ... source='source_database')
- True
- >>>
- >>> uri = "mongodb://user:password@example.com/?authSource=source_database"
- >>> client = MongoClient(uri)
- >>>
-
-MONGODB-X509
-------------
-.. versionadded:: 2.6
-
-The MONGODB-X509 mechanism authenticates a username derived from the
-distinguished subject name of the X.509 certificate presented by the driver
-during SSL negotiation.
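On PyMongo 4 and later the ``ssl_*`` keyword arguments used in this example no longer exist and ``Database.authenticate`` has been removed; a rough modern equivalent, assuming PyMongo 4's ``tls*`` option names, is:

    from pymongo import MongoClient

    # Sketch only: the host and certificate paths are the placeholders
    # from the example below; tlsCertificateKeyFile/tlsCAFile replaced
    # ssl_certfile/ssl_ca_certs, and the mechanism is given to the client.
    client = MongoClient(
        "example.com",
        tls=True,
        tlsCertificateKeyFile="/path/to/client.pem",
        tlsCAFile="/path/to/ca.pem",
        authMechanism="MONGODB-X509",
    )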
This authentication method requires the use of SSL -connections with certificate validation and is available in MongoDB 2.5.1 -and newer:: - - >>> import ssl - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') - >>> client.the_database.authenticate("", - ... mechanism='MONGODB-X509') - True - >>> - -MONGODB-X509 authenticates against the $external virtual database, so you -do not have to specify a database in the URI:: - - >>> uri = "mongodb://@example.com/?authMechanism=MONGODB-X509" - >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') - >>> - -.. note:: - If you are using CPython 2.4 or 2.5 you must install the python - `ssl module`_ using easy_install or pip. - -.. _ssl module: https://pypi.python.org/pypi/ssl/ - -.. _use_kerberos: - -GSSAPI (Kerberos) ------------------ -.. versionadded:: 2.5 - -GSSAPI (Kerberos) authentication is available in the Enterprise Edition of -MongoDB, version 2.4 and newer. To authenticate using GSSAPI you must first -install the python `kerberos`_ or `pykerberos`_ module using easy_install or -pip. Make sure you run kinit before using the following authentication methods:: - - $ kinit mongodbuser@EXAMPLE.COM - mongodbuser@EXAMPLE.COM's Password: - $ klist - Credentials cache: FILE:/tmp/krb5cc_1000 - Principal: mongodbuser@EXAMPLE.COM - - Issued Expires Principal - Feb 9 13:48:51 2013 Feb 9 23:48:51 2013 krbtgt/EXAMPLE.COM@EXAMPLE.COM - -Now authenticate using the MongoDB URI. GSSAPI authenticates against the -$external virtual database so you do not have to specify a database in the -URI:: - - >>> # Note: the kerberos principal must be url encoded. - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI" - >>> client = MongoClient(uri) - >>> - -or using :meth:`~pymongo.database.Database.authenticate`:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com') - >>> db = client.test - >>> db.authenticate('mongodbuser@EXAMPLE.COM', mechanism='GSSAPI') - True - -The default service name used by MongoDB and PyMongo is `mongodb`. You can -specify a custom service name with the ``gssapiServiceName`` option:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&gssapiServiceName=myservicename" - >>> client = MongoClient(uri) - >>> - >>> client = MongoClient('example.com') - >>> db = client.test - >>> db.authenticate('mongodbuser@EXAMPLE.COM', mechanism='GSSAPI', gssapiServiceName='myservicename') - True - -.. note:: - Kerberos support is only provided in environments supported by the python - `kerberos`_ or `pykerberos`_ modules. This currently limits support to - CPython and Unix environments. - -.. _kerberos: http://pypi.python.org/pypi/kerberos -.. _pykerberos: https://pypi.python.org/pypi/pykerberos - -SASL PLAIN (RFC 4616) ---------------------- -.. versionadded:: 2.6 - -MongoDB Enterprise Edition versions 2.5.0 and newer support the SASL PLAIN -authentication mechanism, initially intended for delegating authentication -to an LDAP server. Using the PLAIN mechanism is very similar to MONGODB-CR. 
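On PyMongo 4 and later, where ``Database.authenticate`` has been removed, the same PLAIN credentials are instead passed to the client itself, most simply via the URI form shown below:

    from pymongo import MongoClient

    # Sketch only: equivalent to the authenticate() calls below; the
    # username/password keyword arguments to MongoClient also work.
    uri = "mongodb://user:password@example.com/?authMechanism=PLAIN&authSource=$external"
    client = MongoClient(uri, tls=True)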
-These examples use the $external virtual database for LDAP support:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com') - >>> client.the_database.authenticate('user', - ... 'password', - ... source='$external', - ... mechanism='PLAIN') - True - >>> - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN&authSource=$external" - >>> client = MongoClient(uri) - >>> - -SASL PLAIN is a clear-text authentication mechanism. We **strongly** recommend -that you connect to MongoDB using SSL with certificate validation when using -the SASL PLAIN mechanism:: - - >>> import ssl - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') - >>> client.the_database.authenticate('user', - ... 'password', - ... source='$external', - ... mechanism='PLAIN') - True - >>> - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN&authSource=$external" - >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') - >>> - diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst deleted file mode 100644 index a8beb5fb3e..0000000000 --- a/doc/examples/bulk.rst +++ /dev/null @@ -1,200 +0,0 @@ -Bulk Write Operations -===================== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('bulk_example') - -This tutorial explains how to take advantage of PyMongo's bulk -write operation features. Executing write operations in batches -reduces the number of network round trips, increasing write -throughput. - -Bulk Insert ------------ - -.. versionadded:: 2.6 - -A batch of documents can be inserted by passing a list or generator -to the :meth:`~pymongo.collection.Collection.insert` method. PyMongo -will automatically split the batch into smaller sub-batches based on -the maximum message size accepted by MongoDB, supporting very large -bulk insert operations. - -.. doctest:: - - >>> import pymongo - >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert(({'i': i} for i in xrange(10000))) - [...] - >>> db.test.count() - 10000 - -Mixed Bulk Write Operations ---------------------------- - -.. versionadded:: 2.7 - -PyMongo also supports executing mixed bulk write operations. A batch -of insert, update, and remove operations can be executed together using -the bulk write operations API. - -.. note:: - - Though the following API will work with all versions of MongoDB, it is - designed to be used with MongoDB versions >= 2.6. Much better bulk insert - performance can be achieved with older versions of MongoDB through the - :meth:`~pymongo.collection.Collection.insert` method. - -.. _ordered_bulk: - -Ordered Bulk Write Operations -............................. - -Ordered bulk write operations are batched and sent to the server in the -order provided for serial execution. The return value is a document -describing the type and count of operations performed. - -.. doctest:: - - >>> from pprint import pprint - >>> - >>> bulk = db.test.initialize_ordered_bulk_op() - >>> # Remove all documents from the previous example. - ... 
- >>> bulk.find({}).remove()
- >>> bulk.insert({'_id': 1})
- >>> bulk.insert({'_id': 2})
- >>> bulk.insert({'_id': 3})
- >>> bulk.find({'_id': 1}).update({'$set': {'foo': 'bar'}})
- >>> bulk.find({'_id': 4}).upsert().update({'$inc': {'j': 1}})
- >>> bulk.find({'j': 1}).replace_one({'j': 2})
- >>> result = bulk.execute()
- >>> pprint(result)
- {'nInserted': 3,
- 'nMatched': 2,
- 'nModified': 2,
- 'nRemoved': 10000,
- 'nUpserted': 1,
- 'upserted': [{u'_id': 4, u'index': 5}],
- 'writeConcernErrors': [],
- 'writeErrors': []}
- >>>
-
-.. warning:: ``nModified`` is only reported by MongoDB 2.6 and later. When
- connected to an earlier server version, or in certain mixed version sharding
- configurations, PyMongo omits this field from the results of a bulk
- write operation.
-
-The first write failure that occurs (e.g. duplicate key error) aborts the
-remaining operations, and PyMongo raises
-:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of
-the exception instance provides the execution results up until the failure
-occurred and details about the failure - including the operation that caused
-the failure.
-
-.. doctest::
-
- >>> from pymongo.errors import BulkWriteError
- >>> bulk = db.test.initialize_ordered_bulk_op()
- >>> bulk.find({'j': 2}).replace_one({'i': 5})
- >>> # Violates the unique key constraint on _id.
- ...
- >>> bulk.insert({'_id': 4})
- >>> bulk.find({'i': 5}).remove_one()
- >>> try:
- ... bulk.execute()
- ... except BulkWriteError as bwe:
- ... pprint(bwe.details)
- ...
- {'nInserted': 0,
- 'nMatched': 1,
- 'nModified': 1,
- 'nRemoved': 0,
- 'nUpserted': 0,
- 'upserted': [],
- 'writeConcernErrors': [],
- 'writeErrors': [{u'code': 11000,
- u'errmsg': u'insertDocument :: caused by :: 11000 E11000 duplicate key error index: bulk_example.test.$_id_ dup key: { : 4 }',
- u'index': 1,
- u'op': {'_id': 4}}]}
- >>>
-
-.. _unordered_bulk:
-
-Unordered Bulk Write Operations
-...............................
-
-Unordered bulk write operations are batched and sent to the server in
-**arbitrary order** where they may be executed in parallel. Any errors
-that occur are reported after all operations are attempted.
-
-In the next example the first and third operations fail due to the unique
-constraint on _id. Since we are doing unordered execution the second
-and fourth operations succeed.
-
-.. doctest::
-
- >>> bulk = db.test.initialize_unordered_bulk_op()
- >>> bulk.insert({'_id': 1})
- >>> bulk.find({'_id': 2}).remove_one()
- >>> bulk.insert({'_id': 3})
- >>> bulk.find({'_id': 4}).replace_one({'i': 1})
- >>> try:
- ... bulk.execute()
- ... except BulkWriteError as bwe:
- ... pprint(bwe.details)
- ...
- {'nInserted': 0,
- 'nMatched': 1,
- 'nModified': 1,
- 'nRemoved': 1,
- 'nUpserted': 0,
- 'upserted': [],
- 'writeConcernErrors': [],
- 'writeErrors': [{u'code': 11000,
- u'errmsg': u'insertDocument :: caused by :: 11000 E11000 duplicate key error index: bulk_example.test.$_id_ dup key: { : 1 }',
- u'index': 0,
- u'op': {'_id': 1}},
- {u'code': 11000,
- u'errmsg': u'insertDocument :: caused by :: 11000 E11000 duplicate key error index: bulk_example.test.$_id_ dup key: { : 3 }',
- u'index': 2,
- u'op': {'_id': 3}}]}
- >>>
-
-Write Concern
-.............
-
-By default bulk operations are executed with the
-:attr:`~pymongo.collection.Collection.write_concern` of the collection they
-are executed against. A custom write concern can be passed to the
-:meth:`~pymongo.bulk.BulkOperationBuilder.execute` method. Write concern
-errors (e.g.
wtimeout) will be reported after all operations are attempted, -regardless of execution order. - -.. doctest:: - - >>> bulk = db.test.initialize_ordered_bulk_op() - >>> bulk.insert({'a': 0}) - >>> bulk.insert({'a': 1}) - >>> bulk.insert({'a': 2}) - >>> bulk.insert({'a': 3}) - >>> try: - ... bulk.execute({'w': 4, 'wtimeout': 1}) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 4, - 'nMatched': 0, - 'nModified': 0, - 'nRemoved': 0, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [{u'code': 64, - u'errInfo': {u'wtimeout': True}, - u'errmsg': u'waiting for replication timed out'}], - 'writeErrors': []} - >>> - diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst deleted file mode 100644 index 8c995c4784..0000000000 --- a/doc/examples/custom_type.rst +++ /dev/null @@ -1,226 +0,0 @@ -Custom Type Example -=================== - -This is an example of using a custom type with PyMongo. The example -here is a bit contrived, but shows how to use a -:class:`~pymongo.son_manipulator.SONManipulator` to manipulate -documents as they are saved or retrieved from MongoDB. More -specifically, it shows a couple different mechanisms for working with -custom datatypes in PyMongo. - -Setup ------ - -We'll start by getting a clean database to use for the example: - -.. doctest:: - - >>> from pymongo.mongo_client import MongoClient - >>> client = MongoClient() - >>> client.drop_database("custom_type_example") - >>> db = client.custom_type_example - -Since the purpose of the example is to demonstrate working with custom -types, we'll need a custom datatype to use. Here we define the aptly -named :class:`Custom` class, which has a single method, :meth:`x`: - -.. doctest:: - - >>> class Custom(object): - ... def __init__(self, x): - ... self.__x = x - ... - ... def x(self): - ... return self.__x - ... - >>> foo = Custom(10) - >>> foo.x() - 10 - -When we try to save an instance of :class:`Custom` with PyMongo, we'll -get an :class:`~bson.errors.InvalidDocument` exception: - -.. doctest:: - - >>> db.test.insert({"custom": Custom(5)}) - Traceback (most recent call last): - InvalidDocument: cannot convert value of type to bson - -Manual Encoding ---------------- - -One way to work around this is to manipulate our data into something -we *can* save with PyMongo. To do so we define two methods, -:meth:`encode_custom` and :meth:`decode_custom`: - -.. doctest:: - - >>> def encode_custom(custom): - ... return {"_type": "custom", "x": custom.x()} - ... - >>> def decode_custom(document): - ... assert document["_type"] == "custom" - ... return Custom(document["x"]) - ... - -We can now manually encode and decode :class:`Custom` instances and -use them with PyMongo: - -.. doctest:: - - >>> db.test.insert({"custom": encode_custom(Custom(5))}) - ObjectId('...') - >>> db.test.find_one() - {u'_id': ObjectId('...'), u'custom': {u'x': 5, u'_type': u'custom'}} - >>> decode_custom(db.test.find_one()["custom"]) - - >>> decode_custom(db.test.find_one()["custom"]).x() - 5 - -Automatic Encoding and Decoding -------------------------------- - -Needless to say, that was a little unwieldy. Let's make this a bit -more seamless by creating a new -:class:`~pymongo.son_manipulator.SONManipulator`. -:class:`~pymongo.son_manipulator.SONManipulator` instances allow you -to specify transformations to be applied automatically by PyMongo: - -.. doctest:: - - >>> from pymongo.son_manipulator import SONManipulator - >>> class Transform(SONManipulator): - ... 
def transform_incoming(self, son, collection): - ... for (key, value) in son.items(): - ... if isinstance(value, Custom): - ... son[key] = encode_custom(value) - ... elif isinstance(value, dict): # Make sure we recurse into sub-docs - ... son[key] = self.transform_incoming(value, collection) - ... return son - ... - ... def transform_outgoing(self, son, collection): - ... for (key, value) in son.items(): - ... if isinstance(value, dict): - ... if "_type" in value and value["_type"] == "custom": - ... son[key] = decode_custom(value) - ... else: # Again, make sure to recurse into sub-docs - ... son[key] = self.transform_outgoing(value, collection) - ... return son - ... - -Now we add our manipulator to the :class:`~pymongo.database.Database`: - -.. doctest:: - - >>> db.add_son_manipulator(Transform()) - -After doing so we can save and restore :class:`Custom` instances seamlessly: - -.. doctest:: - - >>> db.test.remove() # remove whatever has already been saved - {...} - >>> db.test.insert({"custom": Custom(5)}) - ObjectId('...') - >>> db.test.find_one() - {u'_id': ObjectId('...'), u'custom': } - >>> db.test.find_one()["custom"].x() - 5 - -If we get a new :class:`~pymongo.database.Database` instance we'll -clear out the :class:`~pymongo.son_manipulator.SONManipulator` -instance we added: - -.. doctest:: - - >>> db = client.custom_type_example - -This allows us to see what was actually saved to the database: - -.. doctest:: - - >>> db.test.find_one() - {u'_id': ObjectId('...'), u'custom': {u'x': 5, u'_type': u'custom'}} - -which is the same format that we encode to with our -:meth:`encode_custom` method! - -Binary Encoding ---------------- - -We can take this one step further by encoding to binary, using a user -defined subtype. This allows us to identify what to decode without -resorting to tricks like the ``_type`` field used above. - -We'll start by defining the methods :meth:`to_binary` and -:meth:`from_binary`, which convert :class:`Custom` instances to and -from :class:`~bson.binary.Binary` instances: - -.. note:: You could just pickle the instance and save that. What we do - here is a little more lightweight. - -.. doctest:: - - >>> from bson.binary import Binary - >>> def to_binary(custom): - ... return Binary(str(custom.x()), 128) - ... - >>> def from_binary(binary): - ... return Custom(int(binary)) - ... - -Next we'll create another -:class:`~pymongo.son_manipulator.SONManipulator`, this time using the -methods we just defined: - -.. doctest:: - - >>> class TransformToBinary(SONManipulator): - ... def transform_incoming(self, son, collection): - ... for (key, value) in son.items(): - ... if isinstance(value, Custom): - ... son[key] = to_binary(value) - ... elif isinstance(value, dict): - ... son[key] = self.transform_incoming(value, collection) - ... return son - ... - ... def transform_outgoing(self, son, collection): - ... for (key, value) in son.items(): - ... if isinstance(value, Binary) and value.subtype == 128: - ... son[key] = from_binary(value) - ... elif isinstance(value, dict): - ... son[key] = self.transform_outgoing(value, collection) - ... return son - ... - -Now we'll empty the :class:`~pymongo.database.Database` and add the -new manipulator: - -.. doctest:: - - >>> db.test.remove() - {...} - >>> db.add_son_manipulator(TransformToBinary()) - -After doing so we can save and restore :class:`Custom` instances -seamlessly: - -.. 
doctest:: - - >>> db.test.insert({"custom": Custom(5)}) - ObjectId('...') - >>> db.test.find_one() - {u'_id': ObjectId('...'), u'custom': } - >>> db.test.find_one()["custom"].x() - 5 - -We can see what's actually being saved to the database (and verify -that it is using a :class:`~bson.binary.Binary` instance) by -clearing out the manipulators and repeating our -:meth:`~pymongo.collection.Collection.find_one`: - -.. doctest:: - - >>> db = client.custom_type_example - >>> db.test.find_one() - {u'_id': ObjectId('...'), u'custom': Binary('5', 128)} diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst deleted file mode 100644 index 26264de457..0000000000 --- a/doc/examples/geo.rst +++ /dev/null @@ -1,101 +0,0 @@ -Geospatial Indexing Example -=========================== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('geo_example') - -This example shows how to create and use a :data:`~pymongo.GEO2D` -index in PyMongo. - -.. note:: 2D indexes require server version **>= 1.3.4**. Support for - 2D indexes also requires PyMongo version **>= 1.5.1**. - -.. mongodoc:: geo - -Creating a Geospatial Index ---------------------------- - -Creating a geospatial index in pymongo is easy: - -.. doctest:: - - >>> from pymongo import MongoClient, GEO2D - >>> db = MongoClient().geo_example - >>> db.places.create_index([("loc", GEO2D)]) - u'loc_2d' - -Inserting Places ----------------- - -Locations in MongoDB are represented using either embedded documents -or lists where the first two elements are coordinates. Here, we'll -insert a couple of example locations: - -.. doctest:: - - >>> db.places.insert({"loc": [2, 5]}) - ObjectId('...') - >>> db.places.insert({"loc": [30, 5]}) - ObjectId('...') - >>> db.places.insert({"loc": [1, 2]}) - ObjectId('...') - >>> db.places.insert({"loc": [4, 4]}) - ObjectId('...') - -Querying --------- - -Using the geospatial index we can find documents near another point: - -.. doctest:: - - >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... repr(doc) - ... - "{u'loc': [2, 5], u'_id': ObjectId('...')}" - "{u'loc': [4, 4], u'_id': ObjectId('...')}" - "{u'loc': [1, 2], u'_id': ObjectId('...')}" - -The $maxDistance operator requires the use of :class:`~bson.son.SON`: - -.. doctest:: - - >>> from bson.son import SON - >>> for doc in db.places.find({"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])}).limit(3): - ... repr(doc) - ... - "{u'loc': [2, 5], u'_id': ObjectId('...')}" - "{u'loc': [4, 4], u'_id': ObjectId('...')}" - "{u'loc': [1, 2], u'_id': ObjectId('...')}" - -It's also possible to query for all items within a given rectangle -(specified by lower-left and upper-right coordinates): - -.. doctest:: - - >>> for doc in db.places.find({"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}}): - ... repr(doc) - ... - "{u'loc': [4, 4], u'_id': ObjectId('...')}" - "{u'loc': [2, 5], u'_id': ObjectId('...')}" - -Or circle (specified by center point and radius): - -.. doctest:: - - >>> for doc in db.places.find({"loc": {"$within": {"$center": [[0, 0], 6]}}}): - ... repr(doc) - ... 
- "{u'loc': [1, 2], u'_id': ObjectId('...')}" - "{u'loc': [4, 4], u'_id': ObjectId('...')}" - "{u'loc': [2, 5], u'_id': ObjectId('...')}" - -geoNear queries are also supported using :class:`~bson.son.SON`:: - - >>> from bson.son import SON - >>> db.command(SON([('geoNear', 'places'), ('near', [1, 2])])) - {u'ok': 1.0, u'stats': ...} - diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst deleted file mode 100644 index 809d3d5511..0000000000 --- a/doc/examples/gevent.rst +++ /dev/null @@ -1,51 +0,0 @@ -Gevent -====== - -PyMongo supports `Gevent `_. Simply call Gevent's -``monkey.patch_all()`` before loading any other modules: - -.. doctest:: - - >>> # You must call patch_all() *before* importing any other modules - >>> from gevent import monkey; monkey.patch_all() - >>> from pymongo import MongoClient - >>> client = MongoClient() - -PyMongo's Gevent support means -that :meth:`~pymongo.mongo_client.MongoClient.start_request()` ensures the -current greenlet (not merely the current thread) uses the same socket for all -operations until :meth:`~pymongo.mongo_client.MongoClient.end_request()` is called. -See the :doc:`requests documentation ` for details on requests in -PyMongo. - -Using Gevent With Threads -------------------------- - -If you need to use standard Python threads in the same process as Gevent and -greenlets, run ``monkey.patch_socket()``, rather than -``monkey.patch_all()``, and create a -:class:`~pymongo.mongo_client.MongoClient` with ``use_greenlets=True``. -The :class:`~pymongo.mongo_client.MongoClient` will use a special greenlet-aware -connection pool. - -.. doctest:: - - >>> from gevent import monkey; monkey.patch_socket() - >>> from pymongo import MongoClient - >>> client = MongoClient(use_greenlets=True) - -An instance of :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` -created with ``use_greenlets=True`` will also use a greenlet-aware pool. -Additionally, it will use a background greenlet instead of a background thread -to monitor the state of the replica set. - -.. doctest:: - - >>> from gevent import monkey; monkey.patch_socket() - >>> from pymongo.mongo_replica_set_client import MongoReplicaSetClient - >>> rsc = MongoReplicaSetClient( - ... 'mongodb://localhost:27017,localhost:27018,localhost:27019', - ... replicaSet='repl0', use_greenlets=True) - -Setting ``use_greenlets`` is unnecessary under normal circumstances; simply call -``patch_all`` to use Gevent with PyMongo. \ No newline at end of file diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst deleted file mode 100644 index 08be41ddf4..0000000000 --- a/doc/examples/gridfs.rst +++ /dev/null @@ -1,83 +0,0 @@ -GridFS Example -============== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('gridfs_example') - -This example shows how to use :mod:`gridfs` to store large binary -objects (e.g. files) in MongoDB. - -.. seealso:: The API docs for :mod:`gridfs`. - -.. seealso:: `This blog post - `_ - for some motivation behind this API. - -Setup ------ - -We start by creating a :class:`~gridfs.GridFS` instance to use: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> import gridfs - >>> - >>> db = MongoClient().gridfs_example - >>> fs = gridfs.GridFS(db) - -Every :class:`~gridfs.GridFS` instance is created with and will -operate on a specific :class:`~pymongo.database.Database` instance. 
- -Saving and Retrieving Data --------------------------- - -The simplest way to work with :mod:`gridfs` is to use its key/value -interface (the :meth:`~gridfs.GridFS.put` and -:meth:`~gridfs.GridFS.get` methods). To write data to GridFS, use -:meth:`~gridfs.GridFS.put`: - -.. doctest:: - - >>> a = fs.put("hello world") - -:meth:`~gridfs.GridFS.put` creates a new file in GridFS, and returns -the value of the file document's ``"_id"`` key. Given that ``"_id"`` -we can use :meth:`~gridfs.GridFS.get` to get back the contents of the -file: - -.. doctest:: - - >>> fs.get(a).read() - 'hello world' - -:meth:`~gridfs.GridFS.get` returns a file-like object, so we get the -file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. - -In addition to putting a :class:`str` as a GridFS file, we can also -put any file-like object (an object with a :meth:`read` -method). GridFS will handle reading the file in chunk-sized segments -automatically. We can also add additional attributes to the file as -keyword arguments: - -.. doctest:: - - >>> b = fs.put(fs.get(a), filename="foo", bar="baz") - >>> out = fs.get(b) - >>> out.read() - 'hello world' - >>> out.filename - u'foo' - >>> out.bar - u'baz' - >>> out.upload_date - datetime.datetime(...) - -The attributes we set in :meth:`~gridfs.GridFS.put` are stored in the -file document, and retrievable after calling -:meth:`~gridfs.GridFS.get`. Some attributes (like ``"filename"``) are -special and are defined in the GridFS specification - see that -document for more details. diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst deleted file mode 100644 index 30a11d2432..0000000000 --- a/doc/examples/high_availability.rst +++ /dev/null @@ -1,343 +0,0 @@ -High Availability and PyMongo -============================= - -PyMongo makes it easy to write highly available applications whether -you use a `single replica set `_ -or a `large sharded cluster -`_. - -Connecting to a Replica Set ---------------------------- - -PyMongo makes working with `replica sets -`_ easy. Here we'll launch a new -replica set and show how to handle both initialization and normal -connections with PyMongo. - -.. note:: Replica sets require server version **>= 1.6.0**. Support - for connecting to replica sets also requires PyMongo version **>= - 1.8.0**. - -.. mongodoc:: rs - -Starting a Replica Set -~~~~~~~~~~~~~~~~~~~~~~ - -The main `replica set documentation -`_ contains extensive information -about setting up a new replica set or migrating an existing MongoDB -setup, be sure to check that out. Here, we'll just do the bare minimum -to get a three node replica set setup locally. - -.. warning:: Replica sets should always use multiple nodes in - production - putting all set members on the same physical node is - only recommended for testing and development. - -We start three ``mongod`` processes, each on a different port and with -a different dbpath, but all using the same replica set name "foo". In -the example we use the hostname "morton.local", so replace that with -your hostname when running: - -.. code-block:: bash - - $ hostname - morton.local - $ mongod --replSet foo/morton.local:27018,morton.local:27019 --rest - -.. code-block:: bash - - $ mongod --port 27018 --dbpath /data/db1 --replSet foo/morton.local:27017 --rest - -.. 
code-block:: bash - - $ mongod --port 27019 --dbpath /data/db2 --replSet foo/morton.local:27017 --rest - -Initializing the Set -~~~~~~~~~~~~~~~~~~~~ - -At this point all of our nodes are up and running, but the set has yet -to be initialized. Until the set is initialized no node will become -the primary, and things are essentially "offline". - -To initialize the set we need to connect to a single node and run the -initiate command. Since we don't have a primary yet, we'll need to -tell PyMongo that it's okay to connect to a slave/secondary:: - - >>> from pymongo import MongoClient, ReadPreference - >>> c = MongoClient("morton.local:27017", - read_preference=ReadPreference.SECONDARY) - -.. note:: We could have connected to any of the other nodes instead, - but only the node we initiate from is allowed to contain any - initial data. - -After connecting, we run the initiate command to get things started -(here we just use an implicit configuration, for more advanced -configuration options see the replica set documentation):: - - >>> c.admin.command("replSetInitiate") - {u'info': u'Config now saved locally. Should come online in about a minute.', - u'info2': u'no configuration explicitly specified -- making one', u'ok': 1.0} - -The three ``mongod`` servers we started earlier will now coordinate -and come online as a replica set. - -Connecting to a Replica Set -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The initial connection as made above is a special case for an -uninitialized replica set. Normally we'll want to connect -differently. A connection to a replica set can be made using the -normal :meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set. For example, any of the following -will create a connection to the set we just created:: - - >>> MongoClient("morton.local", replicaset='foo') - MongoClient([u'morton.local:27019', 'morton.local:27017', u'morton.local:27018']) - >>> MongoClient("morton.local:27018", replicaset='foo') - MongoClient([u'morton.local:27019', u'morton.local:27017', 'morton.local:27018']) - >>> MongoClient("morton.local", 27019, replicaset='foo') - MongoClient(['morton.local:27019', u'morton.local:27017', u'morton.local:27018']) - >>> MongoClient(["morton.local:27018", "morton.local:27019"]) - MongoClient(['morton.local:27019', u'morton.local:27017', 'morton.local:27018']) - >>> MongoClient("mongodb://morton.local:27017,morton.local:27018,morton.local:27019") - MongoClient(['morton.local:27019', 'morton.local:27017', 'morton.local:27018']) - -The nodes passed to :meth:`~pymongo.mongo_client.MongoClient` are called -the *seeds*. If only one host is specified the `replicaset` parameter -must be used to indicate this isn't a connection to a single node. -As long as at least one of the seeds is online, the driver will be able -to "discover" all of the nodes in the set and make a connection to the -current primary. - -Handling Failover -~~~~~~~~~~~~~~~~~ - -When a failover occurs, PyMongo will automatically attempt to find the -new primary node and perform subsequent operations on that node. This -can't happen completely transparently, however. Here we'll perform an -example failover to illustrate how everything behaves. 
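Because operations can raise :class:`~pymongo.errors.AutoReconnect` while the set elects a new primary, application code typically wraps reads in a small retry loop. A hedged sketch (the helper name and back-off policy are illustrative, not part of PyMongo):

    import time

    from pymongo.errors import AutoReconnect

    def find_one_with_retry(collection, query, retries=3):
        # Retry a read while the replica set fails over; AutoReconnect
        # means the driver will try to reconnect on the next operation.
        for attempt in range(retries):
            try:
                return collection.find_one(query)
            except AutoReconnect:
                if attempt == retries - 1:
                    raise
                time.sleep(2 ** attempt)  # brief back-off between attempts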
First, we'll -connect to the replica set and perform a couple of basic operations:: - - >>> db = MongoClient("morton.local", replicaSet='foo').test - >>> db.test.save({"x": 1}) - ObjectId('...') - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - -By checking the host and port, we can see that we're connected to -*morton.local:27017*, which is the current primary:: - - >>> db.connection.host - 'morton.local' - >>> db.connection.port - 27017 - -Now let's bring down that node and see what happens when we run our -query again:: - - >>> db.test.find_one() - Traceback (most recent call last): - pymongo.errors.AutoReconnect: ... - -We get an :class:`~pymongo.errors.AutoReconnect` exception. This means -that the driver was not able to connect to the old primary (which -makes sense, as we killed the server), but that it will attempt to -automatically reconnect on subsequent operations. When this exception -is raised our application code needs to decide whether to retry the -operation or to simply continue, accepting the fact that the operation -might have failed. - -On subsequent attempts to run the query we might continue to see this -exception. Eventually, however, the replica set will failover and -elect a new primary (this should take a couple of seconds in -general). At that point the driver will connect to the new primary and -the operation will succeed:: - - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - >>> db.connection.host - 'morton.local' - >>> db.connection.port - 27018 - -MongoReplicaSetClient -~~~~~~~~~~~~~~~~~~~~~ - -Using a :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` instead -of a simple :class:`~pymongo.mongo_client.MongoClient` offers two key features: -secondary reads and replica set health monitoring. To connect using -:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` just provide a -host:port pair and the name of the replica set:: - - >>> from pymongo import MongoReplicaSetClient - >>> MongoReplicaSetClient("morton.local:27017", replicaSet='foo') - MongoReplicaSetClient([u'morton.local:27019', u'morton.local:27017', u'morton.local:27018']) - -.. _secondary-reads: - -Secondary Reads -''''''''''''''' - -By default an instance of MongoReplicaSetClient will only send queries to -the primary member of the replica set. To use secondaries for queries -we have to change the :class:`~pymongo.read_preferences.ReadPreference`:: - - >>> db = MongoReplicaSetClient("morton.local:27017", replicaSet='foo').test - >>> from pymongo.read_preferences import ReadPreference - >>> db.read_preference = ReadPreference.SECONDARY_PREFERRED - -Now all queries will be sent to the secondary members of the set. If there are -no secondary members the primary will be used as a fallback. If you have -queries you would prefer to never send to the primary you can specify that -using the ``SECONDARY`` read preference:: - - >>> db.read_preference = ReadPreference.SECONDARY - -Read preference can be set on a client, database, collection, or on a -per-query basis, e.g.:: - - >>> db.collection.find_one(read_preference=ReadPreference.PRIMARY) - -Reads are configured using three options: **read_preference**, **tag_sets**, -and **secondary_acceptable_latency_ms**. - -**read_preference**: - -- ``PRIMARY``: Read from the primary. This is the default, and provides the - strongest consistency. If no primary is available, raise - :class:`~pymongo.errors.AutoReconnect`. 
- -- ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is - none, read from a secondary matching your choice of ``tag_sets`` and - ``secondary_acceptable_latency_ms``. - -- ``SECONDARY``: Read from a secondary matching your choice of ``tag_sets`` and - ``secondary_acceptable_latency_ms``. If no matching secondary is available, - raise :class:`~pymongo.errors.AutoReconnect`. - -- ``SECONDARY_PREFERRED``: Read from a secondary matching your choice of - ``tag_sets`` and ``secondary_acceptable_latency_ms`` if available, otherwise - from the primary (regardless of the primary's tags and latency). - -- ``NEAREST``: Read from any member matching your choice of ``tag_sets`` and - ``secondary_acceptable_latency_ms``. - -**tag_sets**: - -Replica-set members can be `tagged -`_ according to any -criteria you choose. By default, MongoReplicaSetClient ignores tags when -choosing a member to read from, but it can be configured with the ``tag_sets`` -parameter. ``tag_sets`` must be a list of dictionaries, each dict providing tag -values that the replica set member must match. MongoReplicaSetClient tries each -set of tags in turn until it finds a set of tags with at least one matching -member. For example, to prefer reads from the New York data center, but fall -back to the San Francisco data center, tag your replica set members according -to their location and create a MongoReplicaSetClient like so:: - - >>> rsc = MongoReplicaSetClient( - ... "morton.local:27017", - ... replicaSet='foo', - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{'dc': 'ny'}, {'dc': 'sf'}] - ... ) - -MongoReplicaSetClient tries to find secondaries in New York, then San Francisco, -and raises :class:`~pymongo.errors.AutoReconnect` if none are available. As an -additional fallback, specify a final, empty tag set, ``{}``, which means "read -from any member that matches the mode, ignoring tags." - -**secondary_acceptable_latency_ms**: - -If multiple members match the mode and tag sets, MongoReplicaSetClient reads -from among the nearest members, chosen according to ping time. By default, -only members whose ping times are within 15 milliseconds of the nearest -are used for queries. You can choose to distribute reads among members with -higher latencies by setting ``secondary_acceptable_latency_ms`` to a larger -number. In that case, MongoReplicaSetClient distributes reads among matching -members within ``secondary_acceptable_latency_ms`` of the closest member's -ping time. - -.. note:: ``secondary_acceptable_latency_ms`` is ignored when talking to a - replica set *through* a mongos. The equivalent is the localThreshold_ command - line option. - -.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - -Health Monitoring -''''''''''''''''' - -When MongoReplicaSetClient is initialized it launches a background task to -monitor the replica set for changes in: - -* Health: detect when a member goes down or comes up, or if a different member - becomes primary -* Configuration: detect changes in tags -* Latency: track a moving average of each member's ping time - -Replica-set monitoring ensures queries are continually routed to the proper -members as the state of the replica set changes. - -It is critical to call -:meth:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient.close` to terminate -the monitoring task before your process exits.
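A minimal sketch of one way to guarantee this, shown here for illustration only (it assumes the replica set ``foo`` from the examples above is reachable)::

    from pymongo import MongoReplicaSetClient

    client = MongoReplicaSetClient("morton.local:27017", replicaSet='foo')
    try:
        print(client.test.test.find_one())
    finally:
        # Always stop the background monitoring task before the process exits.
        client.close()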
- -.. _mongos-high-availability: - -High Availability and mongos ---------------------------- - -An instance of :class:`~pymongo.mongo_client.MongoClient` can be configured -to automatically connect to a different mongos if the instance it is -currently connected to fails. If a failure occurs, PyMongo will attempt -to find the nearest mongos to perform subsequent operations. As with a -replica set, this can't happen completely transparently. Here we'll perform -an example failover to illustrate how everything behaves. First, we'll -connect to a sharded cluster, using a seed list, and perform a couple of -basic operations:: - - >>> db = MongoClient('morton.local:30000,morton.local:30001,morton.local:30002').test - >>> db.test.save({"x": 1}) - ObjectId('...') - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - -Each member of the seed list passed to MongoClient must be a mongos. By checking -the host, port, and is_mongos attributes we can see that we're connected to -*morton.local:30001*, a mongos:: - - >>> db.connection.host - 'morton.local' - >>> db.connection.port - 30001 - >>> db.connection.is_mongos - True - -Now let's shut down that mongos instance and see what happens when we run our -query again:: - - >>> db.test.find_one() - Traceback (most recent call last): - pymongo.errors.AutoReconnect: ... - -As in the replica set example earlier in this document, we get -an :class:`~pymongo.errors.AutoReconnect` exception. This means -that the driver was not able to connect to the original mongos at port -30001 (which makes sense, since we shut it down), but that it will -attempt to connect to a new mongos on subsequent operations. When this -exception is raised our application code needs to decide whether to retry -the operation or to simply continue, accepting the fact that the operation -might have failed. - -As long as one of the seed list members is still available the next -operation will succeed:: - - >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} - >>> db.connection.host - 'morton.local' - >>> db.connection.port - 30002 - >>> db.connection.is_mongos - True
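Both failover examples leave the retry decision to the application. One possible pattern, shown here as an illustrative sketch rather than part of the original documents (it assumes the read is idempotent and that failover completes within a few seconds), is a simple retry loop with exponential backoff::

    import time

    from pymongo.errors import AutoReconnect

    def find_one_with_retry(collection, query, max_attempts=5):
        for attempt in range(max_attempts):
            try:
                return collection.find_one(query)
            except AutoReconnect:
                if attempt == max_attempts - 1:
                    raise
                # Back off while the replica set elects a new primary or the
                # driver locates another mongos.
                time.sleep(2 ** attempt)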
diff --git a/doc/examples/index.rst b/doc/examples/index.rst deleted file mode 100644 index 9d4746a8f5..0000000000 --- a/doc/examples/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -Examples -======== - -The examples in this section are intended to give in-depth overviews -of how to accomplish specific tasks with MongoDB and PyMongo. - -Unless otherwise noted, all examples assume that a MongoDB instance is -running on the default host and port. Assuming you have `downloaded -and installed `_ -MongoDB, you can start it like so: - -.. code-block:: bash - - $ mongod - -.. toctree:: - :maxdepth: 1 - - aggregation - authentication - bulk - custom_type - geo - gevent - gridfs - high_availability - mod_wsgi - requests diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst deleted file mode 100644 index 0549d6653e..0000000000 --- a/doc/examples/mod_wsgi.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _pymongo-and-mod_wsgi: - -PyMongo and mod_wsgi -==================== - -If you run your application under -`mod_wsgi `_ and you use PyMongo with its C -extensions enabled, follow these guidelines for best performance: - -* Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive. -* Assign each application to a separate daemon with ``WSGIProcessGroup``. -* Use ``WSGIApplicationGroup %{GLOBAL}`` to ensure your application is running - in the daemon's main Python interpreter, not a sub interpreter. - -For example, this ``mod_wsgi`` configuration ensures an application runs in the -main interpreter:: - - <VirtualHost *> - WSGIDaemonProcess my_process - WSGIScriptAlias /my_app /path/to/app.wsgi - WSGIProcessGroup my_process - WSGIApplicationGroup %{GLOBAL} - </VirtualHost> - -If you have multiple applications that use PyMongo, put each in a separate -daemon, still in the global application group:: - - <VirtualHost *> - WSGIDaemonProcess my_process - WSGIScriptAlias /my_app /path/to/app.wsgi - <Location /my_app> - WSGIProcessGroup my_process - </Location> - - WSGIDaemonProcess my_other_process - WSGIScriptAlias /my_other_app /path/to/other_app.wsgi - <Location /my_other_app> - WSGIProcessGroup my_other_process - </Location> - - WSGIApplicationGroup %{GLOBAL} - </VirtualHost> - -Background: Python C extensions in general have issues running in multiple -Python sub interpreters. These difficulties are explained in the documentation for -`Py_NewInterpreter `_ -and in the `Multiple Python Sub Interpreters -`_ -section of the ``mod_wsgi`` documentation. - -Beginning with PyMongo 2.7, the C extension for BSON detects when it is running -in a sub interpreter and activates a workaround, which adds a small cost to -BSON decoding. To avoid this cost, use ``WSGIApplicationGroup %{GLOBAL}`` to -ensure your application runs in the main interpreter. - -Since your program runs in the main interpreter it should not share its -process with any other applications, lest they interfere with each other's -state. Each application should have its own daemon process, as shown in the -example above. \ No newline at end of file diff --git a/doc/examples/requests.rst b/doc/examples/requests.rst deleted file mode 100644 index 08da05ab2e..0000000000 --- a/doc/examples/requests.rst +++ /dev/null @@ -1,110 +0,0 @@ -Requests -======== - -PyMongo supports the idea of a *request*: a series of operations executed with -a single socket, which are guaranteed to be processed on the server in the same -order as they ran on the client. - -Requests are not usually necessary with PyMongo. -By default, the methods :meth:`~pymongo.collection.Collection.insert`, -:meth:`~pymongo.collection.Collection.update`, -:meth:`~pymongo.collection.Collection.save`, and -:meth:`~pymongo.collection.Collection.remove` block until they receive -acknowledgment from the server, so ordered execution is already guaranteed. You -can be certain the next :meth:`~pymongo.collection.Collection.find` or -:meth:`~pymongo.collection.Collection.count`, for example, is executed on the -server after the writes complete. This is called "read-your-writes -consistency." - -An example of when a request is necessary is if a series of documents are -inserted with ``w=0`` for performance reasons, and you want to query those -documents immediately afterward: with ``w=0`` the writes can queue up at the -server and might not be immediately visible in query results. Wrapping the -inserts and queries within -:meth:`~pymongo.mongo_client.MongoClient.start_request` and -:meth:`~pymongo.mongo_client.MongoClient.end_request` forces a query to be on -the same socket as the inserts, so the query won't execute until the inserts -are complete on the server side. - -Example ------- - -Let's consider a collection of web-analytics counters. We want to count the -number of page views our site has served for each combination of browser, -region, and OS, and then show the user the number of page views from his or her -region, *including* the user's own visit. We have three ways to do so reliably: - -1.
Simply update the counters with an acknowledged write (the default), and -then ``find`` all counters for the visitor's region. This will ensure that the -``update`` completes before the ``find`` begins, but it comes with a performance -penalty that may be unacceptable for analytics. - -2. Create the :class:`~pymongo.mongo_client.MongoClient` with ``w=0`` and -``auto_start_request=True`` to do unacknowledged writes and ensure each thread -gets its own socket. - -3. Explicitly call :meth:`~pymongo.mongo_client.MongoClient.start_request`, -then do the unacknowledged updates and the queries within the request. This -third method looks like: - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - counts = client.requests_example.counts - counts.drop() - region, browser, os = 'US', 'Firefox', 'Mac OS X' - -.. doctest:: - - >>> client = MongoClient() - >>> counts = client.requests_example.counts - >>> region, browser, os = 'US', 'Firefox', 'Mac OS X' - >>> request = client.start_request() - >>> try: - ... counts.update( - ... {'region': region, 'browser': browser, 'os': os}, - ... {'$inc': {'n': 1 }}, - ... upsert=True, - ... w=0) # unacknowledged write - ... - ... # This always runs after update has completed: - ... count = sum([p['n'] for p in counts.find({'region': region})]) - ... finally: - ... request.end() - >>> print count - 1 - -Requests can also be used as context managers, with the `with statement -`_, which makes -the previous example more terse: - -.. doctest:: - - >>> client.in_request() - False - >>> with client.start_request(): - ... # MongoClient is now in request - ... counts.update( - ... {'region': region, 'browser': browser, 'os': os}, - ... {'$inc': {'n': 1 }}, - ... upsert=True, - ... safe=False) - ... print sum([p['n'] for p in counts.find({'region': region})]) - 2 - >>> client.in_request() # request automatically ended - False - -Requests And ``max_pool_size`` ------------------------------- - -A thread in a request retains exclusive access to a socket until its request -ends or the thread dies; thus, applications in which more than 100 threads are -in requests at once should disable the ``max_pool_size`` option:: - - client = MongoClient(host, port, max_pool_size=None) - -Failure to increase or disable ``max_pool_size`` in such an application can -leave threads forever waiting for sockets. - -See :ref:`connection-pooling` diff --git a/doc/faq.rst b/doc/faq.rst deleted file mode 100644 index 4e072be730..0000000000 --- a/doc/faq.rst +++ /dev/null @@ -1,311 +0,0 @@ -Frequently Asked Questions -========================== - -.. contents:: - -Is PyMongo thread-safe? ------------------------ - -PyMongo is thread-safe and even provides built-in connection pooling -for threaded applications. - -.. _connection-pooling: - -How does connection pooling work in PyMongo? --------------------------------------------- - -Every :class:`~pymongo.mongo_client.MongoClient` instance has a built-in -connection pool. The pool begins with one open connection. If necessary to -support concurrent access to MongoDB from multiple threads in your application, -the client opens new connections on demand. - -By default, there is no thread-affinity for connections. - -In versions before 2.6, the default ``max_pool_size`` was 10, and it did not -actually bound the number of open connections; it only determined the number -of connections that would be kept open when no longer in use. 
- -Starting with PyMongo 2.6, the size of the connection pool is capped at -``max_pool_size``, which now defaults to 100. When a thread in your application -begins an operation on MongoDB, if all other connections are in use and the -pool has reached its maximum, the thread pauses, waiting for a connection to -be returned to the pool by another thread. - -The default configuration for a :class:`~pymongo.mongo_client.MongoClient` -works for most applications:: - - client = MongoClient(host, port) - -Create this client **once** when your program starts up, and reuse it for all -operations. It is a common mistake to create a new client for each request, -which is very inefficient. - -To support extremely high numbers of concurrent MongoDB operations within one -process, increase ``max_pool_size``:: - - client = MongoClient(host, port, max_pool_size=200) - -... or make it unbounded:: - - client = MongoClient(host, port, max_pool_size=None) - -By default, any number of threads are allowed to wait for connections to become -available, and they can wait any length of time. Override ``waitQueueMultiple`` -to cap the number of waiting threads. E.g., to keep the number of waiters less -than or equal to 500:: - - client = MongoClient(host, port, max_pool_size=50, waitQueueMultiple=10) - -When 500 threads are waiting for a socket, the 501st that needs a connection -raises :exc:`~pymongo.errors.ExceededMaxWaiters`. Use this option to -bound the amount of queueing in your application during a load spike, at the -cost of additional exceptions. - -Once the pool reaches its max size, additional threads are allowed to wait -indefinitely for connections to become available, unless you set -``waitQueueTimeoutMS``:: - - client = MongoClient(host, port, waitQueueTimeoutMS=100) - -A thread that waits more than 100ms (in this example) for a connection raises -:exc:`~pymongo.errors.ConnectionFailure`. Use this option if it is more -important to bound the duration of operations during a load spike than it is to -complete every operation. - -When :meth:`~pymongo.mongo_client.MongoClient.disconnect` is called by any thread, -all sockets are closed. - -:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` maintains one -connection pool per server in your replica set. - -.. seealso:: :doc:`examples/requests` - -Does PyMongo support Python 3? ------------------------------- - -Starting with version 2.2 PyMongo supports Python 3.x where x >= 1. See the -:doc:`python3` for details. - -Does PyMongo support asynchronous frameworks like Gevent, Tornado, or Twisted? ------------------------------------------------------------------------------- - -PyMongo fully supports :doc:`Gevent `. - -To use MongoDB with `Tornado `_ see the -`Motor `_ project. - -For `Twisted `_, see `TxMongo -`_. Compared to PyMongo, -TxMongo is less stable, lacks features, and is less actively maintained. - -What does *CursorNotFound* cursor id not valid at server mean? --------------------------------------------------------------- -Cursors in MongoDB can timeout on the server if they've been open for -a long time without any operations being performed on them. This can -lead to an :class:`~pymongo.errors.CursorNotFound` exception being -raised when attempting to iterate the cursor. - -How do I change the timeout value for cursors? ----------------------------------------------- -MongoDB doesn't support custom timeouts for cursors, but cursor -timeouts can be turned off entirely. 
Pass ``timeout=False`` to -:meth:`~pymongo.collection.Collection.find`. - -How can I store :mod:`decimal.Decimal` instances? ------------------------------------------------- -MongoDB only supports IEEE 754 floating points - the same as the -Python float type. The only way PyMongo could store Decimal instances -would be to convert them to this standard, so you'd really only be -storing floats anyway - we force users to do this conversion -explicitly so that they are aware that it is happening. - -I'm saving ``9.99`` but when I query my document contains ``9.9900000000000002`` - what's going on here? -------------------------------------------------------------------------------------------------------- -The database representation is ``9.99`` as an IEEE floating point (which -is common to MongoDB and Python as well as most other modern -languages). The problem is that ``9.99`` cannot be represented exactly -with a double precision floating point - this is true in some versions of -Python as well: - - >>> 9.99 - 9.9900000000000002 - -The result that you get when you save ``9.99`` with PyMongo is exactly the -same as the result you'd get saving it with the JavaScript shell or -any of the other languages (and as the data you're working with when -you type ``9.99`` into a Python program). - -Can you add attribute style access for documents? ------------------------------------------------- -This request has come up a number of times but we've decided not to -implement anything like this. The relevant `jira case -`_ has some information -about the decision, but here is a brief summary: - -1. This will pollute the attribute namespace for documents, so could - lead to subtle bugs / confusing errors when using a key with the - same name as a dictionary method. - -2. The only reason we even use SON objects instead of regular - dictionaries is to maintain key ordering, since the server - requires this for certain operations. So we're hesitant to - needlessly complicate SON (at some point it's hypothetically - possible we might want to revert back to using dictionaries alone, - without breaking backwards compatibility for everyone). - -3. It's easy (and Pythonic) for new users to deal with documents, - since they behave just like dictionaries. If we start changing - their behavior it adds a barrier to entry for new users - another - class to learn. - -What is the correct way to handle time zones with PyMongo? ----------------------------------------------------------- - -Prior to PyMongo version 1.7, the correct way was to save only naive -:class:`~datetime.datetime` instances, and to save all dates as -UTC. In versions >= 1.7, the driver will automatically convert aware -datetimes to UTC before saving them. By default, datetimes retrieved -from the server (no matter what version of the driver you're using) -will be naive and represent UTC. In newer versions of the driver you -can set the :class:`~pymongo.mongo_client.MongoClient` `tz_aware` -parameter to ``True``, which will cause all -:class:`~datetime.datetime` instances returned from that MongoClient to -be aware (UTC). This setting is recommended, as it can force -application code to handle timezones properly. - -.. warning:: - - Be careful not to save naive :class:`~datetime.datetime` - instances that are not UTC (i.e. the result of calling - :meth:`datetime.datetime.now`). - -Something like :mod:`pytz` can be used to convert dates to localtime -after retrieving them from the database.
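For example, a short sketch (it assumes :mod:`pytz` is installed and that the stored documents have a ``date`` field)::

    import pytz

    from pymongo import MongoClient

    client = MongoClient(tz_aware=True)  # datetimes are returned as aware UTC
    doc = client.test.dates.find_one()
    # Convert the UTC datetime to US/Eastern local time for display.
    local_dt = doc['date'].astimezone(pytz.timezone('US/Eastern'))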
- -How can I save a :mod:`datetime.date` instance? ------------------------------------------------ -PyMongo doesn't support saving :mod:`datetime.date` instances, since -there is no BSON type for dates without times. Rather than having the -driver enforce a convention for converting :mod:`datetime.date` -instances to :mod:`datetime.datetime` instances for you, any -conversion should be performed in your client code. - -.. _web-application-querying-by-objectid: - -When I query for a document by ObjectId in my web application I get no result ------------------------------------------------------------------------------ -It's common in web applications to encode documents' ObjectIds in URLs, like:: - - "/posts/50b3bda58a02fb9a84d8991e" - -Your web framework will pass the ObjectId portion of the URL to your request -handler as a string, so it must be converted to :class:`~bson.objectid.ObjectId` -before it is passed to :meth:`~pymongo.collection.Collection.find_one`. It is a -common mistake to forget to do this conversion. Here's how to do it correctly -in Flask_ (other web frameworks are similar):: - - from pymongo import MongoClient - from bson.objectid import ObjectId - - from flask import Flask, render_template - - client = MongoClient() - app = Flask(__name__) - - @app.route("/posts/<_id>") - def show_post(_id): - # NOTE!: converting _id from string to ObjectId before passing to find_one - post = client.db.posts.find_one({'_id': ObjectId(_id)}) - return render_template('post.html', post=post) - - if __name__ == "__main__": - app.run() - -.. _Flask: http://flask.pocoo.org/ - -.. seealso:: :ref:`querying-by-objectid` - -How can I use PyMongo from Django? ---------------------------------- -`Django `_ is a popular Python web -framework. Django includes an ORM, :mod:`django.db`. Currently, -there's no official MongoDB backend for Django. - -`django-mongodb-engine `_ -is an unofficial MongoDB backend that supports Django aggregations, (atomic) -updates, embedded objects, Map/Reduce and GridFS. It allows you to use most -of Django's built-in features, including the ORM, admin, authentication, site -and session frameworks and caching. - -However, it's easy to use MongoDB (and PyMongo) from Django -without using a Django backend. Certain features of Django that require -:mod:`django.db` (admin, authentication and sessions) will not work -using just MongoDB, but most of what Django provides can still be -used. - -One project which should make working with MongoDB and Django easier -is `mango `_. Mango is a set of -MongoDB backends for Django sessions and authentication (bypassing -:mod:`django.db` entirely). - -.. _using-with-mod-wsgi: - -Does PyMongo work with **mod_wsgi**? ------------------------------------ -Yes. See the configuration guide for :ref:`pymongo-and-mod_wsgi`. - -How can I use something like Python's :mod:`json` module to encode my documents to JSON? ----------------------------------------------------------------------------------------- -The :mod:`json` module won't work out of the box with all documents -from PyMongo as PyMongo supports some special types (like -:class:`~bson.objectid.ObjectId` and :class:`~bson.dbref.DBRef`) -that are not supported in JSON. We've added some utilities for working -with :mod:`json` and :mod:`simplejson` in the -:mod:`~bson.json_util` module.
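For example, a brief sketch of round-tripping a document containing an :class:`~bson.objectid.ObjectId` (exact reprs may vary by Python version)::

    >>> from bson.json_util import dumps, loads
    >>> from bson.objectid import ObjectId
    >>> dumps({'_id': ObjectId('50b3bda58a02fb9a84d8991e')})
    '{"_id": {"$oid": "50b3bda58a02fb9a84d8991e"}}'
    >>> loads('{"_id": {"$oid": "50b3bda58a02fb9a84d8991e"}}')
    {'_id': ObjectId('50b3bda58a02fb9a84d8991e')}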
- -.. _year-2038-problem: - -Why do I get an error for dates on or after 2038? ------------------------------------------------- -On Unix systems, dates are represented as seconds from 1 January 1970 and -usually stored in the C :mod:`time_t` type. On most 32-bit operating systems -:mod:`time_t` is a signed 4 byte integer which means it can't handle dates -after 19 January 2038; this is known as the `year 2038 problem -`_. Neither MongoDB nor Python -uses :mod:`time_t` to represent dates internally, so they do not suffer from this -problem, but Python's :meth:`datetime.datetime.fromtimestamp` does, which -means it is susceptible. - -Prior to version 2.0, PyMongo used :meth:`datetime.datetime.fromtimestamp` -in its pure Python :mod:`bson` module. Therefore, on 32-bit systems you may -get an error retrieving dates after 2038 from MongoDB using older versions -of PyMongo with the pure Python version of :mod:`bson`. - -This problem was fixed in the pure Python implementation of :mod:`bson` by -commit ``b19ab334af2a29353529`` (8 June 2011 - PyMongo 2.0). - -The C implementation of :mod:`bson` also used to suffer from this problem but -it was fixed in commit ``566bc9fb7be6f9ab2604`` (10 May 2010 - PyMongo 1.7). - -Why do I get OverflowError decoding dates stored by another language's driver? ------------------------------------------------------------------------------ -PyMongo decodes BSON datetime values to instances of Python's -:class:`datetime.datetime`. Instances of :class:`datetime.datetime` are -limited to years between :data:`datetime.MINYEAR` (usually 1) and -:data:`datetime.MAXYEAR` (usually 9999). Some MongoDB drivers (e.g. the PHP -driver) can store BSON datetimes with year values far outside those supported -by :class:`datetime.datetime`. - -There are a few ways to work around this issue. One option is to filter -out documents with values outside of the range supported by -:class:`datetime.datetime`:: - - >>> from datetime import datetime - >>> coll = client.test.dates - >>> cur = coll.find({'dt': {'$gte': datetime.min, '$lte': datetime.max}}) - -Another option, assuming you don't need the datetime field, is to filter out -just that field:: - - >>> cur = coll.find({}, fields={'dt': False}) - diff --git a/doc/index.rst b/doc/index.rst index 471224cd56..85812d1b14 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,73 +1,60 @@ PyMongo |release| Documentation =============================== +.. note:: The PyMongo documentation has been migrated to the + `MongoDB Documentation site `_. + This site contains only the detailed changelog and API docs, while the + rest of the documentation appears on the MongoDB Documentation site. + Overview -------- **PyMongo** is a Python distribution containing tools for working with -`MongoDB `_, and is the recommended way to -work with MongoDB from Python. This documentation attempts to explain -everything you need to know to use **PyMongo**. - -.. todo:: a list of PyMongo's features - -:doc:`installation` - Instructions on how to get the distribution. - -:doc:`tutorial` - Start here for a quick overview. - -:doc:`examples/index` - Examples of how to perform specific tasks. - -:doc:`faq` - Some questions that come up often. - -:doc:`python3` - Frequently asked questions about python 3 support. +`MongoDB `_, and is the recommended way to +work with MongoDB from Python. :doc:`api/index` The complete API documentation, organized by module. -:doc:`tools` - A listing of Python tools and libraries that have been written for - MongoDB. +:doc:`changelog` + A full list of changes to PyMongo. + Getting Help ------------ -If you're having trouble or have questions about PyMongo, the best place to ask is the `MongoDB user group `_.
Once you get an answer, it'd be great if you could work it back into this documentation and contribute! +If you're having trouble or have questions about PyMongo, ask your question on +our `MongoDB Community Forum `_. +You may also want to consider a +`commercial support subscription `_. +Once you get an answer, it'd be great if you could work it back into this +documentation and contribute! Issues ------ All issues should be reported (and can be tracked / voted for / commented on) at the main `MongoDB JIRA bug tracker -`_, in the "Python Driver" +`_, in the "Python Driver" project. +Feature Requests / Feedback +--------------------------- +Use our `feedback engine `_ +to send us feature requests and general feedback about PyMongo. + Contributing ------------ **PyMongo** has a large :doc:`community ` and contributions are always encouraged. Contributions can be as simple as minor tweaks to this documentation. To contribute, fork the project on -`github `_ and send a +`GitHub `_ and send a pull request. -Changes ------- -See the :doc:`changelog` for a full list of changes to PyMongo. -For older versions of the documentation please see the -`archive list `_. - About This Documentation ------------------------ This documentation is generated using the `Sphinx -`_ documentation generator. The source files +`_ documentation generator. The source files for the documentation are located in the *doc/* directory of the -**PyMongo** distribution. To generate the docs locally run the -following command from the root directory of the **PyMongo** source: - -.. code-block:: bash - - $ python setup.py doc +**PyMongo** distribution. See the PyMongo `contributing guide `_ +for instructions on building the docs from source. Indices and tables ------------------ @@ -79,13 +66,6 @@ Indices and tables .. toctree:: :hidden: - installation - tutorial - examples/index - faq api/index - tools - contributors changelog - python3 - + contributors diff --git a/doc/installation.rst b/doc/installation.rst deleted file mode 100644 index 436c8c79f8..0000000000 --- a/doc/installation.rst +++ /dev/null @@ -1,220 +0,0 @@ -Installing / Upgrading ====================== -.. highlight:: bash - -**PyMongo** is in the `Python Package Index -`_. - -Microsoft Windows ----------------- - -We recommend using the `MS Windows installers` available from the `Python -Package Index `_. - -Installing with pip ------------------- - -To use `pip `_ -to install pymongo on platforms other than Windows:: - - $ pip install pymongo - -To get a specific version of pymongo:: - - $ pip install pymongo==2.6.3 - -To upgrade using pip:: - - $ pip install --upgrade pymongo - -.. note:: - pip does not support installing python packages in .egg format. If you would - like to install PyMongo from a .egg provided on pypi use easy_install - instead. - -Installing with easy_install ---------------------------- - -To use ``easy_install`` from -`setuptools `_ do:: - - $ easy_install pymongo - -To upgrade do:: - - $ easy_install -U pymongo - -Dependencies for installing C Extensions on Unix ------------------------------------------------ - -MongoDB, Inc. does not provide statically linked binary packages for Unix -flavors other than OSX. To build the optional C extensions you must have the -GNU C compiler (gcc) installed. Depending on your flavor of Unix (or Linux -distribution) you may also need a python development package that provides -the necessary header files for your version of Python. The package name may -vary from distro to distro.
- -Debian and Ubuntu users should issue the following command:: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux, -Fedora, etc.) should issue the following command:: - - $ sudo yum install gcc python-devel - -OSX ---- - -MongoDB, Inc. provides PyMongo in .egg format for Apple provided Python -versions on OSX 10.7 and newer (usually python 2.5, 2.6, and 2.7). If you want -to install PyMongo for other Python versions (or from source) you will have to -install the following to build the C extensions: - -**Snow Leopard (10.6)** - Xcode 3 with 'UNIX Development Support'. - -**Snow Leopard Xcode 4**: The Python versions shipped with OSX 10.6.x -are universal binaries. They support i386, PPC, and (in the case of python2.6) -x86_64. Xcode 4 removed support for PPC, causing the distutils version shipped -with Apple's builds of Python to fail to build the C extensions if you have -Xcode 4 installed. There is a workaround:: - - # For Apple-supplied Python2.6 (installed at /usr/bin/python2.6) and - # some builds from python.org - $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m easy_install pymongo - - # For 32-bit-only Python (/usr/bin/python2.5) and some builds - # from python.org - $ env ARCHFLAGS='-arch i386' python -m easy_install pymongo - -See `http://bugs.python.org/issue11623 `_ -for a more detailed explanation. - -**Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 2.7 >= 2.7.4 or Python 3.x >= 3.2.4 downloaded from -python.org. In all cases Xcode must be installed with 'UNIX Development -Support'. - -**Xcode 5.1**: Starting with version 5.1 the version of clang that ships with -Xcode throws an error when it encounters compiler flags it doesn't recognize. -This may cause C extension builds to fail with an error similar to:: - - clang: error: unknown argument: '-mno-fused-madd' [-Wunused-command-line-argument-hard-error-in-future] - -There are workarounds:: - - # Apple specified workaround for Xcode 5.1 - # easy_install - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future easy_install pymongo - # or pip - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install pymongo - - # Alternative workaround using CFLAGS - # easy_install - $ CFLAGS=-Qunused-arguments easy_install pymongo - # or pip - $ CFLAGS=-Qunused-arguments pip install pymongo - -Installing from source ----------------------- - -If you'd rather install directly from the source (i.e. to stay on the -bleeding edge), install the C extension dependencies then check out the -latest source from github and install the driver from the resulting tree:: - - $ git clone git://github.com/mongodb/mongo-python-driver.git pymongo - $ cd pymongo/ - $ python setup.py install - - -Installing from source on Windows -................................. - -.. note:: - - MongoDB, Inc. provides pre-built exe installers for 32-bit and 64-bit - Windows. We recommend that users install those packages (`available from - pypi `_). - -If you want to install PyMongo with C extensions from source the following -directions apply to both CPython and ActiveState's ActivePython: - -64-bit Windows -~~~~~~~~~~~~~~ - -For Python 3.3 and newer install Visual Studio 2010. For Python 3.2 and older -install Visual Studio 2008. In either case you must use the full version as -Visual C++ Express does not provide 64-bit compilers. 
Make sure that you check -the "x64 Compilers and Tools" option under Visual C++. - -32-bit Windows -~~~~~~~~~~~~~~ - -For Python 3.3 and newer install Visual C++ 2010 Express. - -For Python 2.6 through 3.2 install Visual C++ 2008 Express SP1. - -For Python 2.4 or 2.5 you must install -`MingW32 `_ then run the -following command to install:: - - python setup.py build -c mingw32 install - -.. _install-no-c: - -Installing Without C Extensions -------------------------------- - -By default, the driver attempts to build and install optional C -extensions (used for increasing performance) when it is installed. If -any extension fails to build the driver will be installed anyway but a -warning will be printed. - -If you wish to install PyMongo without the C extensions, even if the -extensions build properly, it can be done using a command line option to -*setup.py*:: - - $ python setup.py --no_ext install - -Building PyMongo egg Packages ------------------------------ - -Some organizations do not allow compilers and other build tools on production -systems. To install PyMongo on these systems with C extensions you may need to -build custom egg packages. Make sure that you have installed the dependencies -listed above for your operating system then run the following command in the -PyMongo source directory:: - - $ python setup.py bdist_egg - -The egg package can be found in the dist/ subdirectory. The file name will -resemble “pymongo-2.6.3-py2.7-linux-x86_64.egg” but may have a different name -depending on your platform and the version of python you use to compile. - -.. warning:: - - These “binary distributions,” will only work on systems that resemble the - environment on which you built the package. In other words, ensure that - operating systems and versions of Python and architecture (i.e. “32” or “64” - bit) match. - -Copy this file to the target system and issue the following command to install the -package:: - - $ sudo easy_install pymongo-2.6.3-py2.7-linux-x86_64.egg - -Installing a release candidate ------------------------------- - -MongoDB, Inc. may occasionally tag a release candidate for testing by the -community before final release. These releases will not be uploaded to pypi -but can be found on the -`github tags page `_. -They can be installed by passing the full URL for the tag to pip:: - - $ pip install https://github.com/mongodb/mongo-python-driver/archive/2.7rc1.tar.gz - -or easy_install:: - - $ easy_install https://github.com/mongodb/mongo-python-driver/archive/2.7rc1.tar.gz - diff --git a/doc/make.bat b/doc/make.bat index 4ccc1590eb..aa1adb91a6 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,113 +1,35 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -set SPHINXBUILD=sphinx-build -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. 
linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyMongo.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyMongo.ghc - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst new file mode 100644 index 0000000000..fda3e2e129 --- /dev/null +++ b/doc/migrate-to-pymongo4.rst @@ -0,0 +1,995 @@ +:orphan: + +.. _pymongo4-migration-guide: + +PyMongo 4 Migration Guide +========================= + +.. 
testsetup:: + + from pymongo import MongoClient, ReadPreference + + client = MongoClient() + database = client.my_database + collection = database.my_collection + +PyMongo 4.0 brings a number of improvements as well as some backward breaking +changes. This guide provides a roadmap for migrating an existing application +from PyMongo 3.x to 4.x or writing libraries that will work with both +PyMongo 3.x and 4.x. + +PyMongo 3 +--------- + +The first step in any successful migration involves upgrading to, or +requiring, at least the latest version of PyMongo 3.x. If your project has a +requirements.txt file, add the line "pymongo >= 3.12, < 4.0" until you have +completely migrated to PyMongo 4. Most of the key new methods and options from +PyMongo 4.0 are backported in PyMongo 3.12, making migration much easier. + +.. note:: Users of PyMongo 2.x who wish to upgrade to 4.x must first upgrade + to PyMongo 3.x by following the `PyMongo 3 Migration Guide + `_. + +Python 3.6+ +----------- + +PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to +upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from +Python 2 should consult `Python 3 `_. + +Enable Deprecation Warnings +--------------------------- + +:exc:`DeprecationWarning` is raised by most methods removed in PyMongo 4.0. +Make sure you enable runtime warnings to see where deprecated functions and +methods are being used in your application:: + + python -Wd + +Warnings can also be changed to errors:: + + python -Wd -Werror + +.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when + used. See `Removed features with no migration path`_. + +MongoReplicaSetClient +--------------------- + +Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +Since PyMongo 3.0, ``MongoReplicaSetClient`` has been identical to +:class:`pymongo.mongo_client.MongoClient`. Applications can simply replace +``MongoReplicaSetClient`` with :class:`pymongo.mongo_client.MongoClient` and +get the same behavior. + +MongoClient +----------- + +.. _pymongo4-migration-direct-connection: + +``directConnection`` defaults to False +...................................... + +The ``directConnection`` URI option and keyword argument to +:class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, +allowing for the automatic discovery of replica sets. This means that if you +want a direct connection to a single server you must pass +``directConnection=True`` as a URI option or keyword argument. + +If you see any :exc:`~pymongo.errors.ServerSelectionTimeoutError` errors after upgrading from PyMongo 3 to 4.x, you likely +need to add ``directConnection=True`` when creating the client. +Here are some example errors: + +.. code-block:: + + pymongo.errors.ServerSelectionTimeoutError: mongo_node2: [Errno 8] nodename nor servname + provided, or not known,mongo_node1:27017 + +.. code-block:: + + ServerSelectionTimeoutError: No servers match selector "Primary()", Timeout: 30s, + Topology Description: ... + + +Additionally, the "isWritablePrimary" attribute of a hello command sent back by the server will +always be True if ``directConnection=False``:: + + >>> client.admin.command('hello')['isWritablePrimary'] + True + + +The waitQueueMultiple parameter is removed +.......................................... + +Removed the ``waitQueueMultiple`` keyword argument to +:class:`~pymongo.mongo_client.MongoClient` and removed +:exc:`pymongo.errors.ExceededMaxWaiters`.
Instead of using +``waitQueueMultiple`` to bound queuing, limit the size of the thread +pool in your application. + +The socketKeepAlive parameter is removed +.......................................... + +Removed the ``socketKeepAlive`` keyword argument to +:class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP +keepalive. For more information see the `documentation `_. + +Renamed URI options +................... + +Several deprecated URI options have been renamed to the standardized +option names defined in the +`URI options specification `_. +The old option names and their renamed equivalents are summarized in the table +below. Some renamed options have different semantics from the option being +replaced as noted in the 'Migration Notes' column. + ++--------------------+-------------------------------+--------------------------------------------------------+ +| Old URI Option | Renamed URI Option | Migration Notes | ++====================+===============================+========================================================+ +| ssl_pem_passphrase | tlsCertificateKeyFilePassword | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_ca_certs | tlsCAFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_crlfile | tlsCRLFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_match_hostname | tlsAllowInvalidHostnames | ``ssl_match_hostname=True`` is equivalent to | +| | | ``tlsAllowInvalidHostnames=False`` and vice-versa. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_cert_reqs | tlsAllowInvalidCertificates | Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` | +| | | and ``ssl.CERT_REQUIRED``, the new option expects | +| | | a boolean value - ``True`` is equivalent to | +| | | ``ssl.CERT_NONE``, while ``False`` is equivalent to | +| | | ``ssl.CERT_REQUIRED``. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_certfile | tlsCertificateKeyFile | Instead of using ``ssl_certfile`` and ``ssl_keyfile`` | +| | | to specify the certificate and private key files | ++--------------------+ | respectively, use ``tlsCertificateKeyFile`` to pass | +| ssl_keyfile | | a single file containing both the client certificate | +| | | and the private key. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| j | journal | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| wtimeout | wTimeoutMS | - | ++--------------------+-------------------------------+--------------------------------------------------------+ + +MongoClient.fsync is removed +............................ + +Removed :meth:`pymongo.mongo_client.MongoClient.fsync`. Run the +`fsync command`_ directly with :meth:`~pymongo.database.Database.command` +instead. For example:: + + client.admin.command('fsync', lock=True) + +.. _fsync command: https://mongodb.com/docs/manual/reference/command/fsync/ + +MongoClient.unlock is removed +............................. + +Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Run the +`fsyncUnlock command`_ directly with +:meth:`~pymongo.database.Database.command` instead. 
For example:: + + client.admin.command('fsyncUnlock') + +.. _fsyncUnlock command: https://mongodb.com/docs/manual/reference/command/fsyncUnlock/ + +MongoClient.is_locked is removed +................................ + +Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Run the +`currentOp command`_ directly with +:meth:`~pymongo.database.Database.command` instead. For example:: + + is_locked = client.admin.command('currentOp').get('fsyncLock') + +.. _currentOp command: https://mongodb.com/docs/manual/reference/command/currentOp/ + +MongoClient.database_names is removed +..................................... + +Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. Use +:meth:`~pymongo.mongo_client.MongoClient.list_database_names` instead. Code like +this:: + + names = client.database_names() + +can be changed to this:: + + names = client.list_database_names() + +MongoClient.max_bson_size/max_message_size/max_write_batch_size are removed +........................................................................... + +Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`, +:attr:`pymongo.mongo_client.MongoClient.max_message_size`, and +:attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers +were incorrect when in ``loadBalanced=true`` mode and ambiguous in clusters +with mixed versions. Use the `hello command`_ to get the authoritative +value from the remote server instead. Code like this:: + + max_bson_size = client.max_bson_size + max_message_size = client.max_message_size + max_write_batch_size = client.max_write_batch_size + +can be changed to this:: + + doc = client.admin.command('hello') + max_bson_size = doc['maxBsonObjectSize'] + max_message_size = doc['maxMessageSizeBytes'] + max_write_batch_size = doc['maxWriteBatchSize'] + +.. _hello command: https://mongodb.com/docs/manual/reference/command/hello/ + +MongoClient.event_listeners and other configuration option helpers are removed +.............................................................................. + +The following client configuration option helpers are removed: + +- :attr:`pymongo.mongo_client.MongoClient.event_listeners`. +- :attr:`pymongo.mongo_client.MongoClient.max_pool_size`. +- :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`. +- :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`. +- :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`. +- :attr:`pymongo.mongo_client.MongoClient.retry_writes`. +- :attr:`pymongo.mongo_client.MongoClient.retry_reads`. + +These helpers have been replaced by +:attr:`pymongo.mongo_client.MongoClient.options`. Code like this:: + + client.event_listeners + client.local_threshold_ms + client.server_selection_timeout + client.max_pool_size + client.min_pool_size + client.max_idle_time_ms + +can be changed to this:: + + client.options.event_listeners + client.options.local_threshold_ms + client.options.server_selection_timeout + client.options.pool_options.max_pool_size + client.options.pool_options.min_pool_size + client.options.pool_options.max_idle_time_seconds + +.. _tz_aware_default_change: + +``tz_aware`` defaults to ``False`` +.................................. + +The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions` +now defaults to ``False`` instead of ``True``.
:meth:`bson.json_util.loads` +now decodes datetime as naive by default:: + + >>> from bson import json_util + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s) + {'dt': datetime.datetime(2022, 5, 9, 17, 54)} + +To retain the PyMongo 3 behavior set ``tz_aware=True``, for example:: + + >>> from bson import json_util + >>> opts = json_util.JSONOptions(tz_aware=True) + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s, json_options=opts) + {'dt': datetime.datetime(2022, 5, 9, 17, 54, tzinfo=<bson.tz_util.FixedOffset object at ...>)} + +This change was made to match the default behavior of +:class:`~bson.codec_options.CodecOptions` and :class:`bson.decode`. + +MongoClient cannot execute operations after ``close()`` +....................................................... + +:class:`~pymongo.mongo_client.MongoClient` cannot execute any operations +after being closed. Previously the client would simply reconnect; now you +must create a new instance. + +MongoClient raises exception when given more than one URI +......................................................... + +:class:`~pymongo.mongo_client.MongoClient` now raises a :exc:`~pymongo.errors.ConfigurationError` +when more than one URI is passed into the ``hosts`` argument. + +MongoClient raises exception when given unescaped percent sign in login info +............................................................................ + +:class:`~pymongo.mongo_client.MongoClient` now raises an +:exc:`~pymongo.errors.InvalidURI` exception +when it encounters unescaped percent signs in username and password.
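One way to avoid the error is to percent-encode the username and password before building the URI. A brief sketch using the standard library (the credentials shown are hypothetical)::

    from urllib.parse import quote_plus

    from pymongo import MongoClient

    user = quote_plus("my user")
    password = quote_plus("pass/word%")
    uri = "mongodb://%s:%s@localhost:27017" % (user, password)
    client = MongoClient(uri)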
+
+Removed :meth:`pymongo.database.Database.add_user` which was deprecated in
+PyMongo 3.6. Use the `createUser command`_ or `updateUser command`_ instead.
+To create a user::
+
+  db.command("createUser", "admin", pwd="password", roles=["dbAdmin"])
+
+To create a read-only user::
+
+  db.command("createUser", "user", pwd="password", roles=["read"])
+
+To change a password::
+
+  db.command("updateUser", "user", pwd="newpassword")
+
+To change roles::
+
+  db.command("updateUser", "user", roles=["readWrite"])
+
+.. _createUser command: https://mongodb.com/docs/manual/reference/command/createUser/
+.. _updateUser command: https://mongodb.com/docs/manual/reference/command/updateUser/
+
+Database.remove_user is removed
+...............................
+
+Removed :meth:`pymongo.database.Database.remove_user` which was deprecated in
+PyMongo 3.6. Use the `dropUser command`_ instead::
+
+  db.command("dropUser", "user")
+
+.. _dropUser command: https://mongodb.com/docs/manual/reference/command/dropUser/
+
+Database.profiling_level is removed
+...................................
+
+Removed :meth:`pymongo.database.Database.profiling_level` which was deprecated in
+PyMongo 3.12. Use the `profile command`_ instead. Code like this::
+
+  level = db.profiling_level()
+
+can be changed to this::
+
+  profile = db.command('profile', -1)
+  level = profile['was']
+
+.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/
+
+Database.set_profiling_level is removed
+.......................................
+
+Removed :meth:`pymongo.database.Database.set_profiling_level` which was deprecated in
+PyMongo 3.12. Use the `profile command`_ instead. Code like this::
+
+  db.set_profiling_level(pymongo.ALL, filter={'op': 'query'})
+
+can be changed to this::
+
+  res = db.command('profile', 2, filter={'op': 'query'})
+
+Database.profiling_info is removed
+..................................
+
+Removed :meth:`pymongo.database.Database.profiling_info` which was deprecated in
+PyMongo 3.12. Query the `'system.profile' collection`_ instead. Code like this::
+
+  profiling_info = db.profiling_info()
+
+can be changed to this::
+
+  profiling_info = list(db['system.profile'].find())
+
+.. _'system.profile' collection: https://mongodb.com/docs/manual/reference/database-profiler/
+
+Database.__bool__ raises NotImplementedError
+............................................
+
+:class:`~pymongo.database.Database` now raises an error upon evaluating it as
+a Boolean. Code like this::
+
+  if database:
+
+can be changed to this::
+
+  if database is not None:
+
+You must now explicitly compare with None.
+
+Collection
+----------
+
+The useCursor option for Collection.aggregate is removed
+........................................................
+
+Removed the ``useCursor`` option for
+:meth:`~pymongo.collection.Collection.aggregate` which was deprecated in
+PyMongo 3.6. The option was only necessary when upgrading from MongoDB 2.4
+to MongoDB 2.6.
+
+Collection.insert is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.insert`. Use
+:meth:`~pymongo.collection.Collection.insert_one` or
+:meth:`~pymongo.collection.Collection.insert_many` instead.
+Code like this::
+
+  collection.insert({'doc': 1})
+  collection.insert([{'doc': 2}, {'doc': 3}])
+
+can be changed to this::
+
+  collection.insert_one({'doc': 1})
+  collection.insert_many([{'doc': 2}, {'doc': 3}])
+
+Collection.save is removed
+..........................
+
+Removed :meth:`pymongo.collection.Collection.save`.
+Applications will get better performance using
+:meth:`~pymongo.collection.Collection.insert_one` to insert a new document
+and :meth:`~pymongo.collection.Collection.update_one` to update an existing
+document. Code like this::
+
+  doc = collection.find_one({"_id": "some id"})
+  doc["some field"] = <some value>
+  db.collection.save(doc)
+
+can be changed to this::
+
+  result = collection.update_one({"_id": "some id"}, {"$set": {"some field": <some value>}})
+
+If performance is not a concern and refactoring is untenable, ``save`` can be
+implemented like so::
+
+  def save(doc):
+      if '_id' in doc:
+          collection.replace_one({'_id': doc['_id']}, doc, upsert=True)
+          return doc['_id']
+      else:
+          res = collection.insert_one(doc)
+          return res.inserted_id
+
+Collection.update is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.update`. Use
+:meth:`~pymongo.collection.Collection.update_one`
+to update a single document or
+:meth:`~pymongo.collection.Collection.update_many` to update multiple
+documents. Code like this::
+
+  collection.update({}, {'$set': {'a': 1}})
+  collection.update({}, {'$set': {'b': 1}}, multi=True)
+
+can be changed to this::
+
+  collection.update_one({}, {'$set': {'a': 1}})
+  collection.update_many({}, {'$set': {'b': 1}})
+
+Collection.remove is removed
+............................
+
+Removed :meth:`pymongo.collection.Collection.remove`. Use
+:meth:`~pymongo.collection.Collection.delete_one`
+to delete a single document or
+:meth:`~pymongo.collection.Collection.delete_many` to delete multiple
+documents. Code like this::
+
+  collection.remove({'a': 1}, multi=False)
+  collection.remove({'b': 1})
+
+can be changed to this::
+
+  collection.delete_one({'a': 1})
+  collection.delete_many({'b': 1})
+
+Collection.find_and_modify is removed
+.....................................
+
+Removed :meth:`pymongo.collection.Collection.find_and_modify`. Use
+:meth:`~pymongo.collection.Collection.find_one_and_update`,
+:meth:`~pymongo.collection.Collection.find_one_and_replace`, or
+:meth:`~pymongo.collection.Collection.find_one_and_delete` instead.
+Code like this::
+
+  updated_doc = collection.find_and_modify({'a': 1}, {'$set': {'b': 1}})
+  replaced_doc = collection.find_and_modify({'b': 1}, {'c': 1})
+  deleted_doc = collection.find_and_modify({'c': 1}, remove=True)
+
+can be changed to this::
+
+  updated_doc = collection.find_one_and_update({'a': 1}, {'$set': {'b': 1}})
+  replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1})
+  deleted_doc = collection.find_one_and_delete({'c': 1})
+
+Collection.count and Cursor.count are removed
+.............................................
+
+Removed :meth:`pymongo.collection.Collection.count` and
+:meth:`pymongo.cursor.Cursor.count`. Use
+:meth:`~pymongo.collection.Collection.count_documents` or
+:meth:`~pymongo.collection.Collection.estimated_document_count` instead.
+Code like this::
+
+  ntotal = collection.count({})
+  nmatched = collection.count({'price': {'$gte': 10}})
+  # Or via the Cursor.count api:
+  ntotal = collection.find({}).count()
+  nmatched = collection.find({'price': {'$gte': 10}}).count()
+
+can be changed to this::
+
+  ntotal = collection.estimated_document_count()
+  nmatched = collection.count_documents({'price': {'$gte': 10}})
+
+.. note:: When migrating from :meth:`count` to :meth:`count_documents`,
+   the following query operators must be replaced:
+
+   +-------------+--------------------------------------------------------------+
+   | Operator    | Replacement                                                  |
+   +=============+==============================================================+
+   | $where      | `$expr`_                                                     |
+   +-------------+--------------------------------------------------------------+
+   | $near       | `$geoWithin`_ with `$center`_; i.e.                          |
+   |             | ``{'$geoWithin': {'$center': [[<x>,<y>], <radius>]}}``       |
+   +-------------+--------------------------------------------------------------+
+   | $nearSphere | `$geoWithin`_ with `$centerSphere`_; i.e.                    |
+   |             | ``{'$geoWithin': {'$centerSphere': [[<x>,<y>], <radius>]}}`` |
+   +-------------+--------------------------------------------------------------+
+
+.. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/
+.. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/
+.. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/
+.. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/
+
+Collection.initialize_ordered_bulk_op and BulkOperationBuilder are removed
+...........................................................................
+
+Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`
+and :class:`pymongo.bulk.BulkOperationBuilder`. Use
+:meth:`pymongo.collection.Collection.bulk_write` instead. Code like this::
+
+  batch = coll.initialize_ordered_bulk_op()
+  batch.insert({'a': 1})
+  batch.find({'a': 1}).update_one({'$set': {'b': 1}})
+  batch.find({'a': 2}).upsert().replace_one({'b': 2})
+  batch.find({'a': 3}).remove()
+  result = batch.execute()
+
+can be changed to this::
+
+  from pymongo import DeleteOne, InsertOne, ReplaceOne, UpdateOne
+
+  coll.bulk_write([
+      InsertOne({'a': 1}),
+      UpdateOne({'a': 1}, {'$set': {'b': 1}}),
+      ReplaceOne({'a': 2}, {'b': 2}, upsert=True),
+      DeleteOne({'a': 3}),
+  ])
+
+Collection.initialize_unordered_bulk_op is removed
+..................................................
+
+Removed :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`.
+Use :meth:`pymongo.collection.Collection.bulk_write` instead. Code like this::
+
+  batch = coll.initialize_unordered_bulk_op()
+  batch.insert({'a': 1})
+  batch.find({'a': 1}).update_one({'$set': {'b': 1}})
+  batch.find({'a': 2}).upsert().replace_one({'b': 2})
+  batch.find({'a': 3}).remove()
+  result = batch.execute()
+
+can be changed to this::
+
+  coll.bulk_write([
+      InsertOne({'a': 1}),
+      UpdateOne({'a': 1}, {'$set': {'b': 1}}),
+      ReplaceOne({'a': 2}, {'b': 2}, upsert=True),
+      DeleteOne({'a': 3}),
+  ], ordered=False)
+
+Collection.group is removed
+...........................
+
+Removed :meth:`pymongo.collection.Collection.group`. This method was
+deprecated in PyMongo 3.5, and MongoDB 4.2 removed the group command.
+Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage
+instead.
+
+Collection.map_reduce and Collection.inline_map_reduce are removed
+..................................................................
+
+Removed :meth:`pymongo.collection.Collection.map_reduce` and
+:meth:`pymongo.collection.Collection.inline_map_reduce`.
+Migrate to :meth:`~pymongo.collection.Collection.aggregate` or run the
+`mapReduce command`_ directly with :meth:`~pymongo.database.Database.command`
+instead.
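+
+For example, a word-count style map-reduce over a hypothetical ``tags``
+array field (the collection and field names here are illustrative, not
+part of the original examples) can typically be expressed with
+``$unwind`` and ``$group``::
+
+  pipeline = [
+      {"$unwind": "$tags"},
+      {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
+  ]
+  results = list(db.things.aggregate(pipeline))
+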
+For more guidance on this migration see:
+
+- https://mongodb.com/docs/manual/reference/map-reduce-to-aggregation-pipeline/
+- https://mongodb.com/docs/manual/reference/aggregation-commands-comparison/
+
+.. _mapReduce command: https://mongodb.com/docs/manual/reference/command/mapReduce/
+
+Collection.ensure_index is removed
+..................................
+
+Removed :meth:`pymongo.collection.Collection.ensure_index`. Use
+:meth:`~pymongo.collection.Collection.create_index` or
+:meth:`~pymongo.collection.Collection.create_indexes` instead. Note that
+``ensure_index`` maintained an in-memory cache of recently created indexes,
+whereas the newer methods do not, so applications should avoid frequent calls
+to :meth:`~pymongo.collection.Collection.create_index` or
+:meth:`~pymongo.collection.Collection.create_indexes`. Code like this::
+
+  def persist(self, document):
+      collection.ensure_index('a', unique=True)
+      collection.insert_one(document)
+
+can be changed to this::
+
+  def persist(self, document):
+      if not self.created_index:
+          collection.create_index('a', unique=True)
+          self.created_index = True
+      collection.insert_one(document)
+
+Collection.reindex is removed
+.............................
+
+Removed :meth:`pymongo.collection.Collection.reindex`. Run the
+`reIndex command`_ directly instead. Code like this::
+
+  >>> result = database.my_collection.reindex()
+
+can be changed to this::
+
+  >>> result = database.command('reIndex', 'my_collection')
+
+.. _reIndex command: https://mongodb.com/docs/manual/reference/command/reIndex/
+
+The modifiers parameter is removed
+..................................
+
+Removed the ``modifiers`` parameter from
+:meth:`~pymongo.collection.Collection.find`,
+:meth:`~pymongo.collection.Collection.find_one`,
+:meth:`~pymongo.collection.Collection.find_raw_batches`, and
+:meth:`~pymongo.cursor.Cursor`. Pass the options directly to the method
+instead. Code like this::
+
+  cursor = coll.find({}, modifiers={
+      "$comment": "comment",
+      "$hint": {"_id": 1},
+      "$min": {"_id": 0},
+      "$max": {"_id": 6},
+      "$maxTimeMS": 6000,
+      "$returnKey": False,
+      "$showDiskLoc": False,
+  })
+
+can be changed to this::
+
+  cursor = coll.find(
+      {},
+      comment="comment",
+      hint={"_id": 1},
+      min={"_id": 0},
+      max={"_id": 6},
+      max_time_ms=6000,
+      return_key=False,
+      show_record_id=False,
+  )
+
+The hint parameter is required with min/max
+...........................................
+
+The ``hint`` option is now required when using ``min`` or ``max`` queries
+with :meth:`~pymongo.collection.Collection.find` to ensure the query utilizes
+the correct index. For example, code like this::
+
+  cursor = coll.find({}, min={'x': min_value})
+
+can be changed to this::
+
+  cursor = coll.find({}, min={'x': min_value}, hint=[('x', ASCENDING)])
+
+Collection.__bool__ raises NotImplementedError
+..............................................
+
+:class:`~pymongo.collection.Collection` now raises an error upon evaluating
+it as a Boolean. Code like this::
+
+  if collection:
+
+can be changed to this::
+
+  if collection is not None:
+
+You must now explicitly compare with None.
+
+Collection.find returns entire document with empty projection
+..............................................................
+
+Empty projections (e.g. ``{}`` or ``[]``) for
+:meth:`~pymongo.collection.Collection.find` and
+:meth:`~pymongo.collection.Collection.find_one`
+are now passed to the server as-is, rather than being substituted with a
+projection of ``{"_id": 1}`` as in previous versions.
+This means that an empty projection will now return the entire document, not
+just the ``"_id"`` field. To keep the old behavior, code like this::
+
+  coll.find({}, projection={})
+
+can be changed to this::
+
+  coll.find({}, projection={"_id": 1})
+
+SONManipulator is removed
+-------------------------
+
+Removed :mod:`pymongo.son_manipulator`,
+:class:`pymongo.son_manipulator.SONManipulator`,
+:class:`pymongo.son_manipulator.ObjectIdInjector`,
+:class:`pymongo.son_manipulator.ObjectIdShuffler`,
+:class:`pymongo.son_manipulator.AutoReference`,
+:class:`pymongo.son_manipulator.NamespaceInjector`,
+:meth:`pymongo.database.Database.add_son_manipulator`,
+:attr:`pymongo.database.Database.outgoing_copying_manipulators`,
+:attr:`pymongo.database.Database.outgoing_manipulators`,
+:attr:`pymongo.database.Database.incoming_copying_manipulators`, and
+:attr:`pymongo.database.Database.incoming_manipulators`.
+
+Removed the ``manipulate`` parameter from
+:meth:`~pymongo.collection.Collection.find`,
+:meth:`~pymongo.collection.Collection.find_one`, and
+:meth:`~pymongo.cursor.Cursor`.
+
+The :class:`pymongo.son_manipulator.SONManipulator` API has limitations as a
+technique for transforming your data and was deprecated in PyMongo 3.0.
+Instead, it is more flexible and straightforward to transform outgoing
+documents in your own code before passing them to PyMongo, and to transform
+incoming documents after receiving them from PyMongo.
+
+Alternatively, if your application uses the ``SONManipulator`` API to convert
+custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and
+:class:`~bson.codec_options.TypeRegistry` APIs may be a suitable alternative.
+For more information, see `Custom Types `_.
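+
+As a brief, illustrative sketch of the ``TypeCodec`` approach (the
+``Decimal``/``Decimal128`` pairing and the ``prices`` collection name are
+just examples, not part of the original text)::
+
+  from decimal import Decimal
+
+  from bson.codec_options import CodecOptions, TypeCodec, TypeRegistry
+  from bson.decimal128 import Decimal128
+
+  class DecimalCodec(TypeCodec):
+      python_type = Decimal    # the Python type this codec encodes
+      bson_type = Decimal128   # the BSON type this codec decodes
+
+      def transform_python(self, value):
+          # Called when encoding a Decimal to BSON.
+          return Decimal128(value)
+
+      def transform_bson(self, value):
+          # Called when decoding a Decimal128 from BSON.
+          return value.to_decimal()
+
+  opts = CodecOptions(type_registry=TypeRegistry([DecimalCodec()]))
+  collection = db.get_collection("prices", codec_options=opts)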
+
+``SON().items()`` now returns ``dict_items`` object
+---------------------------------------------------
+
+:meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather than
+a list.
+
+``SON().iteritems()`` is removed
+--------------------------------
+
+``SON.iteritems()`` is now removed. Code like this::
+
+  for k, v in son.iteritems():
+
+can now be replaced by code like this::
+
+  for k, v in son.items():
+
+IsMaster is removed
+-------------------
+
+Removed :class:`pymongo.ismaster.IsMaster`.
+Use :class:`pymongo.hello.Hello` instead.
+
+NotMasterError is removed
+-------------------------
+
+Removed :exc:`~pymongo.errors.NotMasterError`.
+Use :exc:`~pymongo.errors.NotPrimaryError` instead.
+
+CertificateError is removed
+---------------------------
+
+Removed :exc:`~pymongo.errors.CertificateError`. Since PyMongo 3.0 this error
+has been handled internally and is never raised to the application.
+
+pymongo.GEOHAYSTACK is removed
+------------------------------
+
+Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a
+2d index and use ``$geoNear`` or ``$geoWithin`` instead.
+
+UUIDLegacy is removed
+---------------------
+
+Removed :class:`bson.binary.UUIDLegacy`. Use
+:meth:`bson.binary.Binary.from_uuid` instead. Code like this::
+
+  uu = uuid.uuid4()
+  uuid_legacy = UUIDLegacy(uu)
+
+can be changed to this::
+
+  uu = uuid.uuid4()
+  uuid_legacy = Binary.from_uuid(uu, PYTHON_LEGACY)
+
+Default JSONMode changed from LEGACY to RELAXED
+-----------------------------------------------
+
+Changed the default JSON encoding representation from legacy to relaxed.
+The ``json_mode`` parameter for :func:`bson.json_util.dumps` now defaults to
+:const:`~bson.json_util.RELAXED_JSON_OPTIONS`.
+
+GridFS changes
+--------------
+
+.. _removed-gridfs-checksum:
+
+disable_md5 parameter is removed
+................................
+
+Removed the ``disable_md5`` option for :class:`~gridfs.GridFSBucket` and
+:class:`~gridfs.GridFS`. GridFS no longer generates checksums.
+Applications that desire a file digest should implement it outside GridFS
+and store it with other file metadata. For example::
+
+  import hashlib
+  my_db = MongoClient().test
+  fs = GridFSBucket(my_db)
+  with fs.open_upload_stream("test_file") as grid_in:
+      file_data = b'...'
+      sha256 = hashlib.sha256(file_data).hexdigest()
+      grid_in.write(file_data)
+      grid_in.sha256 = sha256  # Set the custom 'sha256' field
+
+Note that for large files, the checksum may need to be computed in chunks
+to avoid the excessive memory needed to load the entire file at once.
+
+Removed features with no migration path
+---------------------------------------
+
+cursor_manager support is removed
+.................................
+
+Removed :class:`pymongo.cursor_manager.CursorManager`,
+:mod:`pymongo.cursor_manager`, and
+:meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`.
+
+MongoClient.close_cursor is removed
+...................................
+
+Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor` and
+:meth:`pymongo.mongo_client.MongoClient.kill_cursors`. Instead, close cursors
+with :meth:`pymongo.cursor.Cursor.close` or
+:meth:`pymongo.command_cursor.CommandCursor.close`.
+
+.. _killCursors command: https://mongodb.com/docs/manual/reference/command/killCursors/
+
+Database.eval, Database.system_js, and SystemJS are removed
+...........................................................
+
+Removed :meth:`~pymongo.database.Database.eval`,
+:data:`~pymongo.database.Database.system_js` and
+:class:`~pymongo.database.SystemJS`. The eval command was deprecated in
+MongoDB 3.0 and removed in MongoDB 4.2. There is no replacement for eval with
+MongoDB 4.2+.
+
+However, on MongoDB <= 4.0, code like this::
+
+  >>> result = database.eval('function (x) {return x;}', 3)
+
+can be changed to this::
+
+  >>> from bson.code import Code
+  >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval')
+
+Database.error, Database.last_status, Database.previous_error, and Database.reset_error_history are removed
+...........................................................................................................
+
+Removed :meth:`pymongo.database.Database.error`,
+:meth:`pymongo.database.Database.last_status`,
+:meth:`pymongo.database.Database.previous_error`, and
+:meth:`pymongo.database.Database.reset_error_history`.
+These methods are obsolete: all MongoDB write operations use an acknowledged
+write concern and report their errors by default. These methods were
+deprecated in PyMongo 2.8.
+
+Collection.parallel_scan is removed
+...................................
+
+Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2
+removed the parallelCollectionScan command. There is no replacement.
+
+pymongo.message helpers are removed
+...................................
+
+Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`,
+:meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`,
+:meth:`pymongo.message.query`, and :meth:`pymongo.message.update`.
+
+Name is a required argument for pymongo.driver_info.DriverInfo
+..............................................................
+
+``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class.
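+
+For example (a minimal sketch; the metadata strings here are illustrative)::
+
+  from pymongo import MongoClient
+  from pymongo.driver_info import DriverInfo
+
+  client = MongoClient(
+      driver=DriverInfo(name="MyFramework", version="1.0", platform="CPython 3.10"))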
+
+DBRef BSON/JSON decoding behavior
+.................................
+
+Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef`
+to match the behavior outlined in the `DBRef specification`_ version 1.0.
+Specifically, PyMongo now decodes a subdocument into a
+:class:`~bson.dbref.DBRef` if and only if it contains both ``$ref`` and
+``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the
+correct type. Otherwise, the document is returned unchanged. Previously, any
+subdocument containing a ``$ref`` field would be decoded as a
+:class:`~bson.dbref.DBRef`.
+
+.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md
+
+Encoding a UUID raises an error by default
+..........................................
+
+The default ``uuid_representation`` for :class:`~bson.codec_options.CodecOptions`,
+:class:`~bson.json_util.JSONOptions`, and
+:class:`~pymongo.mongo_client.MongoClient` has been changed from
+:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to
+:data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a
+:class:`uuid.UUID` instance to BSON or JSON now produces an error by default.
+Applications that stored UUIDs with previous versions of PyMongo should set
+``uuid_representation`` to :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY`
+to avoid data corruption; applications with no existing UUID data should use
+:data:`bson.binary.UuidRepresentation.STANDARD`. If you do not explicitly set
+a value, you will receive an error like this when attempting to encode a
+:class:`uuid.UUID`::
+
+  ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted...
+
+See `Handling UUIDs `_ for details.
+
+Additional BSON classes implement ``__slots__``
+...............................................
+
+:class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`,
+:class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`,
+:class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement
+``__slots__`` to reduce memory usage. This means that their attributes are
+fixed, and new attributes cannot be added to the object at runtime.
diff --git a/doc/mongo_extensions.py b/doc/mongo_extensions.py
deleted file mode 100644
index e080e2bc78..0000000000
--- a/doc/mongo_extensions.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2009-2014 MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -"""MongoDB specific extensions to Sphinx.""" - -from docutils import nodes -from sphinx import addnodes -from sphinx.util.compat import (Directive, - make_admonition) - - -class mongodoc(nodes.Admonition, nodes.Element): - pass - - -class mongoref(nodes.reference): - pass - - -def visit_mongodoc_node(self, node): - self.visit_admonition(node, "seealso") - - -def depart_mongodoc_node(self, node): - self.depart_admonition(node) - - -def visit_mongoref_node(self, node): - atts = {"class": "reference external", - "href": node["refuri"], - "name": node["name"]} - self.body.append(self.starttag(node, 'a', '', **atts)) - - -def depart_mongoref_node(self, node): - self.body.append('') - if not isinstance(node.parent, nodes.TextElement): - self.body.append('\n') - - -class MongodocDirective(Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = False - option_spec = {} - - def run(self): - return make_admonition(mongodoc, self.name, - ['See general MongoDB documentation'], - self.options, self.content, self.lineno, - self.content_offset, self.block_text, - self.state, self.state_machine) - - -def process_mongodoc_nodes(app, doctree, fromdocname): - for node in doctree.traverse(mongodoc): - anchor = None - for name in node.parent.parent.traverse(addnodes.desc_signature): - anchor = name["ids"][0] - break - if not anchor: - for name in node.parent.traverse(nodes.section): - anchor = name["ids"][0] - break - for para in node.traverse(nodes.paragraph): - tag = str(para.traverse()[1]) - link = mongoref("", "") - link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag - link["name"] = anchor - link.append(nodes.emphasis(tag, tag)) - new_para = nodes.paragraph() - new_para += link - node.replace(para, new_para) - - -def setup(app): - app.add_node(mongodoc, - html=(visit_mongodoc_node, depart_mongodoc_node), - latex=(visit_mongodoc_node, depart_mongodoc_node), - text=(visit_mongodoc_node, depart_mongodoc_node)) - app.add_node(mongoref, - html=(visit_mongoref_node, depart_mongoref_node)) - - app.add_directive("mongodoc", MongodocDirective) - app.connect("doctree-resolved", process_mongodoc_nodes) diff --git a/doc/pydoctheme/static/pydoctheme.css b/doc/pydoctheme/static/pydoctheme.css new file mode 100644 index 0000000000..50835bb92c --- /dev/null +++ b/doc/pydoctheme/static/pydoctheme.css @@ -0,0 +1,178 @@ +@import url("default.css"); + +body { + background-color: white; + margin-left: 1em; + margin-right: 1em; +} + +div.related { + margin-bottom: 1.2em; + padding: 0.5em 0; + border-top: 1px solid #ccc; + margin-top: 0.5em; +} + +div.related a:hover { + color: #0095C4; +} + +div.related:first-child { + border-top: 0; + border-bottom: 1px solid #ccc; +} + +div.sphinxsidebar { + background-color: #eeeeee; + border-radius: 5px; + line-height: 130%; + font-size: smaller; +} + +div.sphinxsidebar h3, div.sphinxsidebar h4 { + margin-top: 1.5em; +} + +div.sphinxsidebarwrapper > h3:first-child { + margin-top: 0.2em; +} + +div.sphinxsidebarwrapper > ul > li > ul > li { + margin-bottom: 0.4em; +} + +div.sphinxsidebar a:hover { + color: #0095C4; +} + +div.sphinxsidebar input { + font-family: 'Lucida Grande',Arial,sans-serif; + border: 1px solid #999999; + font-size: smaller; + border-radius: 3px; +} + +div.sphinxsidebar input[type=text] { + max-width: 150px; +} + +div.body { + padding: 0 0 0 1.2em; +} + +div.body p { + line-height: 140%; +} + +div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { + margin: 0; + border: 0; + 
padding: 0.3em 0; +} + +div.body hr { + border: 0; + background-color: #ccc; + height: 1px; +} + +div.body pre { + border-radius: 3px; + border: 1px solid #ac9; +} + +div.body div.admonition, div.body div.impl-detail { + border-radius: 3px; +} + +div.body div.impl-detail > p { + margin: 0; +} + +div.body div.seealso { + border: 1px solid #dddd66; +} + +div.body a { + color: #0072aa; +} + +div.body a:visited { + color: #6363bb; +} + +div.body a:hover { + color: #00B0E4; +} + +tt, code, pre { + font-family: monospace, sans-serif; + font-size: 96.5%; +} + +div.body tt, div.body code { + border-radius: 3px; +} + +div.body tt.descname, div.body code.descname { + font-size: 120%; +} + +div.body tt.xref, div.body a tt, div.body code.xref, div.body a code { + font-weight: normal; +} + +.deprecated { + border-radius: 3px; +} + +table.docutils { + border: 1px solid #ddd; + min-width: 20%; + border-radius: 3px; + margin-top: 10px; + margin-bottom: 10px; +} + +table.docutils td, table.docutils th { + border: 1px solid #ddd !important; + border-radius: 3px; +} + +table p, table li { + text-align: left !important; +} + +table.docutils th { + background-color: #eee; + padding: 0.3em 0.5em; +} + +table.docutils td { + background-color: white; + padding: 0.3em 0.5em; +} + +table.footnote, table.footnote td { + border: 0 !important; +} + +div.footer { + line-height: 150%; + margin-top: -2em; + text-align: right; + width: auto; + margin-right: 10px; +} + +div.footer a:hover { + color: #0095C4; +} + +.refcount { + color: #060; +} + +.stableabi { + color: #229; +} diff --git a/doc/pydoctheme/theme.conf b/doc/pydoctheme/theme.conf new file mode 100644 index 0000000000..88bf38734d --- /dev/null +++ b/doc/pydoctheme/theme.conf @@ -0,0 +1,24 @@ +[theme] +inherit = default +stylesheet = pydoctheme.css +pygments_style = sphinx + +[options] +bodyfont = 'Lucida Grande', Arial, sans-serif +headfont = 'Lucida Grande', Arial, sans-serif +footerbgcolor = white +footertextcolor = #555555 +relbarbgcolor = white +relbartextcolor = #666666 +relbarlinkcolor = #444444 +sidebarbgcolor = white +sidebartextcolor = #444444 +sidebarlinkcolor = #444444 +bgcolor = white +textcolor = #222222 +linkcolor = #0090c0 +visitedlinkcolor = #00608f +headtextcolor = #1a1a1a +headbgcolor = white +headlinkcolor = #aaaaaa +googletag = False diff --git a/doc/python3.rst b/doc/python3.rst deleted file mode 100644 index 33976b122d..0000000000 --- a/doc/python3.rst +++ /dev/null @@ -1,178 +0,0 @@ -Python 3 FAQ -============ - -.. contents:: - -What Python 3 versions are supported? -------------------------------------- - -PyMongo supports Python 3.x where x >= 1. - -We **do not** support Python 3.0.x. It has many problems -(some that directly impact PyMongo) and was `end-of-lifed`_ -with the release of Python 3.1. - -.. _end-of-lifed: http://www.python.org/download/releases/3.0.1/ - -Are there any PyMongo behavior changes with Python 3? ------------------------------------------------------ - -Only one intentional change. Instances of :class:`bytes` -are encoded as BSON type 5 (Binary data) with subtype 0. -In Python 3 they are decoded back to :class:`bytes`. In -Python 2 they will be decoded to :class:`~bson.binary.Binary` -with subtype 0. - -For example, let's insert a :class:`bytes` instance using Python 3 then -read it back. Notice the byte string is decoded back to :class:`bytes`:: - - Python 3.1.4 (default, Mar 21 2012, 14:34:01) - [GCC 4.5.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. 
- >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.insert({'binary': b'this is a byte string'}) - ObjectId('4f9086b1fba5222021000000') - >>> c.test.bintest.find_one() - {'binary': b'this is a byte string', '_id': ObjectId('4f9086b1fba5222021000000')} - -Now retrieve the same document in Python 2. Notice the byte string is decoded -to :class:`~bson.binary.Binary`:: - - Python 2.7.3 (default, Apr 12 2012, 10:35:17) - [GCC 4.5.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.find_one() - {u'binary': Binary('this is a byte string', 0), u'_id': ObjectId('4f9086b1fba5222021000000')} - - -Why can't I share pickled ObjectIds between some versions of Python 2 and 3? ----------------------------------------------------------------------------- - -Instances of :class:`~bson.objectid.ObjectId` pickled using Python 2 -can always be unpickled using Python 3. Due to -`http://bugs.python.org/issue13505 `_ -you must use Python 3.2.3 or newer to pickle instances of ObjectId if you -need to unpickle them in Python 2. - -If you pickled an ObjectId using Python 2 and want to unpickle it using -Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: - - Python 2.7.3 (default, Apr 12 2012, 10:35:17) - [GCC 4.5.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f919ba2fba5225b84000000') - >>> pickle.dumps(oid) - 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - - Python 3.1.4 (default, Mar 21 2012, 14:34:01) - [GCC 4.5.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') - ObjectId('4f919ba2fba5225b84000000') - - -If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 -you must use Python 3.2.3 or newer and ``protocol <= 2``:: - - Python 3.2.3 (v3.2.3:3d0686d90f55, Apr 10 2012, 11:25:50) - [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f96f20c430ee6bd06000000') - >>> pickle.dumps(oid, protocol=2) - b'\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...' - - Python 2.4.4 (#1, Oct 18 2006, 10:34:39) - [GCC 4.0.1 (Apple Computer, Inc. build 5341)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads('\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...') - ObjectId('4f96f20c430ee6bd06000000') - - -Unfortunately this won't work if you pickled the ObjectId using a Python 3 -version older than 3.2.3:: - - Python 3.2.2 (default, Mar 21 2012, 14:32:23) - [GCC 4.5.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> pickle.dumps(oid, protocol=2) - b'\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c__builtin__\nbytes\...' - - Python 2.4.6 (#1, Apr 12 2012, 14:48:24) - [GCC 4.5.3] on linux3 - Type "help", "copyright", "credits" or "license" for more information. 
- >>> import pickle - >>> pickle.loads('\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c__builtin__\nbytes\...') - Traceback (most recent call last): - File "", line 1, in ? - File "/usr/lib/python2.4/pickle.py", line 1394, in loads - return Unpickler(file).load() - File "/usr/lib/python2.4/pickle.py", line 872, in load - dispatch[key](self) - File "/usr/lib/python2.4/pickle.py", line 1104, in load_global - klass = self.find_class(module, name) - File "/usr/lib/python2.4/pickle.py", line 1140, in find_class - klass = getattr(mod, name) - AttributeError: 'module' object has no attribute 'bytes' - -.. warning:: - - Unpickling in Python 2.6 or 2.7 an ObjectId pickled in a Python 3 version - older than 3.2.3 will seem to succeed but the resulting ObjectId instance - will contain garbage data. - - >>> pickle.loads('\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c__builtin__\nbytes\...) - ObjectId('5b37392c203135302c203234362c2034352c203235312c203136352c2033342c203532...') - - -Why do I get a syntax error importing pymongo after installing from source? ---------------------------------------------------------------------------- - -PyMongo makes use of the 2to3 tool to translate much of its code to valid -Python 3 syntax at install time. The translated modules are written to the -build subdirectory before being installed, leaving the original source files -intact. If you start the python interactive shell from the top level source -directory after running ``python setup.py install`` the untranslated modules -will be the first thing in your path. Importing pymongo will result in an -exception similar to:: - - Python 3.1.5 (default, Jun 2 2012, 12:24:49) - [GCC 4.6.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - Traceback (most recent call last): - File "", line 1, in - File "pymongo/__init__.py", line 58, in - version = get_version_string() - File "pymongo/__init__.py", line 54, in get_version_string - if isinstance(version_tuple[-1], basestring): - NameError: global name 'basestring' is not defined - -Note the path in the traceback (``pymongo/__init__.py``). Changing out of the -source directory takes the untranslated modules out of your path:: - - $ cd .. - $ python - Python 3.1.5 (default, Jun 2 2012, 12:24:49) - [GCC 4.6.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> pymongo.__file__ - '/home/behackett/py3k/lib/python3.1/site-packages/pymongo-2.2-py3.1-linux-x86_64.egg/pymongo/__init__.py' - - diff --git a/doc/static/periodic-executor-refs.dot b/doc/static/periodic-executor-refs.dot new file mode 100644 index 0000000000..39632cfa73 --- /dev/null +++ b/doc/static/periodic-executor-refs.dot @@ -0,0 +1,17 @@ +digraph "Monitor and PeriodicExecutor" { + // Strong references. 
+ topology -> server + server -> monitor + monitor -> executor + executor -> "target()" + "target()" -> self_ref + thread -> "target()" + + // Weak references + edge [style="dashed"]; + + self_ref -> monitor [curved=true] + monitor -> topology + executor -> thread + _EXECUTORS -> executor +} diff --git a/doc/static/periodic-executor-refs.png b/doc/static/periodic-executor-refs.png new file mode 100644 index 0000000000..3224b3d1d9 Binary files /dev/null and b/doc/static/periodic-executor-refs.png differ diff --git a/doc/static/sidebar.js b/doc/static/sidebar.js new file mode 100644 index 0000000000..e8d58f4bfa --- /dev/null +++ b/doc/static/sidebar.js @@ -0,0 +1,193 @@ +/* + * sidebar.js + * ~~~~~~~~~~ + * + * This script makes the Sphinx sidebar collapsible and implements intelligent + * scrolling. + * + * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds in + * .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton used to + * collapse and expand the sidebar. + * + * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden and the + * width of the sidebar and the margin-left of the document are decreased. + * When the sidebar is expanded the opposite happens. This script saves a + * per-browser/per-session cookie used to remember the position of the sidebar + * among the pages. Once the browser is closed the cookie is deleted and the + * position reset to the default (expanded). + * + * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +$(function() { + // global elements used by the functions. + // the 'sidebarbutton' element is defined as global after its + // creation, in the add_sidebar_button function + var jwindow = $(window); + var jdocument = $(document); + var bodywrapper = $('.bodywrapper'); + var sidebar = $('.sphinxsidebar'); + var sidebarwrapper = $('.sphinxsidebarwrapper'); + + // original margin-left of the bodywrapper and width of the sidebar + // with the sidebar expanded + var bw_margin_expanded = bodywrapper.css('margin-left'); + var ssb_width_expanded = sidebar.width(); + + // margin-left of the bodywrapper and width of the sidebar + // with the sidebar collapsed + var bw_margin_collapsed = '.8em'; + var ssb_width_collapsed = '.8em'; + + // colors used by the current theme + var dark_color = '#AAAAAA'; + var light_color = '#CCCCCC'; + + function get_viewport_height() { + if (window.innerHeight) + return window.innerHeight; + else + return jwindow.height(); + } + + function sidebar_is_collapsed() { + return sidebarwrapper.is(':not(:visible)'); + } + + function toggle_sidebar() { + if (sidebar_is_collapsed()) + expand_sidebar(); + else + collapse_sidebar(); + // adjust the scrolling of the sidebar + scroll_sidebar(); + } + + function collapse_sidebar() { + sidebarwrapper.hide(); + sidebar.css('width', ssb_width_collapsed); + bodywrapper.css('margin-left', bw_margin_collapsed); + sidebarbutton.css({ + 'margin-left': '0', + 'height': bodywrapper.height(), + 'border-radius': '5px' + }); + sidebarbutton.find('span').text('»'); + sidebarbutton.attr('title', _('Expand sidebar')); + document.cookie = 'sidebar=collapsed'; + } + + function expand_sidebar() { + bodywrapper.css('margin-left', bw_margin_expanded); + sidebar.css('width', ssb_width_expanded); + sidebarwrapper.show(); + sidebarbutton.css({ + 'margin-left': ssb_width_expanded-12, + 'height': bodywrapper.height(), + 'border-radius': '0 5px 5px 0' + }); + sidebarbutton.find('span').text('«'); + sidebarbutton.attr('title', 
_('Collapse sidebar')); + //sidebarwrapper.css({'padding-top': + // Math.max(window.pageYOffset - sidebarwrapper.offset().top, 10)}); + document.cookie = 'sidebar=expanded'; + } + + function add_sidebar_button() { + sidebarwrapper.css({ + 'float': 'left', + 'margin-right': '0', + 'width': ssb_width_expanded - 28 + }); + // create the button + sidebar.append( + '
<div id="sidebarbutton"><span>&laquo;</span></div>
' + ); + var sidebarbutton = $('#sidebarbutton'); + // find the height of the viewport to center the '<<' in the page + var viewport_height = get_viewport_height(); + var sidebar_offset = sidebar.offset().top; + var sidebar_height = Math.max(bodywrapper.height(), sidebar.height()); + sidebarbutton.find('span').css({ + 'display': 'block', + 'position': 'fixed', + 'top': Math.min(viewport_height/2, sidebar_height/2 + sidebar_offset) - 10 + }); + + sidebarbutton.click(toggle_sidebar); + sidebarbutton.attr('title', _('Collapse sidebar')); + sidebarbutton.css({ + 'border-radius': '0 5px 5px 0', + 'color': '#444444', + 'background-color': '#CCCCCC', + 'font-size': '1.2em', + 'cursor': 'pointer', + 'height': sidebar_height, + 'padding-top': '1px', + 'padding-left': '1px', + 'margin-left': ssb_width_expanded - 12 + }); + + sidebarbutton.hover( + function () { + $(this).css('background-color', dark_color); + }, + function () { + $(this).css('background-color', light_color); + } + ); + } + + function set_position_from_cookie() { + if (!document.cookie) + return; + var items = document.cookie.split(';'); + for(var k=0; k wintop && curbot > winbot) { + sidebarwrapper.css('top', $u.max([wintop - offset - 10, 0])); + } + else if (curtop < wintop && curbot < winbot) { + sidebarwrapper.css('top', $u.min([winbot - sidebar_height - offset - 20, + jdocument.height() - sidebar_height - 200])); + } + } + } + jwindow.scroll(scroll_sidebar); +}); diff --git a/doc/tools.rst b/doc/tools.rst deleted file mode 100644 index 4688e04375..0000000000 --- a/doc/tools.rst +++ /dev/null @@ -1,137 +0,0 @@ -Tools -===== -Many tools have been written for working with **PyMongo**. If you know -of or have created a tool for working with MongoDB from Python please -list it here. - -.. note:: We try to keep this list current. As such, projects that - have not been updated recently or appear to be unmaintained will - occasionally be removed from the list or moved to the back (to keep - the list from becoming too intimidating). - - If a project gets removed that is still being developed or is in active use - please let us know or add it back. - -ORM-like Layers ---------------- -Some people have found that they prefer to work with a layer that -has more features than PyMongo provides. Often, things like models and -validation are desired. To that end, several different ORM-like layers -have been written by various authors. - -It is our recommendation that new users begin by working directly with -PyMongo, as described in the rest of this documentation. Many people -have found that the features of PyMongo are enough for their -needs. Even if you eventually come to the decision to use one of these -layers, the time spent working directly with the driver will have -increased your understanding of how MongoDB actually works. - -Humongolus - `Humongolus `_ is a lightweight ORM - framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at github `_. Tutorials and usage - examples are also available at GitHub. - -MongoKit - The `MongoKit `_ framework - is an ORM-like layer on top of PyMongo. There is also a MongoKit - `google group `_. 
- -Ming - `Ming `_ (the Merciless) is a - library that allows you to enforce schemas on a MongoDB database in - your Python application. It was developed by `SourceForge - `_ in the course of their migration to - MongoDB. See the `introductory blog post - `_ - for more details. - -MongoAlchemy - `MongoAlchemy `_ is another ORM-like layer on top of - PyMongo. Its API is inspired by `SQLAlchemy `_. The - code is available `on github `_; - for more information, see `the tutorial `_. - -MongoEngine - `MongoEngine `_ is another ORM-like - layer on top of PyMongo. It allows you to define schemas for - documents and query collections using syntax inspired by the Django - ORM. The code is available on `github - `_; for more information, see - the `tutorial `_. - -Minimongo - `minimongo `_ is a lightweight, - pythonic interface to MongoDB. It retains pymongo's query and update API, - and provides a number of additional features, including a simple - document-oriented interface, connection pooling, index management, and - collection & database naming helpers. The `source is on github - `_. - -Manga - `Manga `_ aims to be a simpler ORM-like - layer on top of PyMongo. The syntax for defining schema is inspired by the - Django ORM, but Pymongo's query language is maintained. The source `is on - github `_. - -MotorEngine - `MotorEngine `_ is a port of - MongoEngine to Motor, for asynchronous access with Tornado. - It implements the same modeling APIs to be data-portable, meaning that a - model defined in MongoEngine can be read in MotorEngine. The source is - `available on github `_. - -Framework Tools ---------------- -This section lists tools and adapters that have been designed to work with -various Python frameworks and libraries. - -* `Django MongoDB Engine - `_ is a MongoDB - database backend for Django that completely integrates with its ORM. - For more information `see the tutorial - `_. -* `mango `_ provides MongoDB backends for - Django sessions and authentication (bypassing :mod:`django.db` entirely). -* `Django MongoEngine - `_ is a MongoDB backend for - Django, an `example: - `_. - For more information ``_ -* `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beaker's - `_ caching / session system. - `The source is on github `_. -* `MongoLog `_ is a Python logging - handler that stores logs in MongoDB using a capped collection. -* `c5t `_ is a content-management system - using TurboGears and MongoDB. -* `rod.recipe.mongodb `_ is a - ZC Buildout recipe for downloading and installing MongoDB. -* `repoze-what-plugins-mongodb - `_ is a project - working to support a plugin for using MongoDB as a backend for - :mod:`repoze.what`. -* `mongobox `_ is a tool to run a sandboxed - MongoDB instance from within a python app. -* `Flask-MongoAlchemy `_ Add - Flask support for MongoDB using MongoAlchemy. -* `Flask-MongoKit `_ Flask extension - to better integrate MongoKit into Flask. -* `Flask-PyMongo `_ Flask-PyMongo - bridges Flask and PyMongo. - -Alternative Drivers -------------------- -These are alternatives to PyMongo. - -* `Motor `_ is a full-featured, non-blocking - MongoDB driver for Python Tornado applications. -* `TxMongo `_ is an - asynchronous Python driver for MongoDB, although it is not currently - recommended for production use. diff --git a/doc/tutorial.rst b/doc/tutorial.rst deleted file mode 100644 index cf3961d7cb..0000000000 --- a/doc/tutorial.rst +++ /dev/null @@ -1,374 +0,0 @@ -Tutorial -======== - -.. 
testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('test-database') - -This tutorial is intended as an introduction to working with -**MongoDB** and **PyMongo**. - -Prerequisites -------------- -Before we start, make sure that you have the **PyMongo** distribution -:doc:`installed `. In the Python shell, the following -should run without raising an exception: - -.. doctest:: - - >>> import pymongo - -This tutorial also assumes that a MongoDB instance is running on the -default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you -can start it like so: - -.. code-block:: bash - - $ mongod - -Making a Connection with MongoClient ------------------------------------- -The first step when working with **PyMongo** is to create a -:class:`~pymongo.mongo_client.MongoClient` to the running **mongod** -instance. Doing so is easy: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> client = MongoClient() - -The above code will connect on the default host and port. We can also -specify the host and port explicitly, as follows: - -.. doctest:: - - >>> client = MongoClient('localhost', 27017) - -Or use the MongoDB URI format: - -.. doctest:: - - >>> client = MongoClient('mongodb://localhost:27017/') - -Getting a Database ------------------- -A single instance of MongoDB can support multiple independent -`databases `_. When -working with PyMongo you access databases using attribute style access -on :class:`~pymongo.mongo_client.MongoClient` instances: - -.. doctest:: - - >>> db = client.test_database - -If your database name is such that using attribute style access won't -work (like ``test-database``), you can use dictionary style access -instead: - -.. doctest:: - - >>> db = client['test-database'] - -Getting a Collection --------------------- -A `collection `_ is a -group of documents stored in MongoDB, and can be thought of as roughly -the equivalent of a table in a relational database. Getting a -collection in PyMongo works the same as getting a database: - -.. doctest:: - - >>> collection = db.test_collection - -or (using dictionary style access): - -.. doctest:: - - >>> collection = db['test-collection'] - -An important note about collections (and databases) in MongoDB is that -they are created lazily - none of the above commands have actually -performed any operations on the MongoDB server. Collections and -databases are created when the first document is inserted into them. - -Documents ---------- -Data in MongoDB is represented (and stored) using JSON-style -documents. In PyMongo we use dictionaries to represent documents. As -an example, the following dictionary might be used to represent a blog -post: - -.. doctest:: - - >>> import datetime - >>> post = {"author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow()} - -Note that documents can contain native Python types (like -:class:`datetime.datetime` instances) which will be automatically -converted to and from the appropriate `BSON -`_ types. - -.. todo:: link to table of Python <-> BSON types - -Inserting a Document --------------------- -To insert a document into a collection we can use the -:meth:`~pymongo.collection.Collection.insert` method: - -.. 
doctest:: - - >>> posts = db.posts - >>> post_id = posts.insert(post) - >>> post_id - ObjectId('...') - -When a document is inserted a special key, ``"_id"``, is automatically -added if the document doesn't already contain an ``"_id"`` key. The value -of ``"_id"`` must be unique across the -collection. :meth:`~pymongo.collection.Collection.insert` returns the -value of ``"_id"`` for the inserted document. For more information, see the -`documentation on _id -`_. - -.. todo:: notes on the differences between save and insert - -After inserting the first document, the *posts* collection has -actually been created on the server. We can verify this by listing all -of the collections in our database: - -.. doctest:: - - >>> db.collection_names() - [u'system.indexes', u'posts'] - -.. note:: The *system.indexes* collection is a special internal - collection that was created automatically. - -Getting a Single Document With :meth:`~pymongo.collection.Collection.find_one` ------------------------------------------------------------------------------- -The most basic type of query that can be performed in MongoDB is -:meth:`~pymongo.collection.Collection.find_one`. This method returns a -single document matching a query (or ``None`` if there are no -matches). It is useful when you know there is only one matching -document, or are only interested in the first match. Here we use -:meth:`~pymongo.collection.Collection.find_one` to get the first -document from the posts collection: - -.. doctest:: - - >>> posts.find_one() - {u'date': datetime.datetime(...), u'text': u'My first blog post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'mongodb', u'python', u'pymongo']} - -The result is a dictionary matching the one that we inserted previously. - -.. note:: The returned document contains an ``"_id"``, which was - automatically added on insert. - -:meth:`~pymongo.collection.Collection.find_one` also supports querying -on specific elements that the resulting document must match. To limit -our results to a document with author "Mike" we do: - -.. doctest:: - - >>> posts.find_one({"author": "Mike"}) - {u'date': datetime.datetime(...), u'text': u'My first blog post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'mongodb', u'python', u'pymongo']} - -If we try with a different author, like "Eliot", we'll get no result: - -.. doctest:: - - >>> posts.find_one({"author": "Eliot"}) - >>> - -.. _querying-by-objectid: - -Querying By ObjectId --------------------- -We can also find a post by its ``_id``, which in our example is an ObjectId: - -.. doctest:: - - >>> post_id - ObjectId(...) - >>> posts.find_one({"_id": post_id}) - {u'date': datetime.datetime(...), u'text': u'My first blog post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'mongodb', u'python', u'pymongo']} - -Note that an ObjectId is not the same as its string representation: - -.. doctest:: - - >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result - >>> - -A common task in web applications is to get an ObjectId from the -request URL and find the matching document. It's necessary in this -case to **convert the ObjectId from a string** before passing it to -``find_one``:: - - from bson.objectid import ObjectId - - # The web framework gets post_id from the URL and passes it as a string - def get(post_id): - # Convert from string to ObjectId: - document = client.db.collection.find_one({'_id': ObjectId(post_id)}) - -.. 
seealso:: :ref:`web-application-querying-by-objectid` - -A Note On Unicode Strings -------------------------- -You probably noticed that the regular Python strings we stored earlier look -different when retrieved from the server (e.g. u'Mike' instead of 'Mike'). -A short explanation is in order. - -MongoDB stores data in `BSON format `_. BSON strings are -UTF-8 encoded so PyMongo must ensure that any strings it stores contain only -valid UTF-8 data. Regular strings () are validated and stored -unaltered. Unicode strings () are encoded UTF-8 first. The -reason our example string is represented in the Python shell as u'Mike' instead -of 'Mike' is that PyMongo decodes each BSON string to a Python unicode string, -not a regular str. - -`You can read more about Python unicode strings here -`_. - -Bulk Inserts ------------- -In order to make querying a little more interesting, let's insert a -few more documents. In addition to inserting a single document, we can -also perform *bulk insert* operations, by passing an iterable as the -first argument to :meth:`~pymongo.collection.Collection.insert`. This -will insert each document in the iterable, sending only a single -command to the server: - -.. doctest:: - - >>> new_posts = [{"author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14)}, - ... {"author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45)}] - >>> posts.insert(new_posts) - [ObjectId('...'), ObjectId('...')] - -There are a couple of interesting things to note about this example: - - - The call to :meth:`~pymongo.collection.Collection.insert` now - returns two :class:`~bson.objectid.ObjectId` instances, one for - each inserted document. - - ``new_posts[1]`` has a different "shape" than the other posts - - there is no ``"tags"`` field and we've added a new field, - ``"title"``. This is what we mean when we say that MongoDB is - *schema-free*. - -Querying for More Than One Document ------------------------------------ -To get more than a single document as the result of a query we use the -:meth:`~pymongo.collection.Collection.find` -method. :meth:`~pymongo.collection.Collection.find` returns a -:class:`~pymongo.cursor.Cursor` instance, which allows us to iterate -over all matching documents. For example, we can iterate over every -document in the ``posts`` collection: - -.. doctest:: - - >>> for post in posts.find(): - ... post - ... - {u'date': datetime.datetime(...), u'text': u'My first blog post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'mongodb', u'python', u'pymongo']} - {u'date': datetime.datetime(2009, 11, 12, 11, 14), u'text': u'Another post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'bulk', u'insert']} - {u'date': datetime.datetime(2009, 11, 10, 10, 45), u'text': u'and pretty easy too!', u'_id': ObjectId('...'), u'author': u'Eliot', u'title': u'MongoDB is fun'} - -Just like we did with :meth:`~pymongo.collection.Collection.find_one`, -we can pass a document to :meth:`~pymongo.collection.Collection.find` -to limit the returned results. Here, we get only those documents whose -author is "Mike": - -.. doctest:: - - >>> for post in posts.find({"author": "Mike"}): - ... post - ... 
- {u'date': datetime.datetime(...), u'text': u'My first blog post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'mongodb', u'python', u'pymongo']} - {u'date': datetime.datetime(2009, 11, 12, 11, 14), u'text': u'Another post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'bulk', u'insert']} - -Counting --------- -If we just want to know how many documents match a query we can -perform a :meth:`~pymongo.cursor.Cursor.count` operation instead of a -full query. We can get a count of all of the documents in a -collection: - -.. doctest:: - - >>> posts.count() - 3 - -or just of those documents that match a specific query: - -.. doctest:: - - >>> posts.find({"author": "Mike"}).count() - 2 - -Range Queries -------------- -MongoDB supports many different types of `advanced queries -`_. As an -example, lets perform a query where we limit results to posts older -than a certain date, but also sort the results by author: - -.. doctest:: - - >>> d = datetime.datetime(2009, 11, 12, 12) - >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... print post - ... - {u'date': datetime.datetime(2009, 11, 10, 10, 45), u'text': u'and pretty easy too!', u'_id': ObjectId('...'), u'author': u'Eliot', u'title': u'MongoDB is fun'} - {u'date': datetime.datetime(2009, 11, 12, 11, 14), u'text': u'Another post!', u'_id': ObjectId('...'), u'author': u'Mike', u'tags': [u'bulk', u'insert']} - -Here we use the special ``"$lt"`` operator to do a range query, and -also call :meth:`~pymongo.cursor.Cursor.sort` to sort the results -by author. - -Indexing --------- -To make the above query fast we can add a compound index on -``"date"`` and ``"author"``. To start, lets use the -:meth:`~pymongo.cursor.Cursor.explain` method to get some information -about how the query is being performed without the index: - -.. doctest:: - - >>> posts.find({"date": {"$lt": d}}).sort("author").explain()["cursor"] - u'BasicCursor' - >>> posts.find({"date": {"$lt": d}}).sort("author").explain()["nscanned"] - 3 - -We can see that the query is using the *BasicCursor* and scanning over -all 3 documents in the collection. Now let's add a compound index and -look at the same information: - -.. doctest:: - - >>> from pymongo import ASCENDING, DESCENDING - >>> posts.create_index([("date", DESCENDING), ("author", ASCENDING)]) - u'date_-1_author_1' - >>> posts.find({"date": {"$lt": d}}).sort("author").explain()["cursor"] - u'BtreeCursor date_-1_author_1' - >>> posts.find({"date": {"$lt": d}}).sort("author").explain()["nscanned"] - 2 - -Now the query is using a *BtreeCursor* (the index) and only scanning -over the 2 matching documents. - -.. seealso:: The MongoDB documentation on `indexes `_ diff --git a/ez_setup.py b/ez_setup.py deleted file mode 100644 index 9dc2c8729b..0000000000 --- a/ez_setup.py +++ /dev/null @@ -1,382 +0,0 @@ -#!python -"""Bootstrap setuptools installation - -If you want to use setuptools in your package's setup.py, just include this -file in the same directory with it, and add this to the top of your setup.py:: - - from ez_setup import use_setuptools - use_setuptools() - -If you want to require a specific version of setuptools, set a download -mirror, or use an alternate download directory, you can do so by supplying -the appropriate options to ``use_setuptools()``. - -This file can also be run as a script to install or upgrade setuptools. 
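The tutorial passages removed above document PyMongo's legacy 2.x API: ``Collection.insert``, ``Cursor.count``, ``Database.collection_names``, and index-backed queries inspected through ``Cursor.explain``. As a reading aid for this diff, here is a rough sketch of the same flow against the modern CRUD API; the client address, database, and collection names are illustrative, and a running ``mongod`` is assumed::

    import datetime

    from pymongo import ASCENDING, DESCENDING, MongoClient

    client = MongoClient()  # assumes mongod on localhost:27017
    posts = client.test_database.posts

    # insert() was split into insert_one() and insert_many() in PyMongo 3.0.
    post_id = posts.insert_one(
        {"author": "Mike", "text": "My first blog post!",
         "date": datetime.datetime(2009, 11, 12, 11, 14)}
    ).inserted_id

    result = posts.insert_many([
        {"author": "Mike", "text": "Another post!"},
        {"author": "Eliot", "title": "MongoDB is fun"},
    ])
    print(result.inserted_ids)  # one ObjectId per inserted document

    # Cursor.count() is gone; count server-side on the collection instead.
    print(posts.count_documents({"author": "Mike"}))

    # The compound index from the removed Indexing section, unchanged in spirit.
    posts.create_index([("date", DESCENDING), ("author", ASCENDING)])

    # collection_names() became list_collection_names().
    print(client.test_database.list_collection_names())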
-""" -import os -import shutil -import sys -import tempfile -import tarfile -import optparse -import subprocess -import platform - -from distutils import log - -try: - from site import USER_SITE -except ImportError: - USER_SITE = None - -DEFAULT_VERSION = "1.4.2" -DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" - -def _python_cmd(*args): - args = (sys.executable,) + args - return subprocess.call(args) == 0 - -def _check_call_py24(cmd, *args, **kwargs): - res = subprocess.call(cmd, *args, **kwargs) - class CalledProcessError(Exception): - pass - if not res == 0: - msg = "Command '%s' return non-zero exit status %d" % (cmd, res) - raise CalledProcessError(msg) -vars(subprocess).setdefault('check_call', _check_call_py24) - -def _install(tarball, install_args=()): - # extracting the tarball - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - tar = tarfile.open(tarball) - _extractall(tar) - tar.close() - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - - # installing - log.warn('Installing Setuptools') - if not _python_cmd('setup.py', 'install', *install_args): - log.warn('Something went wrong during the installation.') - log.warn('See the error message above.') - # exitcode will be 2 - return 2 - finally: - os.chdir(old_wd) - shutil.rmtree(tmpdir) - - -def _build_egg(egg, tarball, to_dir): - # extracting the tarball - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - tar = tarfile.open(tarball) - _extractall(tar) - tar.close() - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - - # building an egg - log.warn('Building a Setuptools egg in %s', to_dir) - _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) - - finally: - os.chdir(old_wd) - shutil.rmtree(tmpdir) - # returning the result - log.warn(egg) - if not os.path.exists(egg): - raise IOError('Could not build the egg.') - - -def _do_download(version, download_base, to_dir, download_delay): - egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' - % (version, sys.version_info[0], sys.version_info[1])) - if not os.path.exists(egg): - tarball = download_setuptools(version, download_base, - to_dir, download_delay) - _build_egg(egg, tarball, to_dir) - sys.path.insert(0, egg) - - # Remove previously-imported pkg_resources if present (see - # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). - if 'pkg_resources' in sys.modules: - del sys.modules['pkg_resources'] - - import setuptools - setuptools.bootstrap_install_from = egg - - -def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=os.curdir, download_delay=15): - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - was_imported = 'pkg_resources' in sys.modules or \ - 'setuptools' in sys.modules - try: - import pkg_resources - except ImportError: - return _do_download(version, download_base, to_dir, download_delay) - try: - pkg_resources.require("setuptools>=" + version) - return - except pkg_resources.VersionConflict: - e = sys.exc_info()[1] - if was_imported: - sys.stderr.write( - "The required version of setuptools (>=%s) is not available,\n" - "and can't be installed while this script is running. 
Please\n" - "install a more recent version first, using\n" - "'easy_install -U setuptools'." - "\n\n(Currently using %r)\n" % (version, e.args[0])) - sys.exit(2) - else: - del pkg_resources, sys.modules['pkg_resources'] # reload ok - return _do_download(version, download_base, to_dir, - download_delay) - except pkg_resources.DistributionNotFound: - return _do_download(version, download_base, to_dir, - download_delay) - -def _clean_check(cmd, target): - """ - Run the command to download target. If the command fails, clean up before - re-raising the error. - """ - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - if os.access(target, os.F_OK): - os.unlink(target) - raise - -def download_file_powershell(url, target): - """ - Download the file at url to target using Powershell (which will validate - trust). Raise an exception if the command cannot complete. - """ - target = os.path.abspath(target) - cmd = [ - 'powershell', - '-Command', - "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(), - ] - _clean_check(cmd, target) - -def has_powershell(): - if platform.system() != 'Windows': - return False - cmd = ['powershell', '-Command', 'echo test'] - devnull = open(os.path.devnull, 'wb') - try: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except: - return False - finally: - devnull.close() - return True - -download_file_powershell.viable = has_powershell - -def download_file_curl(url, target): - cmd = ['curl', url, '--silent', '--output', target] - _clean_check(cmd, target) - -def has_curl(): - cmd = ['curl', '--version'] - devnull = open(os.path.devnull, 'wb') - try: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except: - return False - finally: - devnull.close() - return True - -download_file_curl.viable = has_curl - -def download_file_wget(url, target): - cmd = ['wget', url, '--quiet', '--output-document', target] - _clean_check(cmd, target) - -def has_wget(): - cmd = ['wget', '--version'] - devnull = open(os.path.devnull, 'wb') - try: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except: - return False - finally: - devnull.close() - return True - -download_file_wget.viable = has_wget - -def download_file_insecure(url, target): - """ - Use Python to download the file, even though it cannot authenticate the - connection. - """ - try: - from urllib.request import urlopen - except ImportError: - from urllib2 import urlopen - src = dst = None - try: - src = urlopen(url) - # Read/write all in one block, so we don't create a corrupt file - # if the download is interrupted. - data = src.read() - dst = open(target, "wb") - dst.write(data) - finally: - if src: - src.close() - if dst: - dst.close() - -download_file_insecure.viable = lambda: True - -def get_best_downloader(): - downloaders = [ - download_file_powershell, - download_file_curl, - download_file_wget, - download_file_insecure, - ] - - for dl in downloaders: - if dl.viable(): - return dl - -def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=os.curdir, delay=15, - downloader_factory=get_best_downloader): - """Download setuptools from a specified location and return its filename - - `version` should be a valid setuptools version number that is available - as an egg for download under the `download_base` URL (which should end - with a '/'). `to_dir` is the directory where the egg will be downloaded. - `delay` is the number of seconds to pause before an actual download - attempt. 
- - ``downloader_factory`` should be a function taking no arguments and - returning a function for downloading a URL to a target. - """ - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - tgz_name = "setuptools-%s.tar.gz" % version - url = download_base + tgz_name - saveto = os.path.join(to_dir, tgz_name) - if not os.path.exists(saveto): # Avoid repeated downloads - log.warn("Downloading %s", url) - downloader = downloader_factory() - downloader(url, saveto) - return os.path.realpath(saveto) - - -def _extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - import copy - import operator - from tarfile import ExtractError - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 448 # decimal for oct 0700 - self.extract(tarinfo, path) - - # Reverse sort directories. - if sys.version_info < (2, 4): - def sorter(dir1, dir2): - return cmp(dir1.name, dir2.name) - directories.sort(sorter) - directories.reverse() - else: - directories.sort(key=operator.attrgetter('name'), reverse=True) - - # Set correct owner, mtime and filemode on directories. - for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError: - e = sys.exc_info()[1] - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - -def _build_install_args(options): - """ - Build the arguments to 'python setup.py install' on the setuptools package - """ - install_args = [] - if options.user_install: - if sys.version_info < (2, 6): - log.warn("--user requires Python 2.6 or later") - raise SystemExit(1) - install_args.append('--user') - return install_args - -def _parse_args(): - """ - Parse the command line for options - """ - parser = optparse.OptionParser() - parser.add_option( - '--user', dest='user_install', action='store_true', default=False, - help='install in user site package (requires Python 2.6 or later)') - parser.add_option( - '--download-base', dest='download_base', metavar="URL", - default=DEFAULT_URL, - help='alternative URL from where to download the setuptools package') - parser.add_option( - '--insecure', dest='downloader_factory', action='store_const', - const=lambda: download_file_insecure, default=get_best_downloader, - help='Use internal, non-validating downloader' - ) - options, args = parser.parse_args() - # positional arguments are ignored - return options - -def main(version=DEFAULT_VERSION): - """Install or upgrade setuptools and EasyInstall""" - options = _parse_args() - tarball = download_setuptools(download_base=options.download_base, - downloader_factory=options.downloader_factory) - return _install(tarball, _build_install_args(options)) - -if __name__ == '__main__': - sys.exit(main()) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 25bd3a699f..8173561beb 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. 
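The ``ez_setup.py`` bootstrap deleted above picks a download helper by walking a priority list (PowerShell, curl, wget, then a non-validating urllib fallback) and using each candidate's ``viable`` hook to decide whether it can run on the current host. A minimal sketch of that fallback pattern, with illustrative helpers rather than the originals::

    import shutil
    import subprocess
    import urllib.request

    def download_with_curl(url, target):
        subprocess.check_call(["curl", url, "--silent", "--output", target])

    download_with_curl.viable = lambda: shutil.which("curl") is not None

    def download_insecure(url, target):
        # Last resort: plain urllib, without the trust validation
        # the preferred tools provide.
        with urllib.request.urlopen(url) as src, open(target, "wb") as dst:
            dst.write(src.read())

    download_insecure.viable = lambda: True

    def get_best_downloader():
        # The first candidate whose viable() check passes wins,
        # mirroring ez_setup's ordering.
        for candidate in (download_with_curl, download_insecure):
            if candidate.viable():
                return candidate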
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,386 +17,38 @@ The :mod:`gridfs` package is an implementation of GridFS on top of :mod:`pymongo`, exposing a file-like interface. -.. mongodoc:: gridfs +.. seealso:: The MongoDB documentation on `gridfs `_. """ - -from gridfs.errors import (NoFile, - UnsupportedAPI) -from gridfs.grid_file import (GridIn, - GridOut, - GridOutCursor) -from pymongo import (MongoClient, - ASCENDING, - DESCENDING) -from pymongo.database import Database - - -class GridFS(object): - """An instance of GridFS on top of a single Database. - """ - def __init__(self, database, collection="fs", _connect=True): - """Create a new instance of :class:`GridFS`. - - Raises :class:`TypeError` if `database` is not an instance of - :class:`~pymongo.database.Database`. - - :Parameters: - - `database`: database to use - - `collection` (optional): root collection to use - - .. versionadded:: 1.6 - The `collection` parameter. - - .. mongodoc:: gridfs - """ - if not isinstance(database, Database): - raise TypeError("database must be an instance of Database") - - self.__database = database - self.__collection = database[collection] - self.__files = self.__collection.files - self.__chunks = self.__collection.chunks - if _connect: - self.__ensure_index_files_id() - - def __is_secondary(self): - client = self.__database.connection - - # Connect the client, so we know if it's connected to the primary. - client._ensure_connected() - return isinstance(client, MongoClient) and not client.is_primary - - def __ensure_index_files_id(self): - if not self.__is_secondary(): - self.__chunks.ensure_index([("files_id", ASCENDING), - ("n", ASCENDING)], - unique=True) - - def __ensure_index_filename(self): - if not self.__is_secondary(): - self.__files.ensure_index([("filename", ASCENDING), - ("uploadDate", DESCENDING)]) - - def new_file(self, **kwargs): - """Create a new file in GridFS. - - Returns a new :class:`~gridfs.grid_file.GridIn` instance to - which data can be written. Any keyword arguments will be - passed through to :meth:`~gridfs.grid_file.GridIn`. - - If the ``"_id"`` of the file is manually specified, it must - not already exist in GridFS. Otherwise - :class:`~gridfs.errors.FileExists` is raised. - - :Parameters: - - `**kwargs` (optional): keyword arguments for file creation - - .. versionadded:: 1.6 - """ - # No need for __ensure_index_files_id() here; GridIn ensures - # the (files_id, n) index when needed. - return GridIn(self.__collection, **kwargs) - - def put(self, data, **kwargs): - """Put data in GridFS as a new file. - - Equivalent to doing:: - - try: - f = new_file(**kwargs) - f.write(data) - finally: - f.close() - - `data` can be either an instance of :class:`str` (:class:`bytes` - in python 3) or a file-like object providing a :meth:`read` method. - If an `encoding` keyword argument is passed, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which will - be encoded as `encoding` before being written. Any keyword arguments - will be passed through to the created file - see - :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the - ``"_id"`` of the created file. - - If the ``"_id"`` of the file is manually specified, it must - not already exist in GridFS. Otherwise - :class:`~gridfs.errors.FileExists` is raised. - - :Parameters: - - `data`: data to be written as a file. - - `**kwargs` (optional): keyword arguments for file creation - - .. 
versionadded:: 1.9 - The ability to write :class:`unicode`, if an `encoding` has - been specified as a keyword argument. - - .. versionadded:: 1.6 - """ - grid_file = GridIn(self.__collection, **kwargs) - - # Start a request - necessary if w=0, harmless otherwise - request = self.__collection.database.connection.start_request() - try: - try: - grid_file.write(data) - finally: - grid_file.close() - finally: - # Ensure request is ended even if close() throws error - request.end() - return grid_file._id - - def get(self, file_id): - """Get a file from GridFS by ``"_id"``. - - Returns an instance of :class:`~gridfs.grid_file.GridOut`, - which provides a file-like interface for reading. - - :Parameters: - - `file_id`: ``"_id"`` of the file to get - - .. versionadded:: 1.6 - """ - return GridOut(self.__collection, file_id) - - def get_version(self, filename=None, version=-1, **kwargs): - """Get a file from GridFS by ``"filename"`` or metadata fields. - - Returns a version of the file in GridFS whose filename matches - `filename` and whose metadata fields match the supplied keyword - arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. - - Version numbering is a convenience atop the GridFS API provided - by MongoDB. If more than one file matches the query (either by - `filename` alone, by metadata fields, or by a combination of - both), then version ``-1`` will be the most recently uploaded - matching file, ``-2`` the second most recently - uploaded, etc. Version ``0`` will be the first version - uploaded, ``1`` the second version, etc. So if three versions - have been uploaded, then version ``0`` is the same as version - ``-3``, version ``1`` is the same as version ``-2``, and - version ``2`` is the same as version ``-1``. - - Raises :class:`~gridfs.errors.NoFile` if no such version of - that file exists. - - An index on ``{filename: 1, uploadDate: -1}`` will - automatically be created when this method is called the first - time. - - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `version` (optional): version of the file to get (defaults - to -1, the most recent version uploaded) - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 1.11 - `filename` defaults to None; - .. versionadded:: 1.11 - Accept keyword arguments to find files by custom metadata. - .. versionadded:: 1.9 - """ - self.__ensure_index_filename() - query = kwargs - if filename is not None: - query["filename"] = filename - - cursor = self.__files.find(query) - if version < 0: - skip = abs(version) - 1 - cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) - else: - cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) - try: - grid_file = cursor.next() - return GridOut(self.__collection, file_document=grid_file) - except StopIteration: - raise NoFile("no version %d for filename %r" % (version, filename)) - - def get_last_version(self, filename=None, **kwargs): - """Get the most recent version of a file in GridFS by ``"filename"`` - or metadata fields. - - Equivalent to calling :meth:`get_version` with the default - `version` (``-1``). - - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 1.11 - `filename` defaults to None; - .. versionadded:: 1.11 - Accept keyword arguments to find files by custom metadata. See - :meth:`get_version`. - .. 
versionadded:: 1.6 - """ - return self.get_version(filename=filename, **kwargs) - - # TODO add optional safe mode for chunk removal? - def delete(self, file_id): - """Delete a file from GridFS by ``"_id"``. - - Removes all data belonging to the file with ``"_id"``: - `file_id`. - - .. warning:: Any processes/threads reading from the file while - this method is executing will likely see an invalid/corrupt - file. Care should be taken to avoid concurrent reads to a file - while it is being deleted. - - .. note:: Deletes of non-existent files are considered successful - since the end result is the same: no file with that _id remains. - - :Parameters: - - `file_id`: ``"_id"`` of the file to delete - - .. versionadded:: 1.6 - """ - self.__ensure_index_files_id() - self.__files.remove({"_id": file_id}, - **self.__files._get_wc_override()) - self.__chunks.remove({"files_id": file_id}) - - def list(self): - """List the names of all files stored in this instance of - :class:`GridFS`. - - An index on ``{filename: 1, uploadDate: -1}`` will - automatically be created when this method is called the first - time. - - .. versionchanged:: 2.7 - ``list`` ensures an index, the same as ``get_version``. - - .. versionchanged:: 1.6 - Removed the `collection` argument. - """ - self.__ensure_index_filename() - - # With an index, distinct includes documents with no filename - # as None. - return [ - name for name in self.__files.distinct("filename") - if name is not None] - - def find(self, *args, **kwargs): - """Query GridFS for files. - - Returns a cursor that iterates across files matching - arbitrary queries on the files collection. Can be combined - with other modifiers for additional control. For example:: - - for grid_out in fs.find({"filename": "lisa.txt"}, timeout=False): - data = grid_out.read() - - would iterate through all versions of "lisa.txt" stored in GridFS. - Note that setting timeout to False may be important to prevent the - cursor from timing out during long multi-file processing work. - - As another example, the call:: - - most_recent_three = fs.find().sort("uploadDate", -1).limit(3) - - would return a cursor to the three most recently uploaded files - in GridFS. - - Follows a similar interface to - :meth:`~pymongo.collection.Collection.find` - in :class:`~pymongo.collection.Collection`. - - :Parameters: - - `spec` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set - - `skip` (optional): the number of files to omit (from - the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to - return - - `timeout` (optional): if True (the default), any returned - cursor is closed by the server after 10 minutes of - inactivity. If set to False, the returned cursor will never - time out on the server. Care should be taken to ensure that - cursors with timeout turned off are properly closed. - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for this query. See - :meth:`~pymongo.cursor.Cursor.sort` for details. - - `max_scan` (optional): limit the number of file documents - examined when performing the query - - `read_preference` (optional): The read preference for - this query. - - `tag_sets` (optional): The tag sets for this query. - - `secondary_acceptable_latency_ms` (optional): Any replica-set - member whose ping time is within secondary_acceptable_latency_ms of - the nearest member may accept reads. Default 15 milliseconds. 
- **Ignored by mongos** and must be configured on the command line. - See the localThreshold_ option for more information. - - `compile_re` (optional): if ``False``, don't attempt to compile - BSON regex objects into Python regexes. Return instances of - :class:`~bson.regex.Regex` instead. - - Raises :class:`TypeError` if any of the arguments are of - improper type. Returns an instance of - :class:`~gridfs.grid_file.GridOutCursor` - corresponding to this query. - - .. versionadded:: 2.7 - .. mongodoc:: find - .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - return GridOutCursor(self.__collection, *args, **kwargs) - - def exists(self, document_or_id=None, **kwargs): - """Check if a file exists in this instance of :class:`GridFS`. - - The file to check for can be specified by the value of its - ``_id`` key, or by passing in a query document. A query - document can be passed in as dictionary, or by using keyword - arguments. Thus, the following three calls are equivalent: - - >>> fs.exists(file_id) - >>> fs.exists({"_id": file_id}) - >>> fs.exists(_id=file_id) - - As are the following two calls: - - >>> fs.exists({"filename": "mike.txt"}) - >>> fs.exists(filename="mike.txt") - - And the following two: - - >>> fs.exists({"foo": {"$gt": 12}}) - >>> fs.exists(foo={"$gt": 12}) - - Returns ``True`` if a matching file exists, ``False`` - otherwise. Calls to :meth:`exists` will not automatically - create appropriate indexes; application developers should be - sure to create indexes if needed and as appropriate. - - :Parameters: - - `document_or_id` (optional): query document, or _id of the - document to check for - - `**kwargs` (optional): keyword arguments are used as a - query document, if they're present. - - .. versionadded:: 1.8 - """ - if kwargs: - return self.__files.find_one(kwargs, ["_id"]) is not None - return self.__files.find_one(document_or_id, ["_id"]) is not None - - def open(self, *args, **kwargs): - """No longer supported. - - .. versionchanged:: 1.6 - The open method is no longer supported. - """ - raise UnsupportedAPI("The open method is no longer supported.") - - def remove(self, *args, **kwargs): - """No longer supported. - - .. versionchanged:: 1.6 - The remove method is no longer supported. - """ - raise UnsupportedAPI("The remove method is no longer supported. " - "Please use the delete method instead.") +from __future__ import annotations + +from gridfs.asynchronous.grid_file import ( + AsyncGridFS, + AsyncGridFSBucket, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE +from gridfs.synchronous.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutCursor, +) + +__all__ = [ + "AsyncGridFS", + "GridFS", + "AsyncGridFSBucket", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "AsyncGridIn", + "GridIn", + "AsyncGridOut", + "GridOut", + "AsyncGridOutCursor", + "GridOutCursor", +] diff --git a/gridfs/asynchronous/__init__.py b/gridfs/asynchronous/__init__.py new file mode 100644 index 0000000000..0826145b11 --- /dev/null +++ b/gridfs/asynchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.asynchronous.grid_file import ( + AsyncGridFS, + AsyncGridFSBucket, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE + +__all__ = [ + "AsyncGridFS", + "AsyncGridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "AsyncGridIn", + "AsyncGridOut", + "AsyncGridOutCursor", +] diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py new file mode 100644 index 0000000000..69a2200d3b --- /dev/null +++ b/gridfs/asynchronous/grid_file.py @@ -0,0 +1,2007 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
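As the rewritten package ``__init__`` modules above show, the GridFS implementation is now split into parallel synchronous and asynchronous variants: everything is re-exported from the ``gridfs`` root, while ``gridfs.asynchronous`` exposes only the async classes. For example::

    from gridfs import AsyncGridFSBucket, GridFSBucket  # both at the package root
    from gridfs.asynchronous import AsyncGridFS         # async-only namespace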
+ +"""Tools for representing files stored in GridFS.""" +from __future__ import annotations + +import datetime +import inspect +import io +import math +from collections import abc +from typing import Any, Iterable, Mapping, NoReturn, Optional, cast + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file_shared import ( + _C_INDEX, + _CHUNK_OVERHEAD, + _F_INDEX, + _SEEK_CUR, + _SEEK_END, + _SEEK_SET, + _UPLOAD_BUFFER_CHUNKS, + _UPLOAD_BUFFER_SIZE, + DEFAULT_CHUNK_SIZE, + EMPTY, + NEWLN, + _a_grid_in_property, + _a_grid_out_property, + _clear_entity_type_registry, +) +from pymongo import ASCENDING, DESCENDING, WriteConcern, _csot +from pymongo.asynchronous.client_session import AsyncClientSession +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.common import validate_string +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.read_preferences import ReadPreference, _ServerMode + +_IS_SYNC = False + + +def _disallow_transactions(session: Optional[AsyncClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class AsyncGridFS: + """An instance of GridFS on top of a single Database.""" + + def __init__(self, database: AsyncDatabase[Any], collection: str = "fs"): + """Create a new instance of :class:`GridFS`. + + Raises :class:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + :param database: database to use + :param collection: root collection to use + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.1 + Indexes are only ensured on the first write to the DB. + + .. versionchanged:: 3.0 + `database` must use an acknowledged + :attr:`~pymongo.database.Database.write_concern` + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(database, AsyncDatabase): + raise TypeError(f"database must be an instance of Database, not {type(database)}") + + database = _clear_entity_type_registry(database) + + if not database.write_concern.acknowledged: + raise ConfigurationError("database must use acknowledged write_concern") + + self._collection = database[collection] + self._files = self._collection.files + self._chunks = self._collection.chunks + + def new_file(self, **kwargs: Any) -> AsyncGridIn: + """Create a new file in GridFS. + + Returns a new :class:`~gridfs.grid_file.GridIn` instance to + which data can be written. Any keyword arguments will be + passed through to :meth:`~gridfs.grid_file.GridIn`. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. 
+ + :param kwargs: keyword arguments for file creation + """ + return AsyncGridIn(self._collection, **kwargs) + + async def put(self, data: Any, **kwargs: Any) -> Any: + """Put data in GridFS as a new file. + + Equivalent to doing:: + + with fs.new_file(**kwargs) as f: + f.write(data) + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see + :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the + ``"_id"`` of the created file. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation + + .. versionchanged:: 3.0 + w=0 writes to GridFS are now prohibited. + """ + async with AsyncGridIn(self._collection, **kwargs) as grid_file: + await grid_file.write(data) + return grid_file._id + + async def get(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> AsyncGridOut: + """Get a file from GridFS by ``"_id"``. + + Returns an instance of :class:`~gridfs.grid_file.GridOut`, + which provides a file-like interface for reading. + + :param file_id: ``"_id"`` of the file to get + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = AsyncGridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + await gout.open() + return gout + + async def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> AsyncGridOut: + """Get a file from GridFS by ``"filename"`` or metadata fields. + + Returns a version of the file in GridFS whose filename matches + `filename` and whose metadata fields match the supplied keyword + arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. + + Version numbering is a convenience atop the GridFS API provided + by MongoDB. If more than one file matches the query (either by + `filename` alone, by metadata fields, or by a combination of + both), then version ``-1`` will be the most recently uploaded + matching file, ``-2`` the second most recently + uploaded, etc. Version ``0`` will be the first version + uploaded, ``1`` the second version, etc. So if three versions + have been uploaded, then version ``0`` is the same as version + ``-3``, version ``1`` is the same as version ``-2``, and + version ``2`` is the same as version ``-1``. + + Raises :class:`~gridfs.errors.NoFile` if no such version of + that file exists. + + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults + to -1, the most recent version uploaded) + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``get_version`` no longer ensures indexes. 
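The version-numbering contract described above is easiest to see with three uploads of the same filename. Inside a coroutine, with ``fs`` an ``AsyncGridFS`` as sketched earlier::

    for payload in (b"v0", b"v1", b"v2"):
        await fs.put(payload, filename="report.txt")

    newest = await fs.get_version("report.txt")            # version -1
    assert await newest.read() == b"v2"

    first = await fs.get_version("report.txt", version=0)  # oldest upload
    same = await fs.get_version("report.txt", version=-3)  # also the oldest
    assert await first.read() == await same.read() == b"v0"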
+ """ + query = kwargs + if filename is not None: + query["filename"] = filename + + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if version is None: + version = -1 + if version < 0: + skip = abs(version) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + try: + doc = await anext(cursor) + return AsyncGridOut(self._collection, file_document=doc, session=session) + except StopAsyncIteration: + raise NoFile("no version %d for filename %r" % (version, filename)) from None + + async def get_last_version( + self, + filename: Optional[str] = None, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> AsyncGridOut: + """Get the most recent version of a file in GridFS by ``"filename"`` + or metadata fields. + + Equivalent to calling :meth:`get_version` with the default + `version` (``-1``). + + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return await self.get_version(filename=filename, session=session, **kwargs) + + # TODO add optional safe mode for chunk removal? + async def delete(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> None: + """Delete a file from GridFS by ``"_id"``. + + Deletes all data belonging to the file with ``"_id"``: + `file_id`. + + .. warning:: Any processes/threads reading from the file while + this method is executing will likely see an invalid/corrupt + file. Care should be taken to avoid concurrent reads to a file + while it is being deleted. + + .. note:: Deletes of non-existent files are considered successful + since the end result is the same: no file with that _id remains. + + :param file_id: ``"_id"`` of the file to delete + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``delete`` no longer ensures indexes. + """ + _disallow_transactions(session) + await self._files.delete_one({"_id": file_id}, session=session) + await self._chunks.delete_many({"files_id": file_id}, session=session) + + async def list(self, session: Optional[AsyncClientSession] = None) -> list[str]: + """List the names of all files stored in this instance of + :class:`GridFS`. + + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``list`` no longer ensures indexes. + """ + _disallow_transactions(session) + # With an index, distinct includes documents with no filename + # as None. + return [ + name + for name in await self._files.distinct("filename", session=session) + if name is not None + ] + + async def find_one( + self, + filter: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + *args: Any, + **kwargs: Any, + ) -> Optional[AsyncGridOut]: + """Get a single file from gridfs. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, + or ``None`` if no matching file is found. For example: + + .. 
code-block:: python + + file = fs.find_one({"filename": "lisa.txt"}) + + :param filter: a dictionary specifying + the query to be performed OR any other type to be used as + the value for a query for ``"_id"`` in the file collection. + :param args: any additional positional arguments are + the same as the arguments to :meth:`find`. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + + _disallow_transactions(session) + async for f in self.find(filter, *args, session=session, **kwargs): + return f + + return None + + def find(self, *args: Any, **kwargs: Any) -> AsyncGridOutCursor: + """Query GridFS for files. + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. For example:: + + for grid_out in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_out.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.AsyncClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: A query document that selects which files + to include in the result set. Can be an empty document to include + all files. + :param skip: the number of files to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return + :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~gridfs.grid_file.GridOutCursor` + corresponding to this query. + + .. versionchanged:: 3.0 + Removed the read_preference, tag_sets, and + secondary_acceptable_latency_ms options. + .. versionadded:: 2.7 + .. seealso:: The MongoDB documentation on `find `_. + """ + return AsyncGridOutCursor(self._collection, *args, **kwargs) + + async def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> bool: + """Check if a file exists in this instance of :class:`GridFS`. + + The file to check for can be specified by the value of its + ``_id`` key, or by passing in a query document. A query + document can be passed in as dictionary, or by using keyword + arguments. 
Thus, the following three calls are equivalent: + + >>> fs.exists(file_id) + >>> fs.exists({"_id": file_id}) + >>> fs.exists(_id=file_id) + + As are the following two calls: + + >>> fs.exists({"filename": "mike.txt"}) + >>> fs.exists(filename="mike.txt") + + And the following two: + + >>> fs.exists({"foo": {"$gt": 12}}) + >>> fs.exists(foo={"$gt": 12}) + + Returns ``True`` if a matching file exists, ``False`` + otherwise. Calls to :meth:`exists` will not automatically + create appropriate indexes; application developers should be + sure to create indexes if needed and as appropriate. + + :param document_or_id: query document, or _id of the + document to check for + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: keyword arguments are used as a + query document, if they're present. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + if kwargs: + f = await self._files.find_one(kwargs, ["_id"], session=session) + else: + f = await self._files.find_one(document_or_id, ["_id"], session=session) + + return f is not None + + +class AsyncGridFSBucket: + """An instance of GridFS on top of a single Database.""" + + def __init__( + self, + db: AsyncDatabase[Any], + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: + """Create a new instance of :class:`GridFSBucket`. + + Raises :exc:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` + is not acknowledged. + + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults + to 255KB. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` + (the default) db.write_concern is used. + :param read_preference: The read preference to use. If + ``None`` (the default) db.read_preference is used. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionadded:: 3.1 + + .. seealso:: The MongoDB documentation on `gridfs `_. 
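A hedged construction example for the bucket options documented above; the database name is illustrative, and the write concern must be acknowledged or ``ConfigurationError`` is raised::

    from gridfs import AsyncGridFSBucket
    from pymongo import AsyncMongoClient, WriteConcern
    from pymongo.read_preferences import ReadPreference

    client = AsyncMongoClient()
    bucket = AsyncGridFSBucket(
        client.test_database,
        bucket_name="fs",                          # the default bucket name
        chunk_size_bytes=255 * 1024,               # the documented 255KB default
        write_concern=WriteConcern(w="majority"),  # must be acknowledged
        read_preference=ReadPreference.PRIMARY,
    )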
+ """ + if not isinstance(db, AsyncDatabase): + raise TypeError(f"database must be an instance of AsyncDatabase, not {type(db)}") + + db = _clear_entity_type_registry(db) + + wtc = write_concern if write_concern is not None else db.write_concern + if not wtc.acknowledged: + raise ConfigurationError("write concern must be acknowledged") + + self._bucket_name = bucket_name + self._collection = db[bucket_name] + self._chunks: AsyncCollection[Any] = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._files: AsyncCollection[Any] = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout + + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> AsyncGridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the filename, and can choose to add any + additional information in the metadata field of the file document or + modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream( + "test_file", chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return AsyncGridIn(self._collection, session=session, **opts) + + def open_upload_stream_with_id( + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> AsyncGridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the file id and filename, and can choose to add + any additional information in the metadata field of the file document + or modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream_with_id( + ObjectId(), + "test_file", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. 
The id must not have + already been used for another file. + :param filename: The name of the file to upload. + :param chunk_size_bytes: The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return AsyncGridIn(self._collection, session=session, **opts) + + @_csot.apply + async def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> ObjectId: + """Uploads a user file to a GridFS bucket. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + file_id = fs.upload_from_stream( + "test_file", + "data I want to store!", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) + + Returns the _id of the uploaded file. + + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be + a file-like object that implements :meth:`read` or a string. + :param chunk_size_bytes: The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + async with self.open_upload_stream( + filename, chunk_size_bytes, metadata, session=session + ) as gin: + await gin.write(source) + + return cast(ObjectId, gin._id) + + @_csot.apply + async def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> None: + """Uploads a user file to a GridFS bucket with a custom file id. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + fs.upload_from_stream_with_id( + ObjectId(), + "test_file", + "data I want to store!", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) + + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. The id must not have + already been used for another file. + :param filename: The name of the file to upload. 
+ :param source: The source stream of the content to be uploaded. Must be + a file-like object that implements :meth:`read` or a string. + :param chunk_size_bytes: The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + async with self.open_upload_stream_with_id( + file_id, filename, chunk_size_bytes, metadata, session=session + ) as gin: + await gin.write(source) + + async def open_download_stream( + self, file_id: Any, session: Optional[AsyncClientSession] = None + ) -> AsyncGridOut: + """Opens a Stream from which the application can read the contents of + the stored file specified by file_id. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # get _id of file to read. + file_id = fs.upload_from_stream("test_file", "data I want to store!") + grid_out = fs.open_download_stream(file_id) + contents = grid_out.read() + + Returns an instance of :class:`~gridfs.grid_file.GridOut`. + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be downloaded. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = AsyncGridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + await gout.open() + return gout + + @_csot.apply + async def download_to_stream( + self, file_id: Any, destination: Any, session: Optional[AsyncClientSession] = None + ) -> None: + """Downloads the contents of the stored file specified by file_id and + writes the contents to `destination`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get _id of file to read + file_id = fs.upload_from_stream("test_file", "data I want to store!") + # Get file to write to + file = open('myfile','wb+') + fs.download_to_stream(file_id, file) + file.seek(0) + contents = file.read() + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be downloaded. + :param destination: a file-like object implementing :meth:`write`. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + async with await self.open_download_stream(file_id, session=session) as gout: + while True: + chunk = await gout.readchunk() + if not len(chunk): + break + destination.write(chunk) + + @_csot.apply + async def delete(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> None: + """Given a file_id, delete this stored file's files collection document + and associated chunks from a GridFS bucket. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get _id of file to delete + file_id = fs.upload_from_stream("test_file", "data I want to store!") + fs.delete(file_id) + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be deleted. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
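Putting the upload, download, and delete halves documented above together, a sketch to run inside a coroutine, with ``bucket`` constructed as in the earlier example::

    import io

    file_id = await bucket.upload_from_stream(
        "test_file",
        b"data I want to store!",
        metadata={"contentType": "text/plain"},
    )

    destination = io.BytesIO()
    await bucket.download_to_stream(file_id, destination)
    assert destination.getvalue() == b"data I want to store!"

    # delete() removes the files document and all of its chunks by _id;
    # delete_by_name() (added in 4.12) does the same for every revision
    # of a filename.
    await bucket.delete(file_id)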
+ """ + _disallow_transactions(session) + res = await self._files.delete_one({"_id": file_id}, session=session) + await self._chunks.delete_many({"files_id": file_id}, session=session) + if not res.deleted_count: + raise NoFile("no file could be deleted because none matched %s" % file_id) + + @_csot.apply + async def delete_by_name( + self, filename: str, session: Optional[AsyncClientSession] = None + ) -> None: + """Given a filename, delete this stored file's files collection document(s) + and associated chunks from a GridFS bucket. + + For example:: + + my_db = AsyncMongoClient().test + fs = AsyncGridFSBucket(my_db) + await fs.upload_from_stream("test_file", "data I want to store!") + await fs.delete_by_name("test_file") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The name of the file to be deleted. + :param session: a :class:`~pymongo.client_session.AsyncClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + files = self._files.find({"filename": filename}, {"_id": 1}, session=session) + file_ids = [file["_id"] async for file in files] + res = await self._files.delete_many({"_id": {"$in": file_ids}}, session=session) + await self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session) + if not res.deleted_count: + raise NoFile(f"no file could be deleted because none matched filename {filename!r}") + + def find(self, *args: Any, **kwargs: Any) -> AsyncGridOutCursor: + """Find and return the files collection documents that match ``filter`` + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. + + For example:: + + for grid_data in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_data.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.AsyncClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: Search query. + :param batch_size: The number of documents to return per + batch. + :param limit: The maximum number of documents to return. + :param no_cursor_timeout: The server normally times out idle + cursors after an inactivity period (10 minutes) to prevent excess + memory use. Set this option to True prevent that. + :param skip: The number of documents to skip before + returning. + :param sort: The order by which to sort results. Defaults to + None. + """ + return AsyncGridOutCursor(self._collection, *args, **kwargs) + + async def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[AsyncClientSession] = None + ) -> AsyncGridOut: + """Opens a Stream from which the application can read the contents of + `filename` and optional `revision`. 
+
+        For example::
+
+          my_db = AsyncMongoClient().test
+          fs = AsyncGridFSBucket(my_db)
+          grid_out = await fs.open_download_stream_by_name("test_file")
+          contents = await grid_out.read()
+
+        Returns an instance of :class:`~gridfs.grid_file.GridOut`.
+
+        Raises :exc:`~gridfs.errors.NoFile` if no such version of
+        that file exists.
+
+        Raises :exc:`ValueError` if `filename` is not a string.
+
+        :param filename: The name of the file to read from.
+        :param revision: Which revision (documents with the same
+            filename and different uploadDate) of the file to retrieve.
+            Defaults to -1 (the most recent revision).
+        :param session: a
+            :class:`~pymongo.client_session.AsyncClientSession`
+
+        :Note: Revision numbers are defined as follows:
+
+          - 0 = the original stored file
+          - 1 = the first revision
+          - 2 = the second revision
+          - etc...
+          - -2 = the second most recent revision
+          - -1 = the most recent revision
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        validate_string("filename", filename)
+        query = {"filename": filename}
+        _disallow_transactions(session)
+        cursor = self._files.find(query, session=session)
+        if revision < 0:
+            skip = abs(revision) - 1
+            cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
+        else:
+            cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING)
+        try:
+            grid_file = await anext(cursor)
+            return AsyncGridOut(self._collection, file_document=grid_file, session=session)
+        except StopAsyncIteration:
+            raise NoFile("no version %d for filename %r" % (revision, filename)) from None
+
+    @_csot.apply
+    async def download_to_stream_by_name(
+        self,
+        filename: str,
+        destination: Any,
+        revision: int = -1,
+        session: Optional[AsyncClientSession] = None,
+    ) -> None:
+        """Write the contents of `filename` (with optional `revision`) to
+        `destination`.
+
+        For example::
+
+          my_db = AsyncMongoClient().test
+          fs = AsyncGridFSBucket(my_db)
+          # Get file to write to
+          file = open('myfile','wb')
+          await fs.download_to_stream_by_name("test_file", file)
+
+        Raises :exc:`~gridfs.errors.NoFile` if no such version of
+        that file exists.
+
+        Raises :exc:`ValueError` if `filename` is not a string.
+
+        :param filename: The name of the file to read from.
+        :param destination: A file-like object that implements :meth:`write`.
+        :param revision: Which revision (documents with the same
+            filename and different uploadDate) of the file to retrieve.
+            Defaults to -1 (the most recent revision).
+        :param session: a
+            :class:`~pymongo.client_session.AsyncClientSession`
+
+        :Note: Revision numbers are defined as follows:
+
+          - 0 = the original stored file
+          - 1 = the first revision
+          - 2 = the second revision
+          - etc...
+          - -2 = the second most recent revision
+          - -1 = the most recent revision
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        async with await self.open_download_stream_by_name(
+            filename, revision, session=session
+        ) as gout:
+            while True:
+                chunk = await gout.readchunk()
+                if not len(chunk):
+                    break
+                destination.write(chunk)
+
+    async def rename(
+        self, file_id: Any, new_filename: str, session: Optional[AsyncClientSession] = None
+    ) -> None:
+        """Renames the stored file with the specified file_id.
+
+        For example::
+
+          my_db = AsyncMongoClient().test
+          fs = AsyncGridFSBucket(my_db)
+          # Get _id of file to rename
+          file_id = await fs.upload_from_stream("test_file", "data I want to store!")
+          await fs.rename(file_id, "new_test_name")
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be renamed.
+ :param new_filename: The new name of the file. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + result = await self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) + + async def rename_by_name( + self, filename: str, new_filename: str, session: Optional[AsyncClientSession] = None + ) -> None: + """Renames the stored file with the specified filename. + + For example:: + + my_db = AsyncMongoClient().test + fs = AsyncGridFSBucket(my_db) + await fs.upload_from_stream("test_file", "data I want to store!") + await fs.rename_by_name("test_file", "new_test_name") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The filename of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a :class:`~pymongo.client_session.AsyncClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + result = await self._files.update_many( + {"filename": filename}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + f"no files could be renamed {new_filename!r} because none matched filename {filename!r}" + ) + + +class AsyncGridIn: + """Class to write data to GridFS.""" + + def __init__( + self, + root_collection: AsyncCollection[Any], + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> None: + """Write a file to GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Raises :class:`TypeError` if `root_collection` is not an + instance of :class:`~pymongo.collection.AsyncCollection`. + + Any of the file level options specified in the `GridFS Spec + `_ may be passed as + keyword arguments. Any additional keyword arguments will be + set as additional fields on the file document. Valid keyword + arguments include: + + - ``"_id"``: unique ID for this file (default: + :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must + not have already been used for another file + + - ``"filename"``: human name for the file + + - ``"contentType"`` or ``"content_type"``: valid mime-type + for the file + + - ``"chunkSize"`` or ``"chunk_size"``: size of each of the + chunks, in bytes (default: 255 kb) + + - ``"encoding"``: encoding used for this file. Any :class:`str` + that is written to the file will be converted to :class:`bytes`. + + :param root_collection: root collection to write to + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` to use for all + commands + :param kwargs: Any: file level options (see above) + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. 
versionchanged:: 3.0 + `root_collection` must use an acknowledged + :attr:`~pymongo.collection.AsyncCollection.write_concern` + """ + if not isinstance(root_collection, AsyncCollection): + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) + + if not root_collection.write_concern.acknowledged: + raise ConfigurationError("root_collection must use acknowledged write_concern") + _disallow_transactions(session) + + # Handle alternative naming + if "content_type" in kwargs: + kwargs["contentType"] = kwargs.pop("content_type") + if "chunk_size" in kwargs: + kwargs["chunkSize"] = kwargs.pop("chunk_size") + + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) + + # Defaults + kwargs["_id"] = kwargs.get("_id", ObjectId()) + kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) + object.__setattr__(self, "_session", session) + object.__setattr__(self, "_coll", coll) + object.__setattr__(self, "_chunks", coll.chunks) + object.__setattr__(self, "_file", kwargs) + object.__setattr__(self, "_buffer", io.BytesIO()) + object.__setattr__(self, "_position", 0) + object.__setattr__(self, "_chunk_number", 0) + object.__setattr__(self, "_closed", False) + object.__setattr__(self, "_ensured_index", False) + object.__setattr__(self, "_buffered_docs", []) + object.__setattr__(self, "_buffered_docs_size", 0) + + async def _create_index( + self, collection: AsyncCollection[Any], index_key: Any, unique: bool + ) -> None: + doc = await collection.find_one(projection={"_id": 1}, session=self._session) + if doc is None: + try: + index_keys = [ + index_spec["key"] + async for index_spec in await collection.list_indexes(session=self._session) + ] + except OperationFailure: + index_keys = [] + if index_key not in index_keys: + await collection.create_index( + index_key.items(), unique=unique, session=self._session + ) + + async def _ensure_indexes(self) -> None: + if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) + await self._create_index(self._coll.files, _F_INDEX, False) + await self._create_index(self._coll.chunks, _C_INDEX, True) + object.__setattr__(self, "_ensured_index", True) + + async def abort(self) -> None: + """Remove all chunks/files that may have been uploaded and close.""" + await self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + await self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) + object.__setattr__(self, "_closed", True) + + @property + def closed(self) -> bool: + """Is this file closed?""" + return self._closed + + _id: Any = _a_grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) + filename: Optional[str] = _a_grid_in_property("filename", "Name of this file.") + name: Optional[str] = _a_grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _a_grid_in_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _a_grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _a_grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _a_grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _a_grid_in_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + closed_only=True, + ) + + _buffer: io.BytesIO + _closed: bool + _buffered_docs: list[dict[str, Any]] + _buffered_docs_size: int + + def __getattr__(self, name: str) -> Any: + if name == "_coll": + return object.__getattribute__(self, name) + elif name in self._file: + return self._file[name] + raise AttributeError("GridIn object has no attribute '%s'" % name) + + def __setattr__(self, name: str, value: Any) -> None: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + # All other attributes are part of the document in db.fs.files. + # Store them to be sent to server on close() or if closed, send + # them now. + self._file[name] = value + if self._closed: + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + + async def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + await self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + + async def _flush_data(self, data: Any, force: bool = False) -> None: + """Flush `data` to a chunk.""" + await self._ensure_indexes() + assert len(data) <= self.chunk_size + if data: + self._buffered_docs.append( + {"files_id": self._file["_id"], "n": self._chunk_number, "data": data} + ) + self._buffered_docs_size += len(data) + _CHUNK_OVERHEAD + if not self._buffered_docs: + return + # Limit to 100,000 chunks or 32MB (+1 chunk) of data. + if ( + force + or self._buffered_docs_size >= _UPLOAD_BUFFER_SIZE + or len(self._buffered_docs) >= _UPLOAD_BUFFER_CHUNKS + ): + try: + await self._chunks.insert_many(self._buffered_docs, session=self._session) + except BulkWriteError as exc: + # For backwards compatibility, raise an insert_one style exception. + write_errors = exc.details["writeErrors"] + for err in write_errors: + if err.get("code") in (11000, 11001, 12582): # Duplicate key errors + self._raise_file_exists(self._file["_id"]) + result = {"writeErrors": write_errors} + wces = exc.details["writeConcernErrors"] + if wces: + result["writeConcernError"] = wces[-1] + _check_write_command_response(result) + raise + self._buffered_docs = [] + self._buffered_docs_size = 0 + self._chunk_number += 1 + self._position += len(data) + + async def _flush_buffer(self, force: bool = False) -> None: + """Flush the buffer contents out to a chunk.""" + await self._flush_data(self._buffer.getvalue(), force=force) + self._buffer.close() + self._buffer = io.BytesIO() + + async def _flush(self) -> Any: + """Flush the file to the database.""" + try: + await self._flush_buffer(force=True) + # The GridFS spec says length SHOULD be an Int64. + self._file["length"] = Int64(self._position) + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) + + return await self._coll.files.insert_one(self._file, session=self._session) + except DuplicateKeyError: + self._raise_file_exists(self._id) + + def _raise_file_exists(self, file_id: Any) -> NoReturn: + """Raise a FileExists exception for the given file_id.""" + raise FileExists("file with _id %r already exists" % file_id) + + async def close(self) -> None: + """Flush the file and close it. 
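+
+        For example, a minimal sketch (assuming ``bucket`` is an existing
+        :class:`AsyncGridFSBucket`)::
+
+            gin = bucket.open_upload_stream("hello.txt")
+            await gin.write(b"hello, world")
+            await gin.close()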
+ + A closed file cannot be written any more. Calling + :meth:`close` more than once is allowed. + """ + if not self._closed: + await self._flush() + object.__setattr__(self, "_closed", True) + + def read(self, size: int = -1) -> NoReturn: + raise io.UnsupportedOperation("read") + + def readable(self) -> bool: + return False + + def seekable(self) -> bool: + return False + + async def write(self, data: Any) -> None: + """Write data to the file. There is no return value. + + `data` can be either a string of bytes or a file-like object + (implementing :meth:`read`). If the file has an + :attr:`encoding` attribute, `data` can also be a + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. + + Due to buffering, the data may not actually be written to the + database until the :meth:`close` method is called. Raises + :class:`ValueError` if this file is already closed. Raises + :class:`TypeError` if `data` is not an instance of + :class:`bytes`, a file-like object, or an instance of :class:`str`. + Unicode data is only allowed if the file has an :attr:`encoding` + attribute. + + :param data: string of bytes or file-like object to be written + to the file + """ + if self._closed: + raise ValueError("cannot write to a closed file") + + try: + # file-like + read = data.read + except AttributeError: + # string + if not isinstance(data, (str, bytes)): + raise TypeError("can only write strings or file-like objects") from None + if isinstance(data, str): + try: + data = data.encode(self.encoding) + except AttributeError: + raise TypeError( + "must specify an encoding for file in order to write str" + ) from None + read = io.BytesIO(data).read + + if inspect.iscoroutinefunction(read): + await self._write_async(read) + else: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + await self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + await self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + await self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + async def _write_async(self, read: Any) -> None: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = await read(space) + except BaseException: + await self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + await self._flush_buffer() + to_write = await read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + await self._flush_data(to_write) + to_write = await read(self.chunk_size) + self._buffer.write(to_write) + + async def writelines(self, sequence: Iterable[Any]) -> None: + """Write a sequence of strings to the file. + + Does not add separators. + """ + for line in sequence: + await self.write(line) + + def writeable(self) -> bool: + return True + + async def __aenter__(self) -> AsyncGridIn: + """Support for the context manager protocol.""" + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Support for the context manager protocol. + + Close the file if no exceptions occur and allow exceptions to propagate. + """ + if exc_type is None: + # No exceptions happened. 
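+            # close() flushes any buffered chunks and inserts the files
+            # collection document, so exiting the ``async with`` block
+            # cleanly is what makes the file visible to readers.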
+ await self.close() + else: + # Something happened, at minimum mark as closed. + object.__setattr__(self, "_closed", True) + + # propagate exceptions + return False + + +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class AsyncGridOut(GRIDOUT_BASE_CLASS): # type: ignore + + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: AsyncCollection[Any], + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + ) -> None: + """Read a file from GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Either `file_id` or `file_document` must be specified, + `file_document` will be given priority if present. Raises + :class:`TypeError` if `root_collection` is not an instance of + :class:`~pymongo.collection.AsyncCollection`. + + :param root_collection: root collection to read from + :param file_id: value of ``"_id"`` for the file to read + :param file_document: file document from + `root_collection.files` + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` to use for all + commands + + .. versionchanged:: 3.8 + For better performance and to better follow the GridFS spec, + :class:`GridOut` now uses a single cursor to read all the chunks in + the file. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Creating a GridOut does not immediately retrieve the file metadata + from the server. Metadata is fetched when first needed. + """ + if not isinstance(root_collection, AsyncCollection): + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) + _disallow_transactions(session) + + root_collection = _clear_entity_type_registry(root_collection) + + super().__init__() + + self._chunks = root_collection.chunks + self._files = root_collection.files + self._file_id = file_id + self._buffer = EMPTY + # Start position within the current buffered chunk. + self._buffer_pos = 0 + self._chunk_iter = None + # Position within the total file. + self._position = 0 + self._file = file_document + self._session = session + if not _IS_SYNC: + self.closed = False + + _id: Any = _a_grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _a_grid_out_property("filename", "Name of this file.") + name: str = _a_grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _a_grid_out_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _a_grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _a_grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _a_grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) + aliases: Optional[list[str]] = _a_grid_out_property( + "aliases", "DEPRECATED, will be removed in PyMongo 5.0. List of aliases for this file." + ) + metadata: Optional[Mapping[str, Any]] = _a_grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _a_grid_out_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + ) + + _file: Any + _chunk_iter: Any + + if not _IS_SYNC: + closed: bool + + async def __anext__(self) -> bytes: + line = await self.readline() + if line: + return line + raise StopAsyncIteration() + + async def to_list(self) -> list[bytes]: + return [x async for x in self] # noqa: C416, RUF100 + + async def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return await self._read_size_or_line(size=size, line=True) + + async def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + await self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = await self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines + + async def open(self) -> None: + if not self._file: + _disallow_transactions(self._session) + self._file = await self._files.find_one({"_id": self._file_id}, session=self._session) + if not self._file: + raise NoFile( + f"no file in gridfs collection {self._files!r} with _id {self._file_id!r}" + ) + + def __getattr__(self, name: str) -> Any: + if _IS_SYNC: + self.open() # type: ignore[unused-coroutine] + elif not self._file: + raise InvalidOperation( + "You must call AsyncGridOut.open() before accessing the %s property" % name + ) + if name in self._file: + return self._file[name] + raise AttributeError("GridOut object has no attribute '%s'" % name) + + def readable(self) -> bool: + return True + + async def readchunk(self) -> bytes: + """Reads a chunk at a time. If the current position is within a + chunk the remainder of the chunk is returned. + """ + await self.open() + received = len(self._buffer) - self._buffer_pos + chunk_data = EMPTY + chunk_size = int(self.chunk_size) + + if received > 0: + chunk_data = self._buffer[self._buffer_pos :] + elif self._position < int(self.length): + chunk_number = int((received + self._position) / chunk_size) + if self._chunk_iter is None: + self._chunk_iter = _AsyncGridOutChunkIterator( + self, self._chunks, self._session, chunk_number + ) + + chunk = await self._chunk_iter.next() + chunk_data = chunk["data"][self._position % chunk_size :] + + if not chunk_data: + raise CorruptGridFile("truncated chunk") + + self._position += len(chunk_data) + self._buffer = EMPTY + self._buffer_pos = 0 + return chunk_data + + async def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" + await self.open() + remainder = int(self.length) - self._position + if size < 0 or size > remainder: + size = remainder + + if size == 0: + return EMPTY + + received = 0 + data = [] + while received < size: + needed = size - received + if self._buffer: + # Optimization: Read the buffer with zero byte copies. + buf = self._buffer + chunk_start = self._buffer_pos + chunk_data = memoryview(buf)[self._buffer_pos :] + self._buffer = EMPTY + self._buffer_pos = 0 + self._position += len(chunk_data) + else: + buf = await self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. 
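+                    # ``pos`` is the newline's offset within this chunk's
+                    # unread data; the +1 below keeps the newline in the
+                    # returned bytes, matching file readline() semantics.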
+ size = received + pos + 1 + needed = pos + 1 + if len(chunk_data) > needed: + data.append(chunk_data[:needed]) + # Optimization: Save the buffer with zero byte copies. + self._buffer = buf + self._buffer_pos = chunk_start + needed + self._position -= len(self._buffer) - self._buffer_pos + else: + data.append(chunk_data) + received += len(chunk_data) + + # Detect extra chunks after reading the entire file. + if size == remainder and self._chunk_iter: + try: + await self._chunk_iter.next() + except StopAsyncIteration: + pass + + return b"".join(data) + + async def read(self, size: int = -1) -> bytes: + """Read at most `size` bytes from the file (less if there + isn't enough data). + + The bytes are returned as an instance of :class:`bytes` + If `size` is negative or omitted all data is read. + + :param size: the number of bytes to read + + .. versionchanged:: 3.8 + This method now only checks for extra chunks after reading the + entire file. Previously, this method would check for extra chunks + on every call. + """ + return await self._read_size_or_line(size=size) + + def tell(self) -> int: + """Return the current position of this file.""" + return self._position + + async def seek(self, pos: int, whence: int = _SEEK_SET) -> int: + """Set the current position of this file. + + :param pos: the position (or offset if using relative + positioning) to seek to + :param whence: where to seek + from. :attr:`os.SEEK_SET` (``0``) for absolute file + positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative + to the current position, :attr:`os.SEEK_END` (``2``) to + seek relative to the file's end. + + .. versionchanged:: 4.1 + The method now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. + """ + if whence == _SEEK_SET: + new_pos = pos + elif whence == _SEEK_CUR: + new_pos = self._position + pos + elif whence == _SEEK_END: + new_pos = int(self.length) + pos + else: + raise OSError(22, "Invalid value for `whence`") + + if new_pos < 0: + raise OSError(22, "Invalid value for `pos` - must be positive") + + # Optimization, continue using the same buffer and chunk iterator. + if new_pos == self._position: + return new_pos + + self._position = new_pos + self._buffer = EMPTY + self._buffer_pos = 0 + if self._chunk_iter: + await self._chunk_iter.close() + self._chunk_iter = None + return new_pos + + def seekable(self) -> bool: + return True + + def __aiter__(self) -> AsyncGridOut: + """Return an iterator over all of this file's data. + + The iterator will return lines (delimited by ``b'\\n'``) of + :class:`bytes`. This can be useful when serving files + using a webserver that handles such an iterator efficiently. + + .. versionchanged:: 3.8 + The iterator now raises :class:`CorruptGridFile` when encountering + any truncated, missing, or extra chunk in a file. The previous + behavior was to only raise :class:`CorruptGridFile` on a missing + chunk. + + .. versionchanged:: 4.0 + The iterator now iterates over *lines* in the file, instead + of chunks, to conform to the base class :py:class:`io.IOBase`. + Use :meth:`GridOut.readchunk` to read chunk by chunk instead + of line by line. 
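+
+        For example, a minimal sketch (assuming ``grid_out`` is an
+        :class:`AsyncGridOut` opened elsewhere)::
+
+            async for line in grid_out:
+                print(line)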
+ """ + return self + + async def close(self) -> None: + """Make GridOut more generically file-like.""" + if self._chunk_iter: + await self._chunk_iter.close() + self._chunk_iter = None + if _IS_SYNC: + super().close() + else: + self.closed = True + + def write(self, value: Any) -> NoReturn: + raise io.UnsupportedOperation("write") + + def writelines(self, lines: Any) -> NoReturn: + raise io.UnsupportedOperation("writelines") + + def writable(self) -> bool: + return False + + async def __aenter__(self) -> AsyncGridOut: + """Makes it possible to use :class:`AsyncGridOut` files + with the async context manager protocol. + """ + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Makes it possible to use :class:`AsyncGridOut` files + with the async context manager protocol. + """ + await self.close() + return False + + def fileno(self) -> NoReturn: + raise io.UnsupportedOperation("fileno") + + def flush(self) -> None: + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self) -> bool: + return False + + def truncate(self, size: Optional[int] = None) -> NoReturn: + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation("truncate") + + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self) -> None: + pass + + +class _AsyncGridOutChunkIterator: + """Iterates over a file's chunks using a single cursor. + + Raises CorruptGridFile when encountering any truncated, missing, or extra + chunk in a file. + """ + + def __init__( + self, + grid_out: AsyncGridOut, + chunks: AsyncCollection[Any], + session: Optional[AsyncClientSession], + next_chunk: Any, + ) -> None: + self._id = grid_out._id + self._chunk_size = int(grid_out.chunk_size) + self._length = int(grid_out.length) + self._chunks = chunks + self._session = session + self._next_chunk = next_chunk + self._num_chunks = math.ceil(float(self._length) / self._chunk_size) + self._cursor = None + + _cursor: Optional[AsyncCursor[Any]] + + def expected_chunk_length(self, chunk_n: int) -> int: + if chunk_n < self._num_chunks - 1: + return self._chunk_size + return self._length - (self._chunk_size * (self._num_chunks - 1)) + + def __aiter__(self) -> _AsyncGridOutChunkIterator: + return self + + def _create_cursor(self) -> None: + filter = {"files_id": self._id} + if self._next_chunk > 0: + filter["n"] = {"$gte": self._next_chunk} + _disallow_transactions(self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) + + async def _next_with_retry(self) -> Mapping[str, Any]: + """Return the next chunk and retry once on CursorNotFound. + + We retry on CursorNotFound to maintain backwards compatibility in + cases where two calls to read occur more than 10 minutes apart (the + server's default cursor timeout). 
+ """ + if self._cursor is None: + self._create_cursor() + assert self._cursor is not None + try: + return await self._cursor.next() + except CursorNotFound: + await self._cursor.close() + self._create_cursor() + return await self._cursor.next() + + async def next(self) -> Mapping[str, Any]: + try: + chunk = await self._next_with_retry() + except StopAsyncIteration: + if self._next_chunk >= self._num_chunks: + raise + raise CorruptGridFile("no chunk #%d" % self._next_chunk) from None + + if chunk["n"] != self._next_chunk: + await self.close() + raise CorruptGridFile( + "Missing chunk: expected chunk #%d but found " + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) + + if chunk["n"] >= self._num_chunks: + # According to spec, ignore extra chunks if they are empty. + if len(chunk["data"]): + await self.close() + raise CorruptGridFile( + "Extra chunk found: expected %d chunks but found " + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) + + expected_length = self.expected_chunk_length(chunk["n"]) + if len(chunk["data"]) != expected_length: + await self.close() + raise CorruptGridFile( + "truncated chunk #%d: expected chunk length to be %d but " + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) + + self._next_chunk += 1 + return chunk + + __anext__ = next + + async def close(self) -> None: + if self._cursor: + await self._cursor.close() + self._cursor = None + + +class AsyncGridOutIterator: + def __init__( + self, grid_out: AsyncGridOut, chunks: AsyncCollection[Any], session: AsyncClientSession + ): + self._chunk_iter = _AsyncGridOutChunkIterator(grid_out, chunks, session, 0) + + def __aiter__(self) -> AsyncGridOutIterator: + return self + + async def next(self) -> bytes: + chunk = await self._chunk_iter.next() + return bytes(chunk["data"]) + + __anext__ = next + + +class AsyncGridOutCursor(AsyncCursor): # type: ignore[type-arg] + """A cursor / iterator for returning GridOut objects as the result + of an arbitrary query against the GridFS files collection. + """ + + def __init__( + self, + collection: AsyncCollection[Any], + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[AsyncClientSession] = None, + ) -> None: + """Create a new cursor, similar to the normal + :class:`~pymongo.cursor.Cursor`. + + Should not be called directly by application developers - see + the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. + + .. versionadded 2.7 + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + _disallow_transactions(session) + collection = _clear_entity_type_registry(collection) + + # Hold on to the base "fs" collection to create GridOut objects later. 
+ self._root_collection = collection + + super().__init__( + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) + + async def next(self) -> AsyncGridOut: + """Get next GridOut object from cursor.""" + _disallow_transactions(self.session) + next_file = await super().next() + return AsyncGridOut(self._root_collection, file_document=next_file, session=self.session) + + async def to_list(self, length: Optional[int] = None) -> list[AsyncGridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x async for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(await self.next()) + return ret + + __anext__ = next + + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncGridOutCursor: + """Creates an empty GridOutCursor for information to be copied into.""" + return AsyncGridOutCursor(self._root_collection, session=session) diff --git a/gridfs/errors.py b/gridfs/errors.py index a6420d1711..e8c02cef4f 100644 --- a/gridfs/errors.py +++ b/gridfs/errors.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,44 +13,22 @@ # limitations under the License. """Exceptions raised by the :mod:`gridfs` package""" +from __future__ import annotations from pymongo.errors import PyMongoError class GridFSError(PyMongoError): - """Base class for all GridFS exceptions. - - .. versionadded:: 1.5 - """ + """Base class for all GridFS exceptions.""" class CorruptGridFile(GridFSError): - """Raised when a file in :class:`~gridfs.GridFS` is malformed. - """ + """Raised when a file in :class:`~gridfs.GridFS` is malformed.""" class NoFile(GridFSError): - """Raised when trying to read from a non-existent file. - - .. versionadded:: 1.6 - """ + """Raised when trying to read from a non-existent file.""" class FileExists(GridFSError): - """Raised when trying to create a file that already exists. - - .. versionadded:: 1.7 - """ - - -class UnsupportedAPI(GridFSError): - """Raised when trying to use the old GridFS API. - - In version 1.6 of the PyMongo distribution there were backwards - incompatible changes to the GridFS API. Upgrading shouldn't be - difficult, but the old API is no longer supported (with no - deprecation period). This exception will be raised when attempting - to use unsupported constructs from the old API. - - .. versionadded:: 1.6 - """ + """Raised when trying to create a file that already exists.""" diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index ea1037d30c..b2cab71515 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,660 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Tools for representing files stored in GridFS.""" +"""Re-import of synchronous gridfs API for compatibility.""" +from __future__ import annotations -import datetime -import math -import os - -from bson.binary import Binary -from bson.objectid import ObjectId -from bson.py3compat import (b, binary_type, next_item, - string_types, text_type, StringIO) -from gridfs.errors import (CorruptGridFile, - FileExists, - NoFile, - UnsupportedAPI) -from pymongo import ASCENDING -from pymongo.collection import Collection -from pymongo.cursor import Cursor -from pymongo.errors import DuplicateKeyError - -try: - _SEEK_SET = os.SEEK_SET - _SEEK_CUR = os.SEEK_CUR - _SEEK_END = os.SEEK_END -# before 2.5 -except AttributeError: - _SEEK_SET = 0 - _SEEK_CUR = 1 - _SEEK_END = 2 - -EMPTY = b("") -NEWLN = b("\n") - -"""Default chunk size, in bytes.""" -# Slightly under a power of 2, to work well with server's record allocations. -DEFAULT_CHUNK_SIZE = 255 * 1024 - - -def _create_property(field_name, docstring, - read_only=False, closed_only=False): - """Helper for creating properties to read/write to files. - """ - def getter(self): - if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % - field_name) - # Protect against PHP-237 - if field_name == 'length': - return self._file.get(field_name, 0) - return self._file.get(field_name, None) - - def setter(self, value): - if self._closed: - self._coll.files.update({"_id": self._file["_id"]}, - {"$set": {field_name: value}}, - **self._coll._get_wc_override()) - self._file[field_name] = value - - if read_only: - docstring = docstring + "\n\nThis attribute is read-only." - elif closed_only: - docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.") - - if not read_only and not closed_only: - return property(getter, setter, doc=docstring) - return property(getter, doc=docstring) - - -class GridIn(object): - """Class to write data to GridFS. - """ - def __init__(self, root_collection, **kwargs): - """Write a file to GridFS - - Application developers should generally not need to - instantiate this class directly - instead see the methods - provided by :class:`~gridfs.GridFS`. - - Raises :class:`TypeError` if `root_collection` is not an - instance of :class:`~pymongo.collection.Collection`. - - Any of the file level options specified in the `GridFS Spec - `_ may be passed as - keyword arguments. Any additional keyword arguments will be - set as additional fields on the file document. Valid keyword - arguments include: - - - ``"_id"``: unique ID for this file (default: - :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must - not have already been used for another file - - - ``"filename"``: human name for the file - - - ``"contentType"`` or ``"content_type"``: valid mime-type - for the file - - - ``"chunkSize"`` or ``"chunk_size"``: size of each of the - chunks, in bytes (default: 256 kb) - - - ``"encoding"``: encoding used for this file. In Python 2, - any :class:`unicode` that is written to the file will be - converted to a :class:`str`. In Python 3, any :class:`str` - that is written to the file will be converted to - :class:`bytes`. 
- - If you turn off write-acknowledgment for performance reasons, it is - critical to wrap calls to :meth:`write` and :meth:`close` within a - single request: - - >>> from pymongo import MongoClient - >>> from gridfs import GridFS - >>> client = MongoClient(w=0) # turn off write acknowledgment - >>> fs = GridFS(client.database) - >>> gridin = fs.new_file() - >>> request = client.start_request() - >>> try: - ... for i in range(10): - ... gridin.write('foo') - ... gridin.close() - ... finally: - ... request.end() - - In Python 2.5 and later this code can be simplified with a - with-statement, see :doc:`/examples/requests` for more information. - - :Parameters: - - `root_collection`: root collection to write to - - `**kwargs` (optional): file level options (see above) - """ - if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") - - # Handle alternative naming - if "content_type" in kwargs: - kwargs["contentType"] = kwargs.pop("content_type") - if "chunk_size" in kwargs: - kwargs["chunkSize"] = kwargs.pop("chunk_size") - - # Defaults - kwargs["_id"] = kwargs.get("_id", ObjectId()) - kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) - object.__setattr__(self, "_coll", root_collection) - object.__setattr__(self, "_chunks", root_collection.chunks) - object.__setattr__(self, "_file", kwargs) - object.__setattr__(self, "_buffer", StringIO()) - object.__setattr__(self, "_position", 0) - object.__setattr__(self, "_chunk_number", 0) - object.__setattr__(self, "_closed", False) - object.__setattr__(self, "_ensured_index", False) - - def _ensure_index(self): - if not object.__getattribute__(self, "_ensured_index"): - self._coll.chunks.ensure_index( - [("files_id", ASCENDING), ("n", ASCENDING)], - unique=True) - object.__setattr__(self, "_ensured_index", True) - - @property - def closed(self): - """Is this file closed? - """ - return self._closed - - _id = _create_property("_id", "The ``'_id'`` value for this file.", - read_only=True) - filename = _create_property("filename", "Name of this file.") - name = _create_property("filename", "Alias for `filename`.") - content_type = _create_property("contentType", "Mime-type for this file.") - length = _create_property("length", "Length (in bytes) of this file.", - closed_only=True) - chunk_size = _create_property("chunkSize", "Chunk size for this file.", - read_only=True) - upload_date = _create_property("uploadDate", - "Date that this file was uploaded.", - closed_only=True) - md5 = _create_property("md5", "MD5 of the contents of this file " - "(generated on the server).", - closed_only=True) - - def __getattr__(self, name): - if name in self._file: - return self._file[name] - raise AttributeError("GridIn object has no attribute '%s'" % name) - - def __setattr__(self, name, value): - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: - # All other attributes are part of the document in db.fs.files. - # Store them to be sent to server on close() or if closed, send - # them now. - self._file[name] = value - if self._closed: - self._coll.files.update({"_id": self._file["_id"]}, - {"$set": {name: value}}, - **self._coll._get_wc_override()) - - def __flush_data(self, data): - """Flush `data` to a chunk. 
- """ - # Ensure the index, even if there's nothing to write, so - # the filemd5 command always succeeds. - self._ensure_index() - - if not data: - return - assert(len(data) <= self.chunk_size) - - chunk = {"files_id": self._file["_id"], - "n": self._chunk_number, - "data": Binary(data)} - - try: - self._chunks.insert(chunk) - except DuplicateKeyError: - self._raise_file_exists(self._file['_id']) - self._chunk_number += 1 - self._position += len(data) - - def __flush_buffer(self): - """Flush the buffer contents out to a chunk. - """ - self.__flush_data(self._buffer.getvalue()) - self._buffer.close() - self._buffer = StringIO() - - def __flush(self): - """Flush the file to the database. - """ - try: - self.__flush_buffer() - - db = self._coll.database - - # See PYTHON-417, "Sharded GridFS fails with exception: chunks out - # of order." Inserts via mongos, even if they use a single - # connection, can succeed out-of-order due to the writebackListener. - # We mustn't call "filemd5" until all inserts are complete, which - # we ensure by calling getLastError (and ignoring the result). - db.error() - - md5 = db.command( - "filemd5", self._id, root=self._coll.name)["md5"] - - self._file["md5"] = md5 - self._file["length"] = self._position - self._file["uploadDate"] = datetime.datetime.utcnow() - - return self._coll.files.insert(self._file, - **self._coll._get_wc_override()) - except DuplicateKeyError: - self._raise_file_exists(self._id) - - def _raise_file_exists(self, file_id): - """Raise a FileExists exception for the given file_id.""" - raise FileExists("file with _id %r already exists" % file_id) - - def close(self): - """Flush the file and close it. - - A closed file cannot be written any more. Calling - :meth:`close` more than once is allowed. - """ - if not self._closed: - self.__flush() - object.__setattr__(self, "_closed", True) - - def write(self, data): - """Write data to the file. There is no return value. - - `data` can be either a string of bytes or a file-like object - (implementing :meth:`read`). If the file has an - :attr:`encoding` attribute, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which - will be encoded as :attr:`encoding` before being written. - - Due to buffering, the data may not actually be written to the - database until the :meth:`close` method is called. Raises - :class:`ValueError` if this file is already closed. Raises - :class:`TypeError` if `data` is not an instance of - :class:`str` (:class:`bytes` in python 3), a file-like object, - or an instance of :class:`unicode` (:class:`str` in python 3). - Unicode data is only allowed if the file has an :attr:`encoding` - attribute. - - :Parameters: - - `data`: string of bytes or file-like object to be written - to the file - - .. versionadded:: 1.9 - The ability to write :class:`unicode`, if the file has an - :attr:`encoding` attribute. 
- """ - if self._closed: - raise ValueError("cannot write to a closed file") - - try: - # file-like - read = data.read - except AttributeError: - # string - if not isinstance(data, string_types): - raise TypeError("can only write strings or file-like objects") - if isinstance(data, unicode): - try: - data = data.encode(self.encoding) - except AttributeError: - raise TypeError("must specify an encoding for file in " - "order to write %s" % (text_type.__name__,)) - read = StringIO(data).read - - if self._buffer.tell() > 0: - # Make sure to flush only when _buffer is complete - space = self.chunk_size - self._buffer.tell() - if space: - to_write = read(space) - self._buffer.write(to_write) - if len(to_write) < space: - return # EOF or incomplete - self.__flush_buffer() - to_write = read(self.chunk_size) - while to_write and len(to_write) == self.chunk_size: - self.__flush_data(to_write) - to_write = read(self.chunk_size) - self._buffer.write(to_write) - - def writelines(self, sequence): - """Write a sequence of strings to the file. - - Does not add seperators. - """ - for line in sequence: - self.write(line) - - def __enter__(self): - """Support for the context manager protocol. - """ - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Support for the context manager protocol. - - Close the file and allow exceptions to propagate. - """ - self.close() - - # propagate exceptions - return False - - -class GridOut(object): - """Class to read data out of GridFS. - """ - def __init__(self, root_collection, file_id=None, file_document=None, - _connect=True): - """Read a file from GridFS - - Application developers should generally not need to - instantiate this class directly - instead see the methods - provided by :class:`~gridfs.GridFS`. - - Either `file_id` or `file_document` must be specified, - `file_document` will be given priority if present. Raises - :class:`TypeError` if `root_collection` is not an instance of - :class:`~pymongo.collection.Collection`. - - :Parameters: - - `root_collection`: root collection to read from - - `file_id`: value of ``"_id"`` for the file to read - - `file_document`: file document from `root_collection.files` - - .. versionadded:: 1.9 - The `file_document` parameter. 
- """ - if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") - - self.__chunks = root_collection.chunks - self.__files = root_collection.files - self.__file_id = file_id - self.__buffer = EMPTY - self.__position = 0 - self._file = file_document - if _connect: - self._ensure_file() - - _id = _create_property("_id", "The ``'_id'`` value for this file.", True) - filename = _create_property("filename", "Name of this file.", True) - name = _create_property("filename", "Alias for `filename`.", True) - content_type = _create_property("contentType", "Mime-type for this file.", - True) - length = _create_property("length", "Length (in bytes) of this file.", - True) - chunk_size = _create_property("chunkSize", "Chunk size for this file.", - True) - upload_date = _create_property("uploadDate", - "Date that this file was first uploaded.", - True) - aliases = _create_property("aliases", "List of aliases for this file.", - True) - metadata = _create_property("metadata", "Metadata attached to this file.", - True) - md5 = _create_property("md5", "MD5 of the contents of this file " - "(generated on the server).", True) - - def _ensure_file(self): - if not self._file: - self._file = self.__files.find_one({"_id": self.__file_id}) - if not self._file: - raise NoFile("no file in gridfs collection %r with _id %r" % - (self.__files, self.__file_id)) - - def __getattr__(self, name): - self._ensure_file() - if name in self._file: - return self._file[name] - raise AttributeError("GridOut object has no attribute '%s'" % name) - - def readchunk(self): - """Reads a chunk at a time. If the current position is within a - chunk the remainder of the chunk is returned. - """ - received = len(self.__buffer) - chunk_data = EMPTY - - if received > 0: - chunk_data = self.__buffer - elif self.__position < int(self.length): - chunk_number = int((received + self.__position) / self.chunk_size) - chunk = self.__chunks.find_one({"files_id": self._id, - "n": chunk_number}) - if not chunk: - raise CorruptGridFile("no chunk #%d" % chunk_number) - - chunk_data = chunk["data"][self.__position % self.chunk_size:] - - self.__position += len(chunk_data) - self.__buffer = EMPTY - return chunk_data - - def read(self, size=-1): - """Read at most `size` bytes from the file (less if there - isn't enough data). - - The bytes are returned as an instance of :class:`str` (:class:`bytes` - in python 3). If `size` is negative or omitted all data is read. - - :Parameters: - - `size` (optional): the number of bytes to read - """ - self._ensure_file() - - if size == 0: - return EMPTY - - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - received = 0 - data = StringIO() - while received < size: - chunk_data = self.readchunk() - received += len(chunk_data) - data.write(chunk_data) - - self.__position -= received - size - - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) - - def readline(self, size=-1): - """Read one line or up to `size` bytes from the file. - - :Parameters: - - `size` (optional): the maximum number of bytes to read - - .. 
versionadded:: 1.9 - """ - if size == 0: - return b('') - - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - received = 0 - data = StringIO() - while received < size: - chunk_data = self.readchunk() - pos = chunk_data.find(NEWLN, 0, size) - if pos != -1: - size = received + pos + 1 - - received += len(chunk_data) - data.write(chunk_data) - if pos != -1: - break - - self.__position -= received - size - - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) - - def tell(self): - """Return the current position of this file. - """ - return self.__position - - def seek(self, pos, whence=_SEEK_SET): - """Set the current position of this file. - - :Parameters: - - `pos`: the position (or offset if using relative - positioning) to seek to - - `whence` (optional): where to seek - from. :attr:`os.SEEK_SET` (``0``) for absolute file - positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative - to the current position, :attr:`os.SEEK_END` (``2``) to - seek relative to the file's end. - """ - if whence == _SEEK_SET: - new_pos = pos - elif whence == _SEEK_CUR: - new_pos = self.__position + pos - elif whence == _SEEK_END: - new_pos = int(self.length) + pos - else: - raise IOError(22, "Invalid value for `whence`") - - if new_pos < 0: - raise IOError(22, "Invalid value for `pos` - must be positive") - - self.__position = new_pos - self.__buffer = EMPTY - - def __iter__(self): - """Return an iterator over all of this file's data. - - The iterator will return chunk-sized instances of - :class:`str` (:class:`bytes` in python 3). This can be - useful when serving files using a webserver that handles - such an iterator efficiently. - """ - return GridOutIterator(self, self.__chunks) - - def close(self): - """Make GridOut more generically file-like.""" - pass - - def __enter__(self): - """Makes it possible to use :class:`GridOut` files - with the context manager protocol. - """ - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Makes it possible to use :class:`GridOut` files - with the context manager protocol. - """ - return False - - -class GridOutIterator(object): - def __init__(self, grid_out, chunks): - self.__id = grid_out._id - self.__chunks = chunks - self.__current_chunk = 0 - self.__max_chunk = math.ceil(float(grid_out.length) / - grid_out.chunk_size) - - def __iter__(self): - return self - - def next(self): - if self.__current_chunk >= self.__max_chunk: - raise StopIteration - chunk = self.__chunks.find_one({"files_id": self.__id, - "n": self.__current_chunk}) - if not chunk: - raise CorruptGridFile("no chunk #%d" % self.__current_chunk) - self.__current_chunk += 1 - return binary_type(chunk["data"]) - - -class GridFile(object): - """No longer supported. - - .. versionchanged:: 1.6 - The GridFile class is no longer supported. - """ - def __init__(self, *args, **kwargs): - raise UnsupportedAPI("The GridFile class is no longer supported. " - "Please use GridIn or GridOut instead.") - - -class GridOutCursor(Cursor): - """A cursor / iterator for returning GridOut objects as the result - of an arbitrary query against the GridFS files collection. - """ - def __init__(self, collection, spec=None, skip=0, limit=0, - timeout=True, sort=None, max_scan=None, - read_preference=None, tag_sets=None, - secondary_acceptable_latency_ms=None, compile_re=True): - """Create a new cursor, similar to the normal - :class:`~pymongo.cursor.Cursor`. 
- - Should not be called directly by application developers - see - the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. - - .. versionadded 2.7 - - .. mongodoc:: cursors - """ - # Hold on to the base "fs" collection to create GridOut objects later. - self.__root_collection = collection - - # Copy these settings from collection if they are not set by caller. - read_preference = read_preference or collection.files.read_preference - tag_sets = tag_sets or collection.files.tag_sets - latency = (secondary_acceptable_latency_ms - or collection.files.secondary_acceptable_latency_ms) - - super(GridOutCursor, self).__init__( - collection.files, spec, skip=skip, limit=limit, timeout=timeout, - sort=sort, max_scan=max_scan, read_preference=read_preference, - secondary_acceptable_latency_ms=latency, compile_re=compile_re, - tag_sets=tag_sets) - - def next(self): - """Get next GridOut object from cursor. - """ - # Work around "super is not iterable" issue in Python 3.x - next_file = getattr(super(GridOutCursor, self), next_item)() - return GridOut(self.__root_collection, file_document=next_file) - - def add_option(self, *args, **kwargs): - raise NotImplementedError("Method does not exist for GridOutCursor") - - def remove_option(self, *args, **kwargs): - raise NotImplementedError("Method does not exist for GridOutCursor") - - def _clone_base(self): - """Creates an empty GridOutCursor for information to be copied into. - """ - return GridOutCursor(self.__root_collection) +from gridfs.synchronous.grid_file import * # noqa: F403 diff --git a/gridfs/grid_file_shared.py b/gridfs/grid_file_shared.py new file mode 100644 index 0000000000..79a0ad7f8c --- /dev/null +++ b/gridfs/grid_file_shared.py @@ -0,0 +1,168 @@ +from __future__ import annotations + +import os +import warnings +from typing import Any, Optional + +from pymongo import ASCENDING +from pymongo.common import MAX_MESSAGE_SIZE +from pymongo.errors import InvalidOperation + +_SEEK_SET = os.SEEK_SET +_SEEK_CUR = os.SEEK_CUR +_SEEK_END = os.SEEK_END + +EMPTY = b"" +NEWLN = b"\n" + +"""Default chunk size, in bytes.""" +# Slightly under a power of 2, to work well with server's record allocations. +DEFAULT_CHUNK_SIZE = 255 * 1024 +# The number of chunked bytes to buffer before calling insert_many. +_UPLOAD_BUFFER_SIZE = MAX_MESSAGE_SIZE +# The number of chunk documents to buffer before calling insert_many. +_UPLOAD_BUFFER_CHUNKS = 100000 +# Rough BSON overhead of a chunk document not including the chunk data itself. 
+# Essentially len(encode({"_id": ObjectId(), "files_id": ObjectId(), "n": 1, "data": ""})) +_CHUNK_OVERHEAD = 60 + +_C_INDEX: dict[str, Any] = {"files_id": ASCENDING, "n": ASCENDING} +_F_INDEX: dict[str, Any] = {"filename": ASCENDING, "uploadDate": ASCENDING} + + +def _a_grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: + """Create a GridIn property.""" + + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if closed_only and not self._closed: + raise AttributeError("can only get %r on a closed file" % field_name) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if self._closed: + raise InvalidOperation( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + self._file[field_name] = value + + if read_only: + docstring += "\n\nThis attribute is read-only." + elif closed_only: + docstring = "{}\n\n{}".format( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) + + if not read_only and not closed_only: + return property(getter, setter, doc=docstring) + return property(getter, doc=docstring) + + +def _a_grid_out_property(field_name: str, docstring: str) -> Any: + """Create a GridOut property.""" + + def a_getter(self: Any) -> Any: + if not self._file: + raise InvalidOperation( + "You must call GridOut.open() before accessing " "the %s property" % field_name + ) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + docstring += "\n\nThis attribute is read-only." + return property(a_getter, doc=docstring) + + +def _grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: + """Create a GridIn property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if closed_only and not self._closed: + raise AttributeError("can only get %r on a closed file" % field_name) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) + self._file[field_name] = value + + if read_only: + docstring += "\n\nThis attribute is read-only." 
+ elif closed_only: + docstring = "{}\n\n{}".format( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) + + if not read_only and not closed_only: + return property(getter, setter, doc=docstring) + return property(getter, doc=docstring) + + +def _grid_out_property(field_name: str, docstring: str) -> Any: + """Create a GridOut property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridOut property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + self.open() + + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + docstring += "\n\nThis attribute is read-only." + return property(getter, doc=docstring) + + +def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: + """Clear the given database/collection object's type registry.""" + codecopts = entity.codec_options.with_options(type_registry=None) + return entity.with_options(codec_options=codecopts, **kwargs) diff --git a/gridfs/py.typed b/gridfs/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/gridfs/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/gridfs/synchronous/__init__.py b/gridfs/synchronous/__init__.py new file mode 100644 index 0000000000..bc2704364b --- /dev/null +++ b/gridfs/synchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE +from gridfs.synchronous.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutCursor, +) + +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py new file mode 100644 index 0000000000..7364aedda3 --- /dev/null +++ b/gridfs/synchronous/grid_file.py @@ -0,0 +1,1993 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for representing files stored in GridFS.""" +from __future__ import annotations + +import datetime +import inspect +import io +import math +from collections import abc +from typing import Any, Iterable, Mapping, NoReturn, Optional, cast + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file_shared import ( + _C_INDEX, + _CHUNK_OVERHEAD, + _F_INDEX, + _SEEK_CUR, + _SEEK_END, + _SEEK_SET, + _UPLOAD_BUFFER_CHUNKS, + _UPLOAD_BUFFER_SIZE, + DEFAULT_CHUNK_SIZE, + EMPTY, + NEWLN, + _clear_entity_type_registry, + _grid_in_property, + _grid_out_property, +) +from pymongo import ASCENDING, DESCENDING, WriteConcern, _csot +from pymongo.common import validate_string +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.client_session import ClientSession +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database + +_IS_SYNC = True + + +def _disallow_transactions(session: Optional[ClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class GridFS: + """An instance of GridFS on top of a single Database.""" + + def __init__(self, database: Database[Any], collection: str = "fs"): + """Create a new instance of :class:`GridFS`. + + Raises :class:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + :param database: database to use + :param collection: root collection to use + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.1 + Indexes are only ensured on the first write to the DB. + + .. versionchanged:: 3.0 + `database` must use an acknowledged + :attr:`~pymongo.database.Database.write_concern` + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(database, Database): + raise TypeError(f"database must be an instance of Database, not {type(database)}") + + database = _clear_entity_type_registry(database) + + if not database.write_concern.acknowledged: + raise ConfigurationError("database must use acknowledged write_concern") + + self._collection = database[collection] + self._files = self._collection.files + self._chunks = self._collection.chunks + + def new_file(self, **kwargs: Any) -> GridIn: + """Create a new file in GridFS. + + Returns a new :class:`~gridfs.grid_file.GridIn` instance to + which data can be written. 
Any keyword arguments will be + passed through to :meth:`~gridfs.grid_file.GridIn`. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param kwargs: keyword arguments for file creation + """ + return GridIn(self._collection, **kwargs) + + def put(self, data: Any, **kwargs: Any) -> Any: + """Put data in GridFS as a new file. + + Equivalent to doing:: + + with fs.new_file(**kwargs) as f: + f.write(data) + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see + :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the + ``"_id"`` of the created file. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation + + .. versionchanged:: 3.0 + w=0 writes to GridFS are now prohibited. + """ + with GridIn(self._collection, **kwargs) as grid_file: + grid_file.write(data) + return grid_file._id + + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: + """Get a file from GridFS by ``"_id"``. + + Returns an instance of :class:`~gridfs.grid_file.GridOut`, + which provides a file-like interface for reading. + + :param file_id: ``"_id"`` of the file to get + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = GridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + gout.open() + return gout + + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get a file from GridFS by ``"filename"`` or metadata fields. + + Returns a version of the file in GridFS whose filename matches + `filename` and whose metadata fields match the supplied keyword + arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. + + Version numbering is a convenience atop the GridFS API provided + by MongoDB. If more than one file matches the query (either by + `filename` alone, by metadata fields, or by a combination of + both), then version ``-1`` will be the most recently uploaded + matching file, ``-2`` the second most recently + uploaded, etc. Version ``0`` will be the first version + uploaded, ``1`` the second version, etc. So if three versions + have been uploaded, then version ``0`` is the same as version + ``-3``, version ``1`` is the same as version ``-2``, and + version ``2`` is the same as version ``-1``. + + Raises :class:`~gridfs.errors.NoFile` if no such version of + that file exists. + + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults + to -1, the most recent version uploaded) + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``get_version`` no longer ensures indexes. 
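+
+        A short sketch of this numbering (``fs`` is assumed to be a
+        :class:`GridFS` instance, with three prior uploads of the same
+        name)::
+
+            fs.put(b"v1", filename="hello.txt")
+            fs.put(b"v2", filename="hello.txt")
+            fs.put(b"v3", filename="hello.txt")
+
+            assert fs.get_version("hello.txt", 0).read() == b"v1"   # oldest
+            assert fs.get_version("hello.txt", 1).read() == b"v2"   # same as -2
+            assert fs.get_version("hello.txt", -1).read() == b"v3"  # newest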
+ """ + query = kwargs + if filename is not None: + query["filename"] = filename + + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if version is None: + version = -1 + if version < 0: + skip = abs(version) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + try: + doc = next(cursor) + return GridOut(self._collection, file_document=doc, session=session) + except StopIteration: + raise NoFile("no version %d for filename %r" % (version, filename)) from None + + def get_last_version( + self, + filename: Optional[str] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get the most recent version of a file in GridFS by ``"filename"`` + or metadata fields. + + Equivalent to calling :meth:`get_version` with the default + `version` (``-1``). + + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return self.get_version(filename=filename, session=session, **kwargs) + + # TODO add optional safe mode for chunk removal? + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: + """Delete a file from GridFS by ``"_id"``. + + Deletes all data belonging to the file with ``"_id"``: + `file_id`. + + .. warning:: Any processes/threads reading from the file while + this method is executing will likely see an invalid/corrupt + file. Care should be taken to avoid concurrent reads to a file + while it is being deleted. + + .. note:: Deletes of non-existent files are considered successful + since the end result is the same: no file with that _id remains. + + :param file_id: ``"_id"`` of the file to delete + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``delete`` no longer ensures indexes. + """ + _disallow_transactions(session) + self._files.delete_one({"_id": file_id}, session=session) + self._chunks.delete_many({"files_id": file_id}, session=session) + + def list(self, session: Optional[ClientSession] = None) -> list[str]: + """List the names of all files stored in this instance of + :class:`GridFS`. + + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``list`` no longer ensures indexes. + """ + _disallow_transactions(session) + # With an index, distinct includes documents with no filename + # as None. + return [ + name for name in self._files.distinct("filename", session=session) if name is not None + ] + + def find_one( + self, + filter: Optional[Any] = None, + session: Optional[ClientSession] = None, + *args: Any, + **kwargs: Any, + ) -> Optional[GridOut]: + """Get a single file from gridfs. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, + or ``None`` if no matching file is found. For example: + + .. code-block: python + + file = fs.find_one({"filename": "lisa.txt"}) + + :param filter: a dictionary specifying + the query to be performing OR any other type to be used as + the value for a query for ``"_id"`` in the file collection. 
+ :param args: any additional positional arguments are + the same as the arguments to :meth:`find`. + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + + _disallow_transactions(session) + for f in self.find(filter, *args, session=session, **kwargs): + return f + + return None + + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: + """Query GridFS for files. + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. For example:: + + for grid_out in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_out.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.ClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: A query document that selects which files + to include in the result set. Can be an empty document to include + all files. + :param skip: the number of files to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return + :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~gridfs.grid_file.GridOutCursor` + corresponding to this query. + + .. versionchanged:: 3.0 + Removed the read_preference, tag_sets, and + secondary_acceptable_latency_ms options. + .. versionadded:: 2.7 + .. seealso:: The MongoDB documentation on `find `_. + """ + return GridOutCursor(self._collection, *args, **kwargs) + + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> bool: + """Check if a file exists in this instance of :class:`GridFS`. + + The file to check for can be specified by the value of its + ``_id`` key, or by passing in a query document. A query + document can be passed in as dictionary, or by using keyword + arguments. 
Thus, the following three calls are equivalent: + + >>> fs.exists(file_id) + >>> fs.exists({"_id": file_id}) + >>> fs.exists(_id=file_id) + + As are the following two calls: + + >>> fs.exists({"filename": "mike.txt"}) + >>> fs.exists(filename="mike.txt") + + And the following two: + + >>> fs.exists({"foo": {"$gt": 12}}) + >>> fs.exists(foo={"$gt": 12}) + + Returns ``True`` if a matching file exists, ``False`` + otherwise. Calls to :meth:`exists` will not automatically + create appropriate indexes; application developers should be + sure to create indexes if needed and as appropriate. + + :param document_or_id: query document, or _id of the + document to check for + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: keyword arguments are used as a + query document, if they're present. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + if kwargs: + f = self._files.find_one(kwargs, ["_id"], session=session) + else: + f = self._files.find_one(document_or_id, ["_id"], session=session) + + return f is not None + + +class GridFSBucket: + """An instance of GridFS on top of a single Database.""" + + def __init__( + self, + db: Database[Any], + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: + """Create a new instance of :class:`GridFSBucket`. + + Raises :exc:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` + is not acknowledged. + + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults + to 255KB. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` + (the default) db.write_concern is used. + :param read_preference: The read preference to use. If + ``None`` (the default) db.read_preference is used. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionadded:: 3.1 + + .. seealso:: The MongoDB documentation on `gridfs `_. 
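+
+        A construction sketch with non-default options (the bucket name and
+        sizes here are illustrative only)::
+
+            from pymongo import MongoClient, WriteConcern
+            from gridfs import GridFSBucket
+
+            db = MongoClient().test
+            bucket = GridFSBucket(
+                db,
+                bucket_name="images",
+                chunk_size_bytes=1024 * 1024,
+                write_concern=WriteConcern(w="majority"),
+            )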
+ """ + if not isinstance(db, Database): + raise TypeError(f"database must be an instance of Database, not {type(db)}") + + db = _clear_entity_type_registry(db) + + wtc = write_concern if write_concern is not None else db.write_concern + if not wtc.acknowledged: + raise ConfigurationError("write concern must be acknowledged") + + self._bucket_name = bucket_name + self._collection = db[bucket_name] + self._chunks: Collection[Any] = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._files: Collection[Any] = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout + + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the filename, and can choose to add any + additional information in the metadata field of the file document or + modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream( + "test_file", chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return GridIn(self._collection, session=session, **opts) + + def open_upload_stream_with_id( + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the file id and filename, and can choose to add + any additional information in the metadata field of the file document + or modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream_with_id( + ObjectId(), + "test_file", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. The id must not have + already been used for another file. 
+        :param filename: The name of the file to upload.
+        :param chunk_size_bytes: (optional) The number of bytes per chunk of this
+            file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
+        :param metadata: User data for the 'metadata' field of the
+            files collection document. If not provided the metadata field will
+            be omitted from the files collection document.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        validate_string("filename", filename)
+
+        opts = {
+            "_id": file_id,
+            "filename": filename,
+            "chunk_size": (
+                chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes
+            ),
+        }
+        if metadata is not None:
+            opts["metadata"] = metadata
+
+        return GridIn(self._collection, session=session, **opts)
+
+    @_csot.apply
+    def upload_from_stream(
+        self,
+        filename: str,
+        source: Any,
+        chunk_size_bytes: Optional[int] = None,
+        metadata: Optional[Mapping[str, Any]] = None,
+        session: Optional[ClientSession] = None,
+    ) -> ObjectId:
+        """Uploads a user file to a GridFS bucket.
+
+        Reads the contents of the user file from `source` and uploads
+        it to the file `filename`. Source can be a string or file-like object.
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            file_id = fs.upload_from_stream(
+                "test_file",
+                "data I want to store!",
+                chunk_size_bytes=4,
+                metadata={"contentType": "text/plain"})
+
+        Returns the _id of the uploaded file.
+
+        Raises :exc:`~ValueError` if `filename` is not a string.
+
+        :param filename: The name of the file to upload.
+        :param source: The source stream of the content to be uploaded. Must be
+            a file-like object that implements :meth:`read` or a string.
+        :param chunk_size_bytes: (optional) The number of bytes per chunk of this
+            file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
+        :param metadata: User data for the 'metadata' field of the
+            files collection document. If not provided the metadata field will
+            be omitted from the files collection document.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin:
+            gin.write(source)
+
+        return cast(ObjectId, gin._id)
+
+    @_csot.apply
+    def upload_from_stream_with_id(
+        self,
+        file_id: Any,
+        filename: str,
+        source: Any,
+        chunk_size_bytes: Optional[int] = None,
+        metadata: Optional[Mapping[str, Any]] = None,
+        session: Optional[ClientSession] = None,
+    ) -> None:
+        """Uploads a user file to a GridFS bucket with a custom file id.
+
+        Reads the contents of the user file from `source` and uploads
+        it to the file `filename`. Source can be a string or file-like object.
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            fs.upload_from_stream_with_id(
+                ObjectId(),
+                "test_file",
+                "data I want to store!",
+                chunk_size_bytes=4,
+                metadata={"contentType": "text/plain"})
+
+        Raises :exc:`~ValueError` if `filename` is not a string.
+
+        :param file_id: The id to use for this file. The id must not have
+            already been used for another file.
+        :param filename: The name of the file to upload.
+        :param source: The source stream of the content to be uploaded. Must be
+            a file-like object that implements :meth:`read` or a string.
+        :param chunk_size_bytes: (optional) The number of bytes per chunk of this
+            file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
+        :param metadata: User data for the 'metadata' field of the
+            files collection document. If not provided the metadata field will
+            be omitted from the files collection document.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        with self.open_upload_stream_with_id(
+            file_id, filename, chunk_size_bytes, metadata, session=session
+        ) as gin:
+            gin.write(source)
+
+    def open_download_stream(
+        self, file_id: Any, session: Optional[ClientSession] = None
+    ) -> GridOut:
+        """Opens a Stream from which the application can read the contents of
+        the stored file specified by file_id.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # get _id of file to read.
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            grid_out = fs.open_download_stream(file_id)
+            contents = grid_out.read()
+
+        Returns an instance of :class:`~gridfs.grid_file.GridOut`.
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be downloaded.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        gout = GridOut(self._collection, file_id, session=session)
+
+        # Raise NoFile now, instead of on first attribute access.
+        gout.open()
+        return gout
+
+    @_csot.apply
+    def download_to_stream(
+        self, file_id: Any, destination: Any, session: Optional[ClientSession] = None
+    ) -> None:
+        """Downloads the contents of the stored file specified by file_id and
+        writes the contents to `destination`.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to read
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            # Get file to write to
+            file = open('myfile','wb+')
+            fs.download_to_stream(file_id, file)
+            file.seek(0)
+            contents = file.read()
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be downloaded.
+        :param destination: a file-like object implementing :meth:`write`.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        with self.open_download_stream(file_id, session=session) as gout:
+            while True:
+                chunk = gout.readchunk()
+                if not len(chunk):
+                    break
+                destination.write(chunk)
+
+    @_csot.apply
+    def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None:
+        """Given a file_id, delete this stored file's files collection document
+        and associated chunks from a GridFS bucket.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to delete
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            fs.delete(file_id)
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be deleted.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
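+
+        A sketch of the failure mode: deleting an ``_id`` that matches no
+        file raises :exc:`~gridfs.errors.NoFile`::
+
+            from bson.objectid import ObjectId
+            from gridfs.errors import NoFile
+
+            try:
+                fs.delete(ObjectId())  # fresh id, matches nothing
+            except NoFile:
+                pass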
+ """ + _disallow_transactions(session) + res = self._files.delete_one({"_id": file_id}, session=session) + self._chunks.delete_many({"files_id": file_id}, session=session) + if not res.deleted_count: + raise NoFile("no file could be deleted because none matched %s" % file_id) + + @_csot.apply + def delete_by_name(self, filename: str, session: Optional[ClientSession] = None) -> None: + """Given a filename, delete this stored file's files collection document(s) + and associated chunks from a GridFS bucket. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + fs.upload_from_stream("test_file", "data I want to store!") + fs.delete_by_name("test_file") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The name of the file to be deleted. + :param session: a :class:`~pymongo.client_session.ClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + files = self._files.find({"filename": filename}, {"_id": 1}, session=session) + file_ids = [file["_id"] for file in files] + res = self._files.delete_many({"_id": {"$in": file_ids}}, session=session) + self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session) + if not res.deleted_count: + raise NoFile(f"no file could be deleted because none matched filename {filename!r}") + + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: + """Find and return the files collection documents that match ``filter`` + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. + + For example:: + + for grid_data in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_data.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.ClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: Search query. + :param batch_size: The number of documents to return per + batch. + :param limit: The maximum number of documents to return. + :param no_cursor_timeout: The server normally times out idle + cursors after an inactivity period (10 minutes) to prevent excess + memory use. Set this option to True prevent that. + :param skip: The number of documents to skip before + returning. + :param sort: The order by which to sort results. Defaults to + None. + """ + return GridOutCursor(self._collection, *args, **kwargs) + + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: + """Opens a Stream from which the application can read the contents of + `filename` and optional `revision`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + grid_out = fs.open_download_stream_by_name("test_file") + contents = grid_out.read() + + Returns an instance of :class:`~gridfs.grid_file.GridOut`. 
+
+        Raises :exc:`~gridfs.errors.NoFile` if no such version of
+        that file exists.
+
+        Raises :exc:`~ValueError` if `filename` is not a string.
+
+        :param filename: The name of the file to read from.
+        :param revision: Which revision (documents with the same
+            filename and different uploadDate) of the file to retrieve.
+            Defaults to -1 (the most recent revision).
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        :Note: Revision numbers are defined as follows:
+
+          - 0 = the original stored file
+          - 1 = the first revision
+          - 2 = the second revision
+          - etc...
+          - -2 = the second most recent revision
+          - -1 = the most recent revision
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        validate_string("filename", filename)
+        query = {"filename": filename}
+        _disallow_transactions(session)
+        cursor = self._files.find(query, session=session)
+        if revision < 0:
+            skip = abs(revision) - 1
+            cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
+        else:
+            cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING)
+        try:
+            grid_file = next(cursor)
+            return GridOut(self._collection, file_document=grid_file, session=session)
+        except StopIteration:
+            raise NoFile("no version %d for filename %r" % (revision, filename)) from None
+
+    @_csot.apply
+    def download_to_stream_by_name(
+        self,
+        filename: str,
+        destination: Any,
+        revision: int = -1,
+        session: Optional[ClientSession] = None,
+    ) -> None:
+        """Write the contents of `filename` (with optional `revision`) to
+        `destination`.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get file to write to
+            file = open('myfile','wb')
+            fs.download_to_stream_by_name("test_file", file)
+
+        Raises :exc:`~gridfs.errors.NoFile` if no such version of
+        that file exists.
+
+        Raises :exc:`~ValueError` if `filename` is not a string.
+
+        :param filename: The name of the file to read from.
+        :param destination: A file-like object that implements :meth:`write`.
+        :param revision: Which revision (documents with the same
+            filename and different uploadDate) of the file to retrieve.
+            Defaults to -1 (the most recent revision).
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        :Note: Revision numbers are defined as follows:
+
+          - 0 = the original stored file
+          - 1 = the first revision
+          - 2 = the second revision
+          - etc...
+          - -2 = the second most recent revision
+          - -1 = the most recent revision
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        with self.open_download_stream_by_name(filename, revision, session=session) as gout:
+            while True:
+                chunk = gout.readchunk()
+                if not len(chunk):
+                    break
+                destination.write(chunk)
+
+    def rename(
+        self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None
+    ) -> None:
+        """Renames the stored file with the specified file_id.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to rename
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            fs.rename(file_id, "new_test_name")
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be renamed.
+        :param new_filename: The new name of the file.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
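+
+        Because :meth:`rename` updates a single document by ``_id``, other
+        revisions that share the old filename keep their name; a sketch::
+
+            fid = fs.upload_from_stream("old.txt", b"data")
+            fs.rename(fid, "new.txt")
+            assert fs.open_download_stream(fid).filename == "new.txt"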
+ """ + _disallow_transactions(session) + result = self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) + + def rename_by_name( + self, filename: str, new_filename: str, session: Optional[ClientSession] = None + ) -> None: + """Renames the stored file with the specified filename. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + fs.upload_from_stream("test_file", "data I want to store!") + fs.rename_by_name("test_file", "new_test_name") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The filename of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a :class:`~pymongo.client_session.ClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + result = self._files.update_many( + {"filename": filename}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + f"no files could be renamed {new_filename!r} because none matched filename {filename!r}" + ) + + +class GridIn: + """Class to write data to GridFS.""" + + def __init__( + self, + root_collection: Collection[Any], + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> None: + """Write a file to GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Raises :class:`TypeError` if `root_collection` is not an + instance of :class:`~pymongo.collection.Collection`. + + Any of the file level options specified in the `GridFS Spec + `_ may be passed as + keyword arguments. Any additional keyword arguments will be + set as additional fields on the file document. Valid keyword + arguments include: + + - ``"_id"``: unique ID for this file (default: + :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must + not have already been used for another file + + - ``"filename"``: human name for the file + + - ``"contentType"`` or ``"content_type"``: valid mime-type + for the file + + - ``"chunkSize"`` or ``"chunk_size"``: size of each of the + chunks, in bytes (default: 255 kb) + + - ``"encoding"``: encoding used for this file. Any :class:`str` + that is written to the file will be converted to :class:`bytes`. + + :param root_collection: root collection to write to + :param session: a + :class:`~pymongo.client_session.ClientSession` to use for all + commands + :param kwargs: Any: file level options (see above) + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. 
versionchanged:: 3.0 + `root_collection` must use an acknowledged + :attr:`~pymongo.collection.Collection.write_concern` + """ + if not isinstance(root_collection, Collection): + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) + + if not root_collection.write_concern.acknowledged: + raise ConfigurationError("root_collection must use acknowledged write_concern") + _disallow_transactions(session) + + # Handle alternative naming + if "content_type" in kwargs: + kwargs["contentType"] = kwargs.pop("content_type") + if "chunk_size" in kwargs: + kwargs["chunkSize"] = kwargs.pop("chunk_size") + + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) + + # Defaults + kwargs["_id"] = kwargs.get("_id", ObjectId()) + kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) + object.__setattr__(self, "_session", session) + object.__setattr__(self, "_coll", coll) + object.__setattr__(self, "_chunks", coll.chunks) + object.__setattr__(self, "_file", kwargs) + object.__setattr__(self, "_buffer", io.BytesIO()) + object.__setattr__(self, "_position", 0) + object.__setattr__(self, "_chunk_number", 0) + object.__setattr__(self, "_closed", False) + object.__setattr__(self, "_ensured_index", False) + object.__setattr__(self, "_buffered_docs", []) + object.__setattr__(self, "_buffered_docs_size", 0) + + def _create_index(self, collection: Collection[Any], index_key: Any, unique: bool) -> None: + doc = collection.find_one(projection={"_id": 1}, session=self._session) + if doc is None: + try: + index_keys = [ + index_spec["key"] + for index_spec in collection.list_indexes(session=self._session) + ] + except OperationFailure: + index_keys = [] + if index_key not in index_keys: + collection.create_index(index_key.items(), unique=unique, session=self._session) + + def _ensure_indexes(self) -> None: + if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) + self._create_index(self._coll.files, _F_INDEX, False) + self._create_index(self._coll.chunks, _C_INDEX, True) + object.__setattr__(self, "_ensured_index", True) + + def abort(self) -> None: + """Remove all chunks/files that may have been uploaded and close.""" + self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) + object.__setattr__(self, "_closed", True) + + @property + def closed(self) -> bool: + """Is this file closed?""" + return self._closed + + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) + filename: Optional[str] = _grid_in_property("filename", "Name of this file.") + name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_in_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _grid_in_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + closed_only=True, + ) + + _buffer: io.BytesIO + _closed: bool + _buffered_docs: list[dict[str, Any]] + _buffered_docs_size: int + + def __getattr__(self, name: str) -> Any: + if name == "_coll": + return object.__getattribute__(self, name) + elif name in self._file: + return self._file[name] + raise AttributeError("GridIn object has no attribute '%s'" % name) + + def __setattr__(self, name: str, value: Any) -> None: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + # All other attributes are part of the document in db.fs.files. + # Store them to be sent to server on close() or if closed, send + # them now. + self._file[name] = value + if self._closed: + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "GridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use GridIn.set() instead" + ) + + def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + + def _flush_data(self, data: Any, force: bool = False) -> None: + """Flush `data` to a chunk.""" + self._ensure_indexes() + assert len(data) <= self.chunk_size + if data: + self._buffered_docs.append( + {"files_id": self._file["_id"], "n": self._chunk_number, "data": data} + ) + self._buffered_docs_size += len(data) + _CHUNK_OVERHEAD + if not self._buffered_docs: + return + # Limit to 100,000 chunks or 32MB (+1 chunk) of data. + if ( + force + or self._buffered_docs_size >= _UPLOAD_BUFFER_SIZE + or len(self._buffered_docs) >= _UPLOAD_BUFFER_CHUNKS + ): + try: + self._chunks.insert_many(self._buffered_docs, session=self._session) + except BulkWriteError as exc: + # For backwards compatibility, raise an insert_one style exception. + write_errors = exc.details["writeErrors"] + for err in write_errors: + if err.get("code") in (11000, 11001, 12582): # Duplicate key errors + self._raise_file_exists(self._file["_id"]) + result = {"writeErrors": write_errors} + wces = exc.details["writeConcernErrors"] + if wces: + result["writeConcernError"] = wces[-1] + _check_write_command_response(result) + raise + self._buffered_docs = [] + self._buffered_docs_size = 0 + self._chunk_number += 1 + self._position += len(data) + + def _flush_buffer(self, force: bool = False) -> None: + """Flush the buffer contents out to a chunk.""" + self._flush_data(self._buffer.getvalue(), force=force) + self._buffer.close() + self._buffer = io.BytesIO() + + def _flush(self) -> Any: + """Flush the file to the database.""" + try: + self._flush_buffer(force=True) + # The GridFS spec says length SHOULD be an Int64. + self._file["length"] = Int64(self._position) + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) + + return self._coll.files.insert_one(self._file, session=self._session) + except DuplicateKeyError: + self._raise_file_exists(self._id) + + def _raise_file_exists(self, file_id: Any) -> NoReturn: + """Raise a FileExists exception for the given file_id.""" + raise FileExists("file with _id %r already exists" % file_id) + + def close(self) -> None: + """Flush the file and close it. + + A closed file cannot be written any more. 
Calling + :meth:`close` more than once is allowed. + """ + if not self._closed: + self._flush() + object.__setattr__(self, "_closed", True) + + def read(self, size: int = -1) -> NoReturn: + raise io.UnsupportedOperation("read") + + def readable(self) -> bool: + return False + + def seekable(self) -> bool: + return False + + def write(self, data: Any) -> None: + """Write data to the file. There is no return value. + + `data` can be either a string of bytes or a file-like object + (implementing :meth:`read`). If the file has an + :attr:`encoding` attribute, `data` can also be a + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. + + Due to buffering, the data may not actually be written to the + database until the :meth:`close` method is called. Raises + :class:`ValueError` if this file is already closed. Raises + :class:`TypeError` if `data` is not an instance of + :class:`bytes`, a file-like object, or an instance of :class:`str`. + Unicode data is only allowed if the file has an :attr:`encoding` + attribute. + + :param data: string of bytes or file-like object to be written + to the file + """ + if self._closed: + raise ValueError("cannot write to a closed file") + + try: + # file-like + read = data.read + except AttributeError: + # string + if not isinstance(data, (str, bytes)): + raise TypeError("can only write strings or file-like objects") from None + if isinstance(data, str): + try: + data = data.encode(self.encoding) + except AttributeError: + raise TypeError( + "must specify an encoding for file in order to write str" + ) from None + read = io.BytesIO(data).read + + if inspect.iscoroutinefunction(read): + self._write_async(read) + else: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + def _write_async(self, read: Any) -> None: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + def writelines(self, sequence: Iterable[Any]) -> None: + """Write a sequence of strings to the file. + + Does not add separators. + """ + for line in sequence: + self.write(line) + + def writeable(self) -> bool: + return True + + def __enter__(self) -> GridIn: + """Support for the context manager protocol.""" + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Support for the context manager protocol. + + Close the file if no exceptions occur and allow exceptions to propagate. + """ + if exc_type is None: + # No exceptions happened. + self.close() + else: + # Something happened, at minimum mark as closed. 
+ object.__setattr__(self, "_closed", True) + + # propagate exceptions + return False + + +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class GridOut(GRIDOUT_BASE_CLASS): # type: ignore + + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: Collection[Any], + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None, + ) -> None: + """Read a file from GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Either `file_id` or `file_document` must be specified, + `file_document` will be given priority if present. Raises + :class:`TypeError` if `root_collection` is not an instance of + :class:`~pymongo.collection.Collection`. + + :param root_collection: root collection to read from + :param file_id: value of ``"_id"`` for the file to read + :param file_document: file document from + `root_collection.files` + :param session: a + :class:`~pymongo.client_session.ClientSession` to use for all + commands + + .. versionchanged:: 3.8 + For better performance and to better follow the GridFS spec, + :class:`GridOut` now uses a single cursor to read all the chunks in + the file. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Creating a GridOut does not immediately retrieve the file metadata + from the server. Metadata is fetched when first needed. + """ + if not isinstance(root_collection, Collection): + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) + _disallow_transactions(session) + + root_collection = _clear_entity_type_registry(root_collection) + + super().__init__() + + self._chunks = root_collection.chunks + self._files = root_collection.files + self._file_id = file_id + self._buffer = EMPTY + # Start position within the current buffered chunk. + self._buffer_pos = 0 + self._chunk_iter = None + # Position within the total file. + self._position = 0 + self._file = file_document + self._session = session + if not _IS_SYNC: + self.closed = False + + _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _grid_out_property("filename", "Name of this file.") + name: str = _grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_out_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) + aliases: Optional[list[str]] = _grid_out_property( + "aliases", "DEPRECATED, will be removed in PyMongo 5.0. List of aliases for this file." + ) + metadata: Optional[Mapping[str, Any]] = _grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _grid_out_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + ) + + _file: Any + _chunk_iter: Any + + if not _IS_SYNC: + closed: bool + + def __next__(self) -> bytes: + line = self.readline() + if line: + return line + raise StopIteration() + + def to_list(self) -> list[bytes]: + return [x for x in self] # noqa: C416, RUF100 + + def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return self._read_size_or_line(size=size, line=True) + + def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines + + def open(self) -> None: + if not self._file: + _disallow_transactions(self._session) + self._file = self._files.find_one({"_id": self._file_id}, session=self._session) + if not self._file: + raise NoFile( + f"no file in gridfs collection {self._files!r} with _id {self._file_id!r}" + ) + + def __getattr__(self, name: str) -> Any: + if _IS_SYNC: + self.open() # type: ignore[unused-coroutine] + elif not self._file: + raise InvalidOperation( + "You must call GridOut.open() before accessing the %s property" % name + ) + if name in self._file: + return self._file[name] + raise AttributeError("GridOut object has no attribute '%s'" % name) + + def readable(self) -> bool: + return True + + def readchunk(self) -> bytes: + """Reads a chunk at a time. If the current position is within a + chunk the remainder of the chunk is returned. + """ + self.open() + received = len(self._buffer) - self._buffer_pos + chunk_data = EMPTY + chunk_size = int(self.chunk_size) + + if received > 0: + chunk_data = self._buffer[self._buffer_pos :] + elif self._position < int(self.length): + chunk_number = int((received + self._position) / chunk_size) + if self._chunk_iter is None: + self._chunk_iter = GridOutChunkIterator( + self, self._chunks, self._session, chunk_number + ) + + chunk = self._chunk_iter.next() + chunk_data = chunk["data"][self._position % chunk_size :] + + if not chunk_data: + raise CorruptGridFile("truncated chunk") + + self._position += len(chunk_data) + self._buffer = EMPTY + self._buffer_pos = 0 + return chunk_data + + def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" + self.open() + remainder = int(self.length) - self._position + if size < 0 or size > remainder: + size = remainder + + if size == 0: + return EMPTY + + received = 0 + data = [] + while received < size: + needed = size - received + if self._buffer: + # Optimization: Read the buffer with zero byte copies. + buf = self._buffer + chunk_start = self._buffer_pos + chunk_data = memoryview(buf)[self._buffer_pos :] + self._buffer = EMPTY + self._buffer_pos = 0 + self._position += len(chunk_data) + else: + buf = self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. 
+                    size = received + pos + 1
+                    needed = pos + 1
+            if len(chunk_data) > needed:
+                data.append(chunk_data[:needed])
+                # Optimization: Save the buffer with zero byte copies.
+                self._buffer = buf
+                self._buffer_pos = chunk_start + needed
+                self._position -= len(self._buffer) - self._buffer_pos
+            else:
+                data.append(chunk_data)
+            received += len(chunk_data)
+
+        # Detect extra chunks after reading the entire file.
+        if size == remainder and self._chunk_iter:
+            try:
+                self._chunk_iter.next()
+            except StopIteration:
+                pass
+
+        return b"".join(data)
+
+    def read(self, size: int = -1) -> bytes:
+        """Read at most `size` bytes from the file (less if there
+        isn't enough data).
+
+        The bytes are returned as an instance of :class:`bytes`. If `size`
+        is negative or omitted, all data is read.
+
+        :param size: the number of bytes to read
+
+        .. versionchanged:: 3.8
+           This method now only checks for extra chunks after reading the
+           entire file. Previously, this method would check for extra chunks
+           on every call.
+        """
+        return self._read_size_or_line(size=size)
+
+    def tell(self) -> int:
+        """Return the current position of this file."""
+        return self._position
+
+    def seek(self, pos: int, whence: int = _SEEK_SET) -> int:
+        """Set the current position of this file.
+
+        :param pos: the position (or offset if using relative
+            positioning) to seek to
+        :param whence: where to seek
+            from. :attr:`os.SEEK_SET` (``0``) for absolute file
+            positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
+            to the current position, :attr:`os.SEEK_END` (``2``) to
+            seek relative to the file's end.
+
+        .. versionchanged:: 4.1
+           The method now returns the new position in the file, to
+           conform to the behavior of :meth:`io.IOBase.seek`.
+        """
+        if whence == _SEEK_SET:
+            new_pos = pos
+        elif whence == _SEEK_CUR:
+            new_pos = self._position + pos
+        elif whence == _SEEK_END:
+            new_pos = int(self.length) + pos
+        else:
+            raise OSError(22, "Invalid value for `whence`")
+
+        if new_pos < 0:
+            raise OSError(22, "Invalid value for `pos` - must be non-negative")
+
+        # Optimization, continue using the same buffer and chunk iterator.
+        if new_pos == self._position:
+            return new_pos
+
+        self._position = new_pos
+        self._buffer = EMPTY
+        self._buffer_pos = 0
+        if self._chunk_iter:
+            self._chunk_iter.close()
+            self._chunk_iter = None
+        return new_pos
+
+    def seekable(self) -> bool:
+        return True
+
+    def __iter__(self) -> GridOut:
+        """Return an iterator over all of this file's data.
+
+        The iterator will return lines (delimited by ``b'\\n'``) of
+        :class:`bytes`. This can be useful when serving files
+        using a webserver that handles such an iterator efficiently.
+
+        .. versionchanged:: 3.8
+           The iterator now raises :class:`CorruptGridFile` when encountering
+           any truncated, missing, or extra chunk in a file. The previous
+           behavior was to only raise :class:`CorruptGridFile` on a missing
+           chunk.
+
+        .. versionchanged:: 4.0
+           The iterator now iterates over *lines* in the file, instead
+           of chunks, to conform to the base class :py:class:`io.IOBase`.
+           Use :meth:`GridOut.readchunk` to read chunk by chunk instead
+           of line by line.
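+
+        For illustration, line-by-line iteration (assuming ``fs`` is a
+        :class:`~gridfs.GridFS` instance and ``file_id`` refers to an
+        existing file)::
+
+            for line in fs.get(file_id):
+                print(line)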
+ """ + return self + + def close(self) -> None: + """Make GridOut more generically file-like.""" + if self._chunk_iter: + self._chunk_iter.close() + self._chunk_iter = None + if _IS_SYNC: + super().close() + else: + self.closed = True + + def write(self, value: Any) -> NoReturn: + raise io.UnsupportedOperation("write") + + def writelines(self, lines: Any) -> NoReturn: + raise io.UnsupportedOperation("writelines") + + def writable(self) -> bool: + return False + + def __enter__(self) -> GridOut: + """Makes it possible to use :class:`GridOut` files + with the async context manager protocol. + """ + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Makes it possible to use :class:`GridOut` files + with the async context manager protocol. + """ + self.close() + return False + + def fileno(self) -> NoReturn: + raise io.UnsupportedOperation("fileno") + + def flush(self) -> None: + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self) -> bool: + return False + + def truncate(self, size: Optional[int] = None) -> NoReturn: + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation("truncate") + + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self) -> None: + pass + + +class GridOutChunkIterator: + """Iterates over a file's chunks using a single cursor. + + Raises CorruptGridFile when encountering any truncated, missing, or extra + chunk in a file. + """ + + def __init__( + self, + grid_out: GridOut, + chunks: Collection[Any], + session: Optional[ClientSession], + next_chunk: Any, + ) -> None: + self._id = grid_out._id + self._chunk_size = int(grid_out.chunk_size) + self._length = int(grid_out.length) + self._chunks = chunks + self._session = session + self._next_chunk = next_chunk + self._num_chunks = math.ceil(float(self._length) / self._chunk_size) + self._cursor = None + + _cursor: Optional[Cursor[Any]] + + def expected_chunk_length(self, chunk_n: int) -> int: + if chunk_n < self._num_chunks - 1: + return self._chunk_size + return self._length - (self._chunk_size * (self._num_chunks - 1)) + + def __iter__(self) -> GridOutChunkIterator: + return self + + def _create_cursor(self) -> None: + filter = {"files_id": self._id} + if self._next_chunk > 0: + filter["n"] = {"$gte": self._next_chunk} + _disallow_transactions(self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) + + def _next_with_retry(self) -> Mapping[str, Any]: + """Return the next chunk and retry once on CursorNotFound. + + We retry on CursorNotFound to maintain backwards compatibility in + cases where two calls to read occur more than 10 minutes apart (the + server's default cursor timeout). 
+ """ + if self._cursor is None: + self._create_cursor() + assert self._cursor is not None + try: + return self._cursor.next() + except CursorNotFound: + self._cursor.close() + self._create_cursor() + return self._cursor.next() + + def next(self) -> Mapping[str, Any]: + try: + chunk = self._next_with_retry() + except StopIteration: + if self._next_chunk >= self._num_chunks: + raise + raise CorruptGridFile("no chunk #%d" % self._next_chunk) from None + + if chunk["n"] != self._next_chunk: + self.close() + raise CorruptGridFile( + "Missing chunk: expected chunk #%d but found " + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) + + if chunk["n"] >= self._num_chunks: + # According to spec, ignore extra chunks if they are empty. + if len(chunk["data"]): + self.close() + raise CorruptGridFile( + "Extra chunk found: expected %d chunks but found " + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) + + expected_length = self.expected_chunk_length(chunk["n"]) + if len(chunk["data"]) != expected_length: + self.close() + raise CorruptGridFile( + "truncated chunk #%d: expected chunk length to be %d but " + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) + + self._next_chunk += 1 + return chunk + + __next__ = next + + def close(self) -> None: + if self._cursor: + self._cursor.close() + self._cursor = None + + +class GridOutIterator: + def __init__(self, grid_out: GridOut, chunks: Collection[Any], session: ClientSession): + self._chunk_iter = GridOutChunkIterator(grid_out, chunks, session, 0) + + def __iter__(self) -> GridOutIterator: + return self + + def next(self) -> bytes: + chunk = self._chunk_iter.next() + return bytes(chunk["data"]) + + __next__ = next + + +class GridOutCursor(Cursor): # type: ignore[type-arg] + """A cursor / iterator for returning GridOut objects as the result + of an arbitrary query against the GridFS files collection. + """ + + def __init__( + self, + collection: Collection[Any], + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None, + ) -> None: + """Create a new cursor, similar to the normal + :class:`~pymongo.cursor.Cursor`. + + Should not be called directly by application developers - see + the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. + + .. versionadded 2.7 + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + _disallow_transactions(session) + collection = _clear_entity_type_registry(collection) + + # Hold on to the base "fs" collection to create GridOut objects later. 
+ self._root_collection = collection + + super().__init__( + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) + + def next(self) -> GridOut: + """Get next GridOut object from cursor.""" + _disallow_transactions(self.session) + next_file = super().next() + return GridOut(self._root_collection, file_document=next_file, session=self.session) + + def to_list(self, length: Optional[int] = None) -> list[GridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(self.next()) + return ret + + __next__ = next + + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def _clone_base(self, session: Optional[ClientSession]) -> GridOutCursor: + """Creates an empty GridOutCursor for information to be copied into.""" + return GridOutCursor(self._root_collection, session=session) diff --git a/hatch_build.py b/hatch_build.py new file mode 100644 index 0000000000..40271972dd --- /dev/null +++ b/hatch_build.py @@ -0,0 +1,36 @@ +"""A custom hatch build hook for pymongo.""" +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class CustomHook(BuildHookInterface): + """The pymongo build hook.""" + + def initialize(self, version, build_data): + """Initialize the hook.""" + if self.target_name == "sdist": + return + here = Path(__file__).parent.resolve() + sys.path.insert(0, str(here)) + + subprocess.run([sys.executable, "_setup.py", "build_ext", "-i"], check=True) + + # Ensure wheel is marked as binary and contains the binary files. + build_data["infer_tag"] = True + build_data["pure_python"] = False + if os.name == "nt": + patt = ".pyd" + else: + patt = ".so" + for pkg in ["bson", "pymongo"]: + dpath = here / pkg + for fpath in dpath.glob(f"*{patt}"): + relpath = os.path.relpath(fpath, here) + build_data["artifacts"].append(relpath) + build_data["force_include"][relpath] = relpath diff --git a/integration_tests/README.md b/integration_tests/README.md new file mode 100644 index 0000000000..fb64a9066f --- /dev/null +++ b/integration_tests/README.md @@ -0,0 +1,42 @@ +# Integration Tests + +A set of tests that verify the usage of PyMongo with downstream packages or frameworks. + +Each test uses [PEP 723 inline metadata](https://packaging.python.org/en/latest/specifications/inline-script-metadata/) and can be run using `pipx` or `uv`. + +The `run.sh` convenience script can be used to run all of the files using `uv`. + +Here is an example header for the script with the inline dependencies: + +```python +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +``` + +Here is an example of using the test helper function to create a configured client for the test: + + +```python +import asyncio +import sys +from pathlib import Path + +# Use pymongo from parent directory. 
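+# (Prepending the repository root to sys.path makes the in-tree pymongo
+# sources importable without installing the package.)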
+root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +asyncio.run(main()) +``` diff --git a/integration_tests/run.sh b/integration_tests/run.sh new file mode 100755 index 0000000000..051e2b8a75 --- /dev/null +++ b/integration_tests/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Run all of the integration test files using `uv run`. +set -eu + +for file in integration_tests/test_*.py ; do + echo "-----------------" + echo "Running $file..." + uv run $file + echo "Running $file...done." + echo "-----------------" +done diff --git a/integration_tests/test_uv_loop.py b/integration_tests/test_uv_loop.py new file mode 100644 index 0000000000..88a3ad73ab --- /dev/null +++ b/integration_tests/test_uv_loop.py @@ -0,0 +1,27 @@ +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +from __future__ import annotations + +import sys +from pathlib import Path + +import uvloop + +# Use pymongo from parent directory. +root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +uvloop.run(main()) diff --git a/justfile b/justfile new file mode 100644 index 0000000000..17b95e87b7 --- /dev/null +++ b/justfile @@ -0,0 +1,85 @@ +# See https://just.systems/man/en/ for instructions +set shell := ["bash", "-c"] + +# Commonly used command segments. +typing_run := "uv run --group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" +docs_run := "uv run --extra docs" +doc_build := "./doc/_build" +mypy_args := "--install-types --non-interactive" + +# Make the default recipe private so it doesn't show up in the list. 
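+# Recipes declared with a trailing "&& resync" run the private resync
+# recipe after their own body, keeping the uv-managed environment in sync.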
+[private] +default: + @just --list + +[private] +resync: + @uv sync --quiet + +install: + bash .evergreen/scripts/setup-dev-env.sh + uvx pre-commit install + +[group('docs')] +docs: && resync + {{docs_run}} sphinx-build -W -b html doc {{doc_build}}/html + +[group('docs')] +docs-serve: && resync + {{docs_run}} sphinx-autobuild -W -b html doc --watch ./pymongo --watch ./bson --watch ./gridfs {{doc_build}}/serve + +[group('docs')] +docs-linkcheck: && resync + {{docs_run}} sphinx-build -E -b linkcheck doc {{doc_build}}/linkcheck + +[group('typing')] +typing: && resync + just typing-mypy + just typing-pyright + +[group('typing')] +typing-mypy: && resync + {{typing_run}} mypy {{mypy_args}} bson gridfs tools pymongo + {{typing_run}} mypy {{mypy_args}} --config-file mypy_test.ini test + {{typing_run}} mypy {{mypy_args}} test/test_typing.py test/test_typing_strict.py + +[group('typing')] +typing-pyright: && resync + {{typing_run}} pyright test/test_typing.py test/test_typing_strict.py + {{typing_run}} pyright -p strict_pyrightconfig.json test/test_typing_strict.py + +[group('lint')] +lint *args="": && resync + uvx pre-commit run --all-files {{args}} + +[group('lint')] +lint-manual *args="": && resync + uvx pre-commit run --all-files --hook-stage manual {{args}} + +[group('test')] +test *args="-v --durations=5 --maxfail=10": && resync + uv run --extra test pytest {{args}} + +[group('test')] +run-tests *args: && resync + bash ./.evergreen/run-tests.sh {{args}} + +[group('test')] +setup-tests *args="": + bash .evergreen/scripts/setup-tests.sh {{args}} + +[group('test')] +teardown-tests: + bash .evergreen/scripts/teardown-tests.sh + +[group('test')] +integration-tests: + bash integration_tests/run.sh + +[group('server')] +run-server *args="": + bash .evergreen/scripts/run-server.sh {{args}} + +[group('server')] +stop-server: + bash .evergreen/scripts/stop-server.sh diff --git a/mypy_test.ini b/mypy_test.ini new file mode 100644 index 0000000000..9fdc664e32 --- /dev/null +++ b/mypy_test.ini @@ -0,0 +1,15 @@ +[mypy] +strict = true +show_error_codes = true +disable_error_code = attr-defined, union-attr, var-annotated, assignment, no-redef, type-arg, import, no-untyped-call, no-untyped-def, index, no-any-return, misc +exclude = (?x)( + ^test/mypy_fails/*.*$ + | ^test/conftest.py$ + ) + +[mypy-pymongo.synchronous.*,gridfs.synchronous.*,test.*] +warn_unused_ignores = false +disable_error_code = unused-coroutine + +[mypy-pymongo.asynchronous.*,test.asynchronous.*] +warn_unused_ignores = false diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 7471389fad..ac540d94db 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -1,10 +1,10 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,7 +13,39 @@ # limitations under the License. 
"""Python driver for MongoDB.""" - +from __future__ import annotations + +from typing import ContextManager, Optional + +__all__ = [ + "ASCENDING", + "DESCENDING", + "GEO2D", + "GEOSPHERE", + "HASHED", + "TEXT", + "version_tuple", + "get_version_string", + "__version__", + "version", + "ReturnDocument", + "MAX_SUPPORTED_WIRE_VERSION", + "MIN_SUPPORTED_WIRE_VERSION", + "CursorType", + "MongoClient", + "AsyncMongoClient", + "DeleteMany", + "DeleteOne", + "IndexModel", + "InsertOne", + "ReplaceOne", + "UpdateMany", + "UpdateOne", + "ReadPreference", + "WriteConcern", + "has_c", + "timeout", +] ASCENDING = 1 """Ascending sort order.""" @@ -23,21 +55,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. versionadded:: 1.5.1 - -.. note:: Geo-spatial indexing requires server version **>= 1.3.3**. - -.. _geospatial index: http://docs.mongodb.org/manual/core/geospatial-indexes/ -""" - -GEOHAYSTACK = "geoHaystack" -"""Index specifier for a 2-dimensional `haystack index`_. - -.. versionadded:: 2.1 - -.. note:: Geo-spatial indexing requires server version **>= 1.5.6**. - -.. _haystack index: http://docs.mongodb.org/manual/core/geospatial-indexes/#haystack-indexes +.. _geospatial index: https://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -45,9 +63,7 @@ .. versionadded:: 2.5 -.. note:: 2dsphere indexing requires server version **>= 2.4.0**. - -.. _spherical geospatial index: http://docs.mongodb.org/manual/release-notes/2.4/#new-geospatial-indexes-with-geojson-and-improved-spherical-geometry +.. _spherical geospatial index: https://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -55,43 +71,108 @@ .. versionadded:: 2.5 -.. note:: hashed indexing requires server version **>= 2.4.0**. - -.. _hashed index: http://docs.mongodb.org/manual/release-notes/2.4/#new-hashed-index-and-sharding-with-a-hashed-shard-key +.. _hashed index: https://mongodb.com/docs/manual/core/index-hashed/ """ -OFF = 0 -"""No database profiling.""" -SLOW_ONLY = 1 -"""Only profile slow operations.""" -ALL = 2 -"""Profile all operations.""" +TEXT = "text" +"""Index specifier for a `text index`_. -version_tuple = (2, 7) +.. seealso:: MongoDB's `Atlas Search + `_ which offers more advanced + text search functionality. -def get_version_string(): - if isinstance(version_tuple[-1], basestring): - return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] - return '.'.join(map(str, version_tuple)) +.. versionadded:: 2.7.1 -version = get_version_string() -"""Current version of PyMongo.""" +.. 
_text index: https://mongodb.com/docs/manual/core/index-text/ +""" -from pymongo.common import (MIN_SUPPORTED_WIRE_VERSION, - MAX_SUPPORTED_WIRE_VERSION) -from pymongo.connection import Connection -from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.replica_set_connection import ReplicaSetConnection +from pymongo import _csot +from pymongo._version import __version__, get_version_string, version_tuple +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION, has_c +from pymongo.cursor import CursorType +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern + +# Public module compatibility imports +# isort: off +from pymongo import uri_parser # noqa: F401 +from pymongo import change_stream # noqa: F401 +from pymongo import client_session # noqa: F401 +from pymongo import collection # noqa: F401 +from pymongo import command_cursor # noqa: F401 +from pymongo import database # noqa: F401 +# isort: on + +version = __version__ +"""Current version of PyMongo.""" + + +def timeout(seconds: Optional[float]) -> ContextManager[None]: + """**(Provisional)** Apply the given timeout for a block of operations. + + .. note:: :func:`~pymongo.timeout` is currently provisional. Backwards + incompatible changes may occur before becoming officially supported. + + Use :func:`~pymongo.timeout` in a with-statement:: + + with pymongo.timeout(5): + client.db.coll.insert_one({}) + client.db.coll2.insert_one({}) + + When the with-statement is entered, a deadline is set for the entire + block. When that deadline is exceeded, any blocking pymongo operation + will raise a timeout exception. For example:: + + try: + with pymongo.timeout(5): + client.db.coll.insert_one({}) + time.sleep(5) + # The deadline has now expired, the next operation will raise + # a timeout exception. + client.db.coll2.insert_one({}) + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") + + When nesting :func:`~pymongo.timeout`, the nested deadline is capped by + the outer deadline. The deadline can only be shortened, not extended. + When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Still uses the original 5 second deadline. + coll.find_one() # Uses the original 5 second deadline. + + :param seconds: A non-negative floating point number expressing seconds, or None. + + :raises: :py:class:`ValueError`: When `seconds` is negative. -def has_c(): - """Is the C extension installed? + See `Limit Server Execution Time `_ for more examples. - .. versionadded:: 1.5 + .. 
versionadded:: 4.2 """ - try: - from pymongo import _cmessage - return True - except ImportError: - return False + if not isinstance(seconds, (int, float, type(None))): + raise TypeError(f"timeout must be None, an int, or a float, not {type(seconds)}") + if seconds and seconds < 0: + raise ValueError("timeout cannot be negative") + if seconds is not None: + seconds = float(seconds) + return _csot._TimeoutContext(seconds) diff --git a/pymongo/_asyncio_lock.py b/pymongo/_asyncio_lock.py new file mode 100644 index 0000000000..5ca09982fa --- /dev/null +++ b/pymongo/_asyncio_lock.py @@ -0,0 +1,309 @@ +# Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved + +"""Lock and Condition classes vendored from https://github.com/python/cpython/blob/main/Lib/asyncio/locks.py +to port 3.13 fixes to older versions of Python. +Can be removed once we drop Python 3.12 support.""" + +from __future__ import annotations + +import collections +import threading +from asyncio import events, exceptions +from typing import Any, Coroutine, Optional + +_global_lock = threading.Lock() + + +class _LoopBoundMixin: + _loop = None + + def _get_loop(self) -> Any: + loop = events._get_running_loop() + + if self._loop is None: + with _global_lock: + if self._loop is None: + self._loop = loop + if loop is not self._loop: + raise RuntimeError(f"{self!r} is bound to a different event loop") + return loop + + +class _ContextManagerMixin: + async def __aenter__(self) -> None: + await self.acquire() # type: ignore[attr-defined] + # We have no use for the "as ..." clause in the with + # statement for locks. + return + + async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() # type: ignore[attr-defined] + + +class Lock(_ContextManagerMixin, _LoopBoundMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular task when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another task changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one task is blocked in acquire() waiting for + the state to turn to unlocked, only one task proceeds when a + release() call resets the state to unlocked; successive release() + calls will unblock tasks in FIFO order. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... 
+ + """ + + def __init__(self) -> None: + self._waiters: Optional[collections.deque[Any]] = None + self._locked = False + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self._locked else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + def locked(self) -> bool: + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self) -> bool: + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + # Implement fair scheduling, where thread always waits + # its turn. Jumping the queue if all are cancelled is an optimization. + if not self._locked and ( + self._waiters is None or all(w.cancelled() for w in self._waiters) + ): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + # Currently the only exception designed be able to occur here. + + # Ensure the lock invariant: If lock is not claimed (or about + # to be claimed by us) and there is a Task in waiters, + # ensure that the Task at the head will run. + if not self._locked: + self._wake_up_first() + raise + + # assert self._locked is False + self._locked = True + return True + + def release(self) -> None: + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other tasks are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError("Lock is not acquired") + + def _wake_up_first(self) -> None: + """Ensure that the first waiter will wake up.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() means that the waiter is already set to wake up. + if not fut.done(): + fut.set_result(True) + + +class Condition(_ContextManagerMixin, _LoopBoundMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more tasks to wait until they are notified by another + task. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock: Optional[Lock] = None) -> None: + if lock is None: + lock = Lock() + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters: collections.deque[Any] = collections.deque() + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self.locked() else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + async def wait(self) -> bool: + """Wait until notified. + + If the calling task has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another task. Once + awakened, it re-acquires the lock and returns True. 
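+
+        A sketch of typical use (assuming ``cond`` is this Condition and
+        ``predicate`` is re-checked while holding the lock)::
+
+            async with cond:
+                while not predicate():
+                    await cond.wait()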
+ + This method may return spuriously, + which is why the caller should always + re-check the state and be prepared to wait() again. + """ + if not self.locked(): + raise RuntimeError("cannot wait on un-acquired lock") + + fut = self._get_loop().create_future() + self.release() + try: + try: + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must re-acquire lock even if wait is cancelled. + # We only catch CancelledError here, since we don't want any + # other (fatal) errors with the future to cause us to spin. + err = None + while True: + try: + await self.acquire() + break + except exceptions.CancelledError as e: + err = e + + if err is not None: + try: + raise err # Re-raise most recent exception instance. + finally: + err = None # Break reference cycles. + except BaseException: + # Any error raised out of here _may_ have occurred after this Task + # believed to have been successfully notified. + # Make sure to notify another Task instead. This may result + # in a "spurious wakeup", which is allowed as part of the + # Condition Variable protocol. + self._notify(1) + raise + + async def wait_for(self, predicate: Any) -> Coroutine[Any, Any, Any]: + """Wait until a predicate becomes true. + + The predicate should be a callable whose result will be + interpreted as a boolean value. The method will repeatedly + wait() until it evaluates to true. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n: int = 1) -> None: + """By default, wake up one task waiting on this condition, if any. + If the calling task has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up n of the tasks waiting for the condition + variable; if fewer than n are waiting, they are all awoken. + + Note: an awakened task does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError("cannot notify on un-acquired lock") + self._notify(n) + + def _notify(self, n: int) -> None: + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self) -> None: + """Wake up all tasks waiting on this condition. This method acts + like notify(), but wakes up all waiting tasks instead of one. If the + calling task has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) diff --git a/pymongo/_asyncio_task.py b/pymongo/_asyncio_task.py new file mode 100644 index 0000000000..118471963a --- /dev/null +++ b/pymongo/_asyncio_task.py @@ -0,0 +1,49 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A custom asyncio.Task that allows checking if a task has been sent a cancellation request. 
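+It counts outstanding cancel() requests so that cancelling() and uncancel()
+behave like the asyncio.Task methods added in Python 3.11.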
+Can be removed once we drop Python 3.10 support in favor of asyncio.Task.cancelling.""" + + +from __future__ import annotations + +import asyncio +import sys +from typing import Any, Coroutine, Optional + + +# TODO (https://jira.mongodb.org/browse/PYTHON-4981): Revisit once the underlying cause of the swallowed cancellations is uncovered +class _Task(asyncio.Task[Any]): + def __init__(self, coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> None: + super().__init__(coro, name=name) + self._cancel_requests = 0 + asyncio._register_task(self) + + def cancel(self, msg: Optional[str] = None) -> bool: + self._cancel_requests += 1 + return super().cancel(msg=msg) + + def uncancel(self) -> int: + if self._cancel_requests > 0: + self._cancel_requests -= 1 + return self._cancel_requests + + def cancelling(self) -> int: + return self._cancel_requests + + +def create_task(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task[Any]: + if sys.version_info >= (3, 11): + return asyncio.create_task(coro, name=name) + return _Task(coro, name=name) diff --git a/pymongo/_azure_helpers.py b/pymongo/_azure_helpers.py new file mode 100644 index 0000000000..8a7af0b407 --- /dev/null +++ b/pymongo/_azure_helpers.py @@ -0,0 +1,57 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Azure helpers.""" +from __future__ import annotations + +import json +from typing import Any, Optional + + +def _get_azure_response( + resource: str, client_id: Optional[str] = None, timeout: float = 5 +) -> dict[str, Any]: + # Deferred import to save overall import time. + from urllib.request import Request, urlopen + + url = "http://169.254.169.254/metadata/identity/oauth2/token" + url += "?api-version=2018-02-01" + url += f"&resource={resource}" + if client_id: + url += f"&client_id={client_id}" + headers = {"Metadata": "true", "Accept": "application/json"} + request = Request(url, headers=headers) # noqa: S310 + try: + with urlopen(request, timeout=timeout) as response: # noqa: S310 + status = response.status + body = response.read().decode("utf8") + except Exception as e: + msg = "Failed to acquire IMDS access token: %s" % e + raise ValueError(msg) from None + + if status != 200: + msg = "Failed to acquire IMDS access token." + raise ValueError(msg) + try: + data = json.loads(body) + except Exception: + raise ValueError("Azure IMDS response must be in JSON format") from None + + for key in ["access_token", "expires_in"]: + if not data.get(key): + msg = "Azure IMDS response must contain %s, but was %s." + msg = msg % (key, body) + raise ValueError(msg) + + return data diff --git a/pymongo/_client_bulk_shared.py b/pymongo/_client_bulk_shared.py new file mode 100644 index 0000000000..5814025566 --- /dev/null +++ b/pymongo/_client_bulk_shared.py @@ -0,0 +1,79 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across Client Bulk Write API implementations.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, NoReturn + +from pymongo.errors import ClientBulkWriteException, OperationFailure +from pymongo.helpers_shared import _get_wce_doc + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut + + +def _merge_command( + ops: list[tuple[str, Mapping[str, Any]]], + offset: int, + full_result: MutableMapping[str, Any], + result: Mapping[str, Any], +) -> None: + """Merge result of a single bulk write batch into the full result.""" + if result.get("error"): + full_result["error"] = result["error"] + + full_result["nInserted"] += result.get("nInserted", 0) + full_result["nDeleted"] += result.get("nDeleted", 0) + full_result["nMatched"] += result.get("nMatched", 0) + full_result["nModified"] += result.get("nModified", 0) + full_result["nUpserted"] += result.get("nUpserted", 0) + + write_errors = result.get("writeErrors") + if write_errors: + for doc in write_errors: + # Leave the server response intact for APM. + replacement = doc.copy() + original_index = doc["idx"] + offset + replacement["idx"] = original_index + # Add the failed operation to the error document. + replacement["op"] = ops[original_index][1] + full_result["writeErrors"].append(replacement) + + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) + + +def _throw_client_bulk_write_exception( + full_result: _DocumentOut, verbose_results: bool +) -> NoReturn: + """Raise a ClientBulkWriteException from the full result.""" + # retryWrites on MMAPv1 should raise an actionable error. + if full_result["writeErrors"]: + full_result["writeErrors"].sort(key=lambda error: error["idx"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) + if isinstance(full_result["error"], BaseException): + raise ClientBulkWriteException(full_result, verbose_results) from full_result["error"] + raise ClientBulkWriteException(full_result, verbose_results) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index cfe0049a05..a506863737 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -1,11 +1,11 @@ /* - * Copyright 2009-2014 MongoDB, Inc. + * Copyright 2009-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +20,7 @@ * should be used to speed up message creation. 
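+ * (The extension is optional at runtime: when it cannot be imported,
+ * PyMongo falls back to the pure-Python message implementation.)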
*/ +#define PY_SSIZE_T_CLEAN #include "Python.h" #include "_cbsonmodule.h" @@ -27,21 +28,14 @@ struct module_state { PyObject* _cbson; + PyObject* _max_bson_size_str; + PyObject* _max_message_size_str; + PyObject* _max_write_batch_size_str; + PyObject* _max_split_size_str; }; /* See comments about module initialization in _cbsonmodule.c */ -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif - -#if PY_MAJOR_VERSION >= 3 -#define BYTES_FORMAT_STRING "y#" -#else -#define BYTES_FORMAT_STRING "s#" -#endif #define DOC_TOO_LARGE_FMT "BSON document too large (%d bytes)" \ " - the connected server supports" \ @@ -51,7 +45,7 @@ static struct module_state _state; * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("pymongo.errors"); if (!errors) { return NULL; @@ -61,494 +55,290 @@ static PyObject* _error(char* name) { return error; } -/* add a lastError message on the end of the buffer. - * returns 0 on failure */ -static int add_last_error(PyObject* self, buffer_t buffer, - int request_id, char* ns, int nslen, PyObject* args) { - struct module_state *state = GETSTATE(self); - - int message_start; - int document_start; - int message_length; - int document_length; - PyObject* key; - PyObject* value; - Py_ssize_t pos = 0; - PyObject* one; - char *p = strchr(ns, '.'); - /* Length of the database portion of ns. */ - nslen = p ? (int)(p - ns) : nslen; - - message_start = buffer_save_space(buffer, 4); - if (message_start == -1) { - PyErr_NoMemory(); +/* The same as buffer_write_bytes except that it also validates + * "size" will fit in an int. + * Returns 0 on failure */ +static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssize_t size) { + int downsize = _downcast_and_check(size, 0); + if (size == -1) { return 0; } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00" /* opcode */ - "\x00\x00\x00\x00", /* options */ - 12) || - !buffer_write_bytes(buffer, - ns, nslen) || /* database */ - !buffer_write_bytes(buffer, - ".$cmd\x00" /* collection name */ - "\x00\x00\x00\x00" /* skip */ - "\xFF\xFF\xFF\xFF", /* limit (-1) */ - 14)) { - return 0; - } - - /* save space for length */ - document_start = buffer_save_space(buffer, 4); - if (document_start == -1) { - PyErr_NoMemory(); - return 0; - } - - /* getlasterror: 1 */ - if (!(one = PyLong_FromLong(1))) - return 0; - if (!write_pair(state->_cbson, buffer, "getlasterror", 12, one, 0, 4, 1)) { - Py_DECREF(one); - return 0; - } - Py_DECREF(one); - - /* getlasterror options */ - while (PyDict_Next(args, &pos, &key, &value)) { - if (!decode_and_write_pair(state->_cbson, buffer, key, value, 0, 4, 0)) { - return 0; - } - } - - /* EOD */ - if (!buffer_write_bytes(buffer, "\x00", 1)) { - return 0; - } - - message_length = buffer_get_position(buffer) - message_start; - document_length = buffer_get_position(buffer) - document_start; - memcpy(buffer_get_buffer(buffer) + message_start, &message_length, 4); - memcpy(buffer_get_buffer(buffer) + document_start, &document_length, 4); - return 1; -} - -static int init_insert_buffer(buffer_t buffer, int request_id, int options, - const char* coll_name, int coll_name_len) { - /* Save space for message length */ - int length_location = buffer_save_space(buffer, 4); - if (length_location == -1) 
{ - PyErr_NoMemory(); - return length_location; - } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd2\x07\x00\x00", - 8) || - !buffer_write_bytes(buffer, (const char*)&options, 4) || - !buffer_write_bytes(buffer, - coll_name, - coll_name_len + 1)) { - return -1; - } - return length_location; + return buffer_write_bytes(buffer, data, downsize); } -static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { - /* Note: As of PyMongo 2.6, this function is no longer used. It - * is being kept (with tests) for backwards compatibility with 3rd - * party libraries that may currently be using it, but will likely - * be removed in a future release. */ - struct module_state *state = GETSTATE(self); - +static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ int request_id = rand(); + unsigned int flags; char* collection_name = NULL; - int collection_name_length; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - int before, cur_size, max_size = 0; - int options = 0; - unsigned char check_keys; - unsigned char safe; - unsigned char continue_on_error; - unsigned char uuid_subtype; - PyObject* last_error_args; - buffer_t buffer; + Py_ssize_t collection_name_length; + int begin, cur_size, max_size = 0; + int num_to_skip; + int num_to_return; + PyObject* query = NULL; + PyObject* field_selector = NULL; + PyObject* options_obj = NULL; + codec_options_t options; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "et#ObbObb", + if (!(PyArg_ParseTuple(args, "Iet#iiOOO", + &flags, "utf-8", &collection_name, &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, - &continue_on_error, &uuid_subtype)) { + &num_to_skip, &num_to_return, + &query, &field_selector, + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - if (continue_on_error) { - options += 1; - } - - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; + goto fail; } - length_location = init_insert_buffer(buffer, - request_id, - options, - collection_name, - collection_name_length); + // save space for message length + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyMem_Free(collection_name); - buffer_free(buffer); - return NULL; - } - - iterator = PyObject_GetIter(docs); - if (iterator == NULL) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "input is not iterable"); - Py_DECREF(InvalidOperation); - } - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } - while ((doc = PyIter_Next(iterator)) != NULL) { - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, uuid_subtype, 1)) { - Py_DECREF(doc); - Py_DECREF(iterator); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - Py_DECREF(doc); - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? 
cur_size : max_size; - } - Py_DECREF(iterator); - if (PyErr_Occurred()) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + if (!buffer_write_int32(buffer, (int32_t)request_id) || + !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || + !buffer_write_int32(buffer, (int32_t)flags) || + !buffer_write_bytes_ssize_t(buffer, collection_name, + collection_name_length + 1) || + !buffer_write_int32(buffer, (int32_t)num_to_skip) || + !buffer_write_int32(buffer, (int32_t)num_to_return)) { + goto fail; } - if (!max_size) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + begin = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { + goto fail; } - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); + max_size = pymongo_buffer_get_position(buffer) - begin; - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, last_error_args)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + if (field_selector != Py_None) { + begin = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, field_selector, 0, + &options, 1)) { + goto fail; } + cur_size = pymongo_buffer_get_position(buffer) - begin; + max_size = (cur_size > max_size) ? cur_size : max_size; } - PyMem_Free(collection_name); + message_length = pymongo_buffer_get_position(buffer) - length_location; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), + result = Py_BuildValue("iy#i", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), max_size); - buffer_free(buffer); +fail: + PyMem_Free(collection_name); + destroy_codec_options(&options); + if (buffer) { + pymongo_buffer_free(buffer); + } return result; } -PyDoc_STRVAR(_cbson_insert_message_doc, -"Create an insert message to be sent to MongoDB\n\ -\n\ -Note: As of PyMongo 2.6, this function is no longer used. 
It\n\ -is being kept (with tests) for backwards compatibility with 3rd\n\ -party libraries that may currently be using it, but will likely\n\ -be removed in a future release."); - -static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { +static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - int request_id = rand(); char* collection_name = NULL; - int collection_name_length; - int before, cur_size, max_size = 0; - PyObject* doc; - PyObject* spec; - unsigned char multi; - unsigned char upsert; - unsigned char safe; - unsigned char check_keys; - unsigned char uuid_subtype; - PyObject* last_error_args; - int options; - buffer_t buffer; + Py_ssize_t collection_name_length; + int num_to_return; + long long cursor_id; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "et#bbOObObb", + if (!PyArg_ParseTuple(args, "et#iL", "utf-8", &collection_name, &collection_name_length, - &upsert, &multi, &spec, &doc, &safe, - &last_error_args, &check_keys, &uuid_subtype)) { + &num_to_return, + &cursor_id)) { return NULL; } - - options = 0; - if (upsert) { - options += 1; - } - if (multi) { - options += 2; - } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; + goto fail; } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || + if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00" - "\xd1\x07\x00\x00" - "\x00\x00\x00\x00", - 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&options, 4)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, spec, 0, uuid_subtype, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - max_size = buffer_get_position(buffer) - before; - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, uuid_subtype, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? 
cur_size : max_size; - - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); - - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, last_error_args)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } + "\xd5\x07\x00\x00" + "\x00\x00\x00\x00", 12) || + !buffer_write_bytes_ssize_t(buffer, + collection_name, + collection_name_length + 1) || + !buffer_write_int32(buffer, (int32_t)num_to_return) || + !buffer_write_int64(buffer, (int64_t)cursor_id)) { + goto fail; } - PyMem_Free(collection_name); + message_length = pymongo_buffer_get_position(buffer) - length_location; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), - max_size); - buffer_free(buffer); + result = Py_BuildValue("iy#", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); +fail: + PyMem_Free(collection_name); + if (buffer) { + pymongo_buffer_free(buffer); + } return result; } -static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { +/* + * NOTE this method handles multiple documents in a type one payload but + * it does not perform batch splitting and the total message size is + * only checked *after* generating the entire message. + */ +static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - int request_id = rand(); - unsigned int options; - char* collection_name = NULL; - int collection_name_length; - int begin, cur_size, max_size = 0; - int num_to_skip; - int num_to_return; - PyObject* query; - PyObject* field_selector = Py_None; - unsigned char uuid_subtype = 3; - buffer_t buffer; + unsigned int flags; + PyObject* command = NULL; + char* identifier = NULL; + Py_ssize_t identifier_length = 0; + PyObject* docs = NULL; + PyObject* doc = NULL; + PyObject* options_obj = NULL; + codec_options_t options; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + int total_size = 0; + int max_doc_size = 0; + PyObject* result = NULL; + PyObject* iterator = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "Iet#iiO|Ob", - &options, + /*flags, command, identifier, docs, opts*/ + if (!(PyArg_ParseTuple(args, "IOet#OO", + &flags, + &command, "utf-8", - &collection_name, - &collection_name_length, - &num_to_skip, &num_to_return, - &query, &field_selector, &uuid_subtype)) { + &identifier, + &identifier_length, + &docs, + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; + goto fail; } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || - !buffer_write_bytes(buffer, (const char*)&options, 4) || - 
!buffer_write_bytes(buffer, collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&num_to_skip, 4) || - !buffer_write_bytes(buffer, (const char*)&num_to_return, 4)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + if (!buffer_write_int32(buffer, (int32_t)request_id) || + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" /* responseTo */ + "\xdd\x07\x00\x00" /* 2013 */, 8)) { + goto fail; } - begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, query, 0, uuid_subtype, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + if (!buffer_write_int32(buffer, (int32_t)flags) || + !buffer_write_bytes(buffer, "\x00", 1) /* Payload type 0 */) { + goto fail; } - max_size = buffer_get_position(buffer) - begin; - - if (field_selector != Py_None) { - begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, field_selector, 0, uuid_subtype, 1)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - cur_size = buffer_get_position(buffer) - begin; - max_size = (cur_size > max_size) ? cur_size : max_size; + total_size = write_dict(state->_cbson, buffer, command, 0, + &options, 1); + if (!total_size) { + goto fail; } - PyMem_Free(collection_name); - - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); - - /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer), - max_size); - buffer_free(buffer); - return result; -} - -static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - int collection_name_length; - int num_to_return; - long long cursor_id; - buffer_t buffer; - int length_location, message_length; - PyObject* result; - - if (!PyArg_ParseTuple(args, "et#iL", - "utf-8", - &collection_name, - &collection_name_length, - &num_to_return, - &cursor_id)) { - return NULL; - } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; - } + if (identifier_length) { + int payload_one_length_location, payload_length; + /* Payload type 1 */ + if (!buffer_write_bytes(buffer, "\x01", 1)) { + goto fail; + } + /* save space for payload 0 length */ + payload_one_length_location = pymongo_buffer_save_space(buffer, 4); + /* C string identifier */ + if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { + goto fail; + } + iterator = PyObject_GetIter(docs); + if (iterator == NULL) { + goto fail; + } + while ((doc = PyIter_Next(iterator)) != NULL) { + int encoded_doc_size = write_dict( + state->_cbson, buffer, doc, 0, &options, 1); + if (!encoded_doc_size) { + Py_CLEAR(doc); + goto fail; + } + if (encoded_doc_size > max_doc_size) { + max_doc_size = encoded_doc_size; + } + Py_CLEAR(doc); + } - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - PyMem_Free(collection_name); - PyErr_NoMemory(); - return NULL; + payload_length = pymongo_buffer_get_position(buffer) - payload_one_length_location; + buffer_write_int32_at_position( + buffer, payload_one_length_location, (int32_t)payload_length); + total_size += payload_length; } - if (!buffer_write_bytes(buffer, (const char*)&request_id, 4) || - !buffer_write_bytes(buffer, - 
"\x00\x00\x00\x00" - "\xd5\x07\x00\x00" - "\x00\x00\x00\x00", 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_bytes(buffer, (const char*)&num_to_return, 4) || - !buffer_write_bytes(buffer, (const char*)&cursor_id, 8)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; - } - - PyMem_Free(collection_name); - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); + message_length = pymongo_buffer_get_position(buffer) - length_location; + buffer_write_int32_at_position( + buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer)); - buffer_free(buffer); + result = Py_BuildValue("iy#ii", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), + total_size, + max_doc_size); +fail: + Py_XDECREF(iterator); + if (buffer) { + pymongo_buffer_free(buffer); + } + PyMem_Free(identifier); + destroy_codec_options(&options); return result; } + static void _set_document_too_large(int size, long max) { PyObject* DocumentTooLarge = _error("DocumentTooLarge"); if (DocumentTooLarge) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#else - PyObject* error = PyString_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#endif if (error) { PyErr_SetObject(DocumentTooLarge, error); Py_DECREF(error); @@ -557,111 +347,104 @@ _set_document_too_large(int size, long max) { } } -static PyObject* -_send_insert(PyObject* self, PyObject* client, - PyObject* gle_args, buffer_t buffer, - char* coll_name, int coll_len, int request_id, int safe) { - - PyObject* result; - if (safe) { - if (!add_last_error(self, buffer, request_id, - coll_name, coll_len, gle_args)) { - return NULL; - } - } - - result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer)); +#define _INSERT 0 +#define _UPDATE 1 +#define _DELETE 2 - return PyObject_CallMethod(client, "_send_message", "NN", - result, PyBool_FromLong((long)safe)); -} +/* OP_MSG ----------------------------------------------- */ -static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); +static int +_batched_op_msg( + unsigned char op, unsigned char ack, + PyObject* command, PyObject* docs, PyObject* ctx, + PyObject* to_publish, codec_options_t options, + buffer_t buffer, struct module_state *state) { - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - int send_safe, options = 0; - int length_location, message_length; - int collection_name_length; - char* collection_name = NULL; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - PyObject* client; - PyObject* last_error_args; - PyObject* result; - PyObject* max_bson_size_obj; - PyObject* max_message_size_obj; - unsigned char check_keys; - unsigned char safe; - unsigned char continue_on_error; - unsigned char uuid_subtype; - unsigned char empty = 1; long max_bson_size; + long max_write_batch_size; long max_message_size; - buffer_t buffer; - PyObject *exc_type = NULL, *exc_value = NULL, *exc_trace = NULL; - - if (!PyArg_ParseTuple(args, "et#ObbObbO", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, - &continue_on_error, - &uuid_subtype, 
&client)) { - return NULL; - } - if (continue_on_error) { - options += 1; - } - /* - * If we are doing unacknowledged writes *and* continue_on_error - * is True it's pointless (and slower) to send GLE. - */ - send_safe = (safe || !continue_on_error); - - max_bson_size_obj = PyObject_GetAttrString(client, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 + int idx = 0; + int size_location; + int position; + int length; + PyObject* max_bson_size_obj = NULL; + PyObject* max_write_batch_size_obj = NULL; + PyObject* max_message_size_obj = NULL; + PyObject* doc = NULL; + PyObject* iterator = NULL; + char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00"; + + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { - PyMem_Free(collection_name); - return NULL; + return 0; } - max_message_size_obj = PyObject_GetAttrString(client, "max_message_size"); -#if PY_MAJOR_VERSION >= 3 + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); + max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); + Py_XDECREF(max_write_batch_size_obj); + if (max_write_batch_size == -1) { + return 0; + } + + max_message_size_obj = PyObject_GetAttr(ctx, state->_max_message_size_str); max_message_size = PyLong_AsLong(max_message_size_obj); -#else - max_message_size = PyInt_AsLong(max_message_size_obj); -#endif Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { - PyMem_Free(collection_name); - return NULL; + return 0; } - buffer = buffer_new(); - if (!buffer) { - PyErr_NoMemory(); - PyMem_Free(collection_name); - return NULL; + if (!buffer_write_bytes(buffer, flags, 4)) { + return 0; + } + /* Type 0 Section */ + if (!buffer_write_bytes(buffer, "\x00", 1)) { + return 0; + } + if (!write_dict(state->_cbson, buffer, command, 0, + &options, 0)) { + return 0; } - length_location = init_insert_buffer(buffer, - request_id, - options, - collection_name, - collection_name_length); - if (length_location == -1) { - goto insertfail; + /* Type 1 Section */ + if (!buffer_write_bytes(buffer, "\x01", 1)) { + return 0; + } + /* Save space for size */ + size_location = pymongo_buffer_save_space(buffer, 4); + if (size_location == -1) { + return 0; + } + + switch (op) { + case _INSERT: + { + if (!buffer_write_bytes(buffer, "documents\x00", 10)) + goto fail; + break; + } + case _UPDATE: + { + if (!buffer_write_bytes(buffer, "updates\x00", 8)) + goto fail; + break; + } + case _DELETE: + { + if (!buffer_write_bytes(buffer, "deletes\x00", 8)) + goto fail; + break; + } + default: + { + PyObject* InvalidOperation = _error("InvalidOperation"); + if (InvalidOperation) { + PyErr_SetString(InvalidOperation, "Unknown command"); + Py_DECREF(InvalidOperation); + } + return 0; + } } iterator = PyObject_GetIter(docs); @@ -671,286 +454,238 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { PyErr_SetString(InvalidOperation, "input is not iterable"); Py_DECREF(InvalidOperation); } - goto insertfail; + return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int before = buffer_get_position(buffer); + int cur_doc_begin = pymongo_buffer_get_position(buffer); int cur_size; - if (!write_dict(state->_cbson, buffer, doc, check_keys, uuid_subtype, 1)) { - Py_DECREF(doc); - goto iterfail; - } - Py_DECREF(doc); - - cur_size = buffer_get_position(buffer) - before; - if (cur_size > max_bson_size) { - /* If 
we've encoded anything send it before raising. */ - if (!empty) { - buffer_update_position(buffer, before); - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, - &message_length, 4); - result = _send_insert(self, client, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe); - if (!result) - goto iterfail; - Py_DECREF(result); - } - _set_document_too_large(cur_size, max_bson_size); - goto iterfail; + int doc_too_large = 0; + int unacked_doc_too_large = 0; + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { + goto fail; } - empty = 0; - - /* We have enough data, send this batch. */ - if (buffer_get_position(buffer) > max_message_size) { - int new_request_id = rand(); - int message_start; - buffer_t new_buffer = buffer_new(); - if (!new_buffer) { - PyErr_NoMemory(); - goto iterfail; - } - message_start = init_insert_buffer(new_buffer, - new_request_id, - options, - collection_name, - collection_name_length); - if (message_start == -1) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Copy the overflow encoded document into the new buffer. */ - if (!buffer_write_bytes(new_buffer, - (const char*)buffer_get_buffer(buffer) + before, cur_size)) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Roll back to the beginning of this document. */ - buffer_update_position(buffer, before); - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); - - result = _send_insert(self, client, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe); - - buffer_free(buffer); - buffer = new_buffer; - request_id = new_request_id; - length_location = message_start; - - if (!result) { - PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; - PyObject* OperationFailure; - PyErr_Fetch(&etype, &evalue, &etrace); - OperationFailure = _error("OperationFailure"); - if (OperationFailure) { - if (PyErr_GivenExceptionMatches(etype, OperationFailure)) { - if (!safe || continue_on_error) { - Py_DECREF(OperationFailure); - if (!safe) { - /* We're doing unacknowledged writes and - * continue_on_error is False. Just return. */ - Py_DECREF(etype); - Py_XDECREF(evalue); - Py_XDECREF(etrace); - Py_DECREF(iterator); - buffer_free(buffer); - PyMem_Free(collection_name); - Py_RETURN_NONE; - } - /* continue_on_error is True, store the error - * details to re-raise after the final batch */ - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - exc_type = etype; - exc_value = evalue; - exc_trace = etrace; - continue; - } - } - Py_DECREF(OperationFailure); - } - /* This isn't OperationFailure, we couldn't - * import OperationFailure, or we are doing - * acknowledged writes. Re-raise immediately. */ - PyErr_Restore(etype, evalue, etrace); - goto iterfail; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; + + /* Does the first document exceed max_message_size? */ + doc_too_large = (idx == 0 && (pymongo_buffer_get_position(buffer) > max_message_size)); + /* When OP_MSG is used unacknowledged we have to check + * document size client side or applications won't be notified. + * Otherwise we let the server deal with documents that are too large + * since ordered=False causes those documents to be skipped instead of + * halting the bulk write operation. 
+ * */ + unacked_doc_too_large = (!ack && cur_size > max_bson_size); + if (doc_too_large || unacked_doc_too_large) { + if (op == _INSERT) { + _set_document_too_large(cur_size, max_bson_size); } else { - Py_DECREF(result); + PyObject* DocumentTooLarge = _error("DocumentTooLarge"); + if (DocumentTooLarge) { + /* + * There's nothing intelligent we can say + * about size for update and delete. + */ + PyErr_Format( + DocumentTooLarge, + "%s command document too large", + (op == _UPDATE) ? "update": "delete"); + Py_DECREF(DocumentTooLarge); + } } + goto fail; + } + /* We have enough data, return this batch. */ + if (pymongo_buffer_get_position(buffer) > max_message_size) { + /* + * Roll the existing buffer back to the beginning + * of the last document encoded. + */ + pymongo_buffer_update_position(buffer, cur_doc_begin); + Py_CLEAR(doc); + break; + } + if (PyList_Append(to_publish, doc) < 0) { + goto fail; + } + Py_CLEAR(doc); + idx += 1; + /* We have enough documents, return this batch. */ + if (idx == max_write_batch_size) { + break; } } - Py_DECREF(iterator); + Py_CLEAR(iterator); if (PyErr_Occurred()) { - goto insertfail; - } - - if (empty) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - goto insertfail; + goto fail; } - message_length = buffer_get_position(buffer) - length_location; - memcpy(buffer_get_buffer(buffer) + length_location, &message_length, 4); - - /* Send the last (or only) batch */ - result = _send_insert(self, client, last_error_args, buffer, - collection_name, collection_name_length, - request_id, safe); + position = pymongo_buffer_get_position(buffer); + length = position - size_location; + buffer_write_int32_at_position(buffer, size_location, (int32_t)length); + return 1; - PyMem_Free(collection_name); - buffer_free(buffer); +fail: + Py_XDECREF(doc); + Py_XDECREF(iterator); + return 0; +} - if (!result) { - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); +static PyObject* +_cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { + unsigned char op; + unsigned char ack; + PyObject* command = NULL; + PyObject* docs = NULL; + PyObject* ctx = NULL; + PyObject* to_publish = NULL; + PyObject* result = NULL; + PyObject* options_obj = NULL; + codec_options_t options; + buffer_t buffer; + struct module_state *state = GETSTATE(self); + if (!state) { return NULL; - } else { - Py_DECREF(result); } - if (exc_type) { - /* Re-raise any previously stored exception - * due to continue_on_error being True */ - PyErr_Restore(exc_type, exc_value, exc_trace); + if (!(PyArg_ParseTuple(args, "bOObOO", + &op, &command, &docs, &ack, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } - - Py_RETURN_NONE; - -iterfail: - Py_DECREF(iterator); -insertfail: - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; -} - -static PyObject* -_send_write_command(PyObject* client, buffer_t buffer, - int lst_len_loc, int cmd_len_loc, unsigned char* errors) { - - PyObject* msg; - PyObject* result; - - int request_id = rand(); - int position = buffer_get_position(buffer); - int length = position - lst_len_loc - 1; - memcpy(buffer_get_buffer(buffer) + lst_len_loc, &length, 4); - length = position - cmd_len_loc; - memcpy(buffer_get_buffer(buffer) + cmd_len_loc, &length, 4); - 
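Condensing the checks in the loop above into Python: only the first document may push the message past max_message_size on its own, and unacknowledged writes must be size-checked client side because the server never replies to them. A sketch with illustrative names; the limits stand in for the values read off the connection context.

def reject_oversize(idx: int, ack: bool, cur_size: int, position: int,
                    max_bson_size: int, max_message_size: int) -> None:
    # Did the first document alone push the message past max_message_size?
    doc_too_large = idx == 0 and position > max_message_size
    # Unacknowledged OP_MSG gets no server reply, so an oversize document
    # must be rejected client side or the application is never notified.
    unacked_doc_too_large = not ack and cur_size > max_bson_size
    if doc_too_large or unacked_doc_too_large:
        raise ValueError(f"document of {cur_size} bytes is too large")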
memcpy(buffer_get_buffer(buffer), &position, 4); - memcpy(buffer_get_buffer(buffer) + 4, &request_id, 4); - - /* objectify buffer */ - msg = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, - buffer_get_buffer(buffer), - buffer_get_position(buffer)); - if (!msg) + if (!(buffer = pymongo_buffer_new())) { + destroy_codec_options(&options); return NULL; - - /* Send the current batch */ - result = PyObject_CallMethod(client, "_send_message", - "NOO", msg, Py_True, Py_True); - if (result && PyDict_GetItemString(result, "writeErrors")) - *errors = 1; + } + if (!(to_publish = PyList_New(0))) { + goto fail; + } + + if (!_batched_op_msg( + op, + ack, + command, + docs, + ctx, + to_publish, + options, + buffer, + state)) { + goto fail; + } + + result = Py_BuildValue("y#O", + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), + to_publish); +fail: + destroy_codec_options(&options); + pymongo_buffer_free(buffer); + Py_XDECREF(to_publish); return result; } -static buffer_t -_command_buffer_new(char* ns, int ns_len) { +static PyObject* +_cbson_batched_op_msg(PyObject* self, PyObject* args) { + unsigned char op; + unsigned char ack; + int request_id; + int position; + PyObject* command = NULL; + PyObject* docs = NULL; + PyObject* ctx = NULL; + PyObject* to_publish = NULL; + PyObject* result = NULL; + PyObject* options_obj = NULL; + codec_options_t options; buffer_t buffer; - if (!(buffer = buffer_new())) { - PyErr_NoMemory(); + struct module_state *state = GETSTATE(self); + if (!state) { return NULL; } - /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { - PyErr_NoMemory(); - buffer_free(buffer); + + if (!(PyArg_ParseTuple(args, "bOObOO", + &op, &command, &docs, &ack, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } + if (!(buffer = pymongo_buffer_new())) { + destroy_codec_options(&options); + return NULL; + } + /* Save space for message length and request id */ + if ((pymongo_buffer_save_space(buffer, 8)) == -1) { + goto fail; + } if (!buffer_write_bytes(buffer, "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00" /* opcode */ - "\x00\x00\x00\x00", /* options */ - 12) || - !buffer_write_bytes(buffer, - ns, ns_len + 1) || /* namespace */ - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* skip */ - "\xFF\xFF\xFF\xFF", /* limit (-1) */ + "\xdd\x07\x00\x00", /* opcode */ 8)) { - buffer_free(buffer); - return NULL; - } - return buffer; + goto fail; + } + if (!(to_publish = PyList_New(0))) { + goto fail; + } + + if (!_batched_op_msg( + op, + ack, + command, + docs, + ctx, + to_publish, + options, + buffer, + state)) { + goto fail; + } + + request_id = rand(); + position = pymongo_buffer_get_position(buffer); + buffer_write_int32_at_position(buffer, 0, (int32_t)position); + buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); + result = Py_BuildValue("iy#O", request_id, + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), + to_publish); +fail: + destroy_codec_options(&options); + pymongo_buffer_free(buffer); + Py_XDECREF(to_publish); + return result; } -#define _INSERT 0 -#define _UPDATE 1 -#define _DELETE 2 +/* End OP_MSG -------------------------------------------- */ -static PyObject* -_cbson_do_batched_write_command(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); +static int +_batched_write_command( + char* ns, Py_ssize_t ns_len, unsigned char op, + PyObject* command, PyObject* docs, 
PyObject* ctx, + PyObject* to_publish, codec_options_t options, + buffer_t buffer, struct module_state *state) { long max_bson_size; long max_cmd_size; long max_write_batch_size; - long idx_offset = 0; + long max_split_size; int idx = 0; int cmd_len_loc; int lst_len_loc; - int ns_len; - int ordered; - char *ns = NULL; - PyObject* max_bson_size_obj; - PyObject* max_write_batch_size_obj; - PyObject* command; - PyObject* doc; - PyObject* docs; - PyObject* client; - PyObject* iterator; - PyObject* result; - PyObject* results; - unsigned char op; - unsigned char check_keys; - unsigned char uuid_subtype; - unsigned char empty = 1; - unsigned char errors = 0; - buffer_t buffer; - - if (!PyArg_ParseTuple(args, "et#bOObbO", "utf-8", - &ns, &ns_len, &op, &command, &docs, - &check_keys, &uuid_subtype, &client)) { - return NULL; - } - - max_bson_size_obj = PyObject_GetAttrString(client, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 + int position; + int length; + PyObject* max_bson_size_obj = NULL; + PyObject* max_write_batch_size_obj = NULL; + PyObject* max_split_size_obj = NULL; + PyObject* doc = NULL; + PyObject* iterator = NULL; + + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { - PyMem_Free(ns); - return NULL; + return 0; } /* * Max BSON object size + 16k - 2 bytes for ending NUL bytes @@ -958,64 +693,61 @@ _cbson_do_batched_write_command(PyObject* self, PyObject* args) { */ max_cmd_size = max_bson_size + 16382; - max_write_batch_size_obj = PyObject_GetAttrString(client, "max_write_batch_size"); -#if PY_MAJOR_VERSION >= 3 + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); -#else - max_write_batch_size = PyInt_AsLong(max_write_batch_size_obj); -#endif Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { - PyMem_Free(ns); - return NULL; + return 0; } - /* Default to True */ - ordered = !((PyDict_GetItemString(command, "ordered")) == Py_False); - - if (!(results = PyList_New(0))) { - PyMem_Free(ns); - return NULL; + // max_split_size is the size at which to perform a batch split. + // Normally this value is equal to max_bson_size (16MiB). However, + // when auto encryption is enabled max_split_size is reduced to 2MiB.
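The split policy those limits drive can be condensed into a short Python sketch: close a batch once at least one document is queued and the payload would pass max_split_size, or once max_write_batch_size documents are queued. The bytes-based interface is an illustration; the C code above encodes into one buffer and rolls its position back instead.

from typing import Iterable, Iterator, List

def split_batches(docs: Iterable[bytes], max_split_size: int,
                  max_write_batch_size: int) -> Iterator[List[bytes]]:
    batch: List[bytes] = []
    size = 0
    for doc in docs:
        # Split when this document would push the payload past the limit.
        if batch and size + len(doc) > max_split_size:
            yield batch
            batch, size = [], 0
        batch.append(doc)
        size += len(doc)
        # Split when the server's document-count limit is reached.
        if len(batch) == max_write_batch_size:
            yield batch
            batch, size = [], 0
    if batch:
        yield batch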
+ max_split_size_obj = PyObject_GetAttr(ctx, state->_max_split_size_str); + max_split_size = PyLong_AsLong(max_split_size_obj); + Py_XDECREF(max_split_size_obj); + if (max_split_size == -1) { + return 0; } - if (!(buffer = _command_buffer_new(ns, ns_len))) { - PyMem_Free(ns); - Py_DECREF(results); - return NULL; + if (!buffer_write_bytes(buffer, + "\x00\x00\x00\x00", /* flags */ + 4) || + !buffer_write_bytes_ssize_t(buffer, ns, ns_len + 1) || /* namespace */ + !buffer_write_bytes(buffer, + "\x00\x00\x00\x00" /* skip */ + "\xFF\xFF\xFF\xFF", /* limit (-1) */ + 8)) { + return 0; } - PyMem_Free(ns); - /* Position of command document length */ - cmd_len_loc = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, command, 0, uuid_subtype, 0)) { - goto cmdfail; + cmd_len_loc = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, command, 0, + &options, 0)) { + return 0; } /* Write type byte for array */ - *(buffer_get_buffer(buffer) + (buffer_get_position(buffer) - 1)) = 0x4; + *(pymongo_buffer_get_buffer(buffer) + (pymongo_buffer_get_position(buffer) - 1)) = 0x4; switch (op) { case _INSERT: { if (!buffer_write_bytes(buffer, "documents\x00", 10)) - goto cmdfail; + goto fail; break; } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) - goto cmdfail; + goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) - goto cmdfail; + goto fail; break; } default: @@ -1025,15 +757,14 @@ _cbson_do_batched_write_command(PyObject* self, PyObject* args) { PyErr_SetString(InvalidOperation, "Unknown command"); Py_DECREF(InvalidOperation); } - goto cmdfail; + return 0; } } /* Save space for list document */ - lst_len_loc = buffer_save_space(buffer, 4); + lst_len_loc = pymongo_buffer_save_space(buffer, 4); if (lst_len_loc == -1) { - PyErr_NoMemory(); - goto cmdfail; + return 0; } iterator = PyObject_GetIter(docs); @@ -1043,214 +774,213 @@ _cbson_do_batched_write_command(PyObject* self, PyObject* args) { PyErr_SetString(InvalidOperation, "input is not iterable"); Py_DECREF(InvalidOperation); } - goto cmdfail; + return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int sub_doc_begin = buffer_get_position(buffer); + int sub_doc_begin = pymongo_buffer_get_position(buffer); int cur_doc_begin; int cur_size; int enough_data = 0; - int enough_documents = 0; - char key[16]; - empty = 0; - INT2STRING(key, idx); + char key[BUF_SIZE]; + int res = LL2STR(key, (long long)idx); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, "\x03", 1) || !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { - Py_DECREF(doc); - goto cmditerfail; + goto fail; } - cur_doc_begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, - check_keys, uuid_subtype, 1)) { - Py_DECREF(doc); - goto cmditerfail; + cur_doc_begin = pymongo_buffer_get_position(buffer); + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { + goto fail; } - Py_DECREF(doc); - - /* We have enough data, maybe send this batch. */ - enough_data = (buffer_get_position(buffer) > max_cmd_size); - enough_documents = (idx >= max_write_batch_size); - if (enough_data || enough_documents) { - buffer_t new_buffer; - cur_size = buffer_get_position(buffer) - cur_doc_begin; - - /* This single document is too large for the command. 
*/ - if (!idx) { - if (op == _INSERT) { - _set_document_too_large(cur_size, max_bson_size); - } else { - PyObject* DocumentTooLarge = _error("DocumentTooLarge"); - if (DocumentTooLarge) { - /* - * There's nothing intelligent we can say - * about size for update and remove. - */ - PyErr_SetString(DocumentTooLarge, - "command document too large"); - Py_DECREF(DocumentTooLarge); - } - } - goto cmditerfail; - } - if (!(new_buffer = buffer_new())) { - PyErr_NoMemory(); - goto cmditerfail; - } - /* New buffer including the current overflow document */ - if (!buffer_write_bytes(new_buffer, - (const char*)buffer_get_buffer(buffer), lst_len_loc + 5) || - !buffer_write_bytes(new_buffer, "0\x00", 2) || - !buffer_write_bytes(new_buffer, - (const char*)buffer_get_buffer(buffer) + cur_doc_begin, cur_size)) { - buffer_free(new_buffer); - goto cmditerfail; + /* We have enough data, return this batch. + * max_cmd_size accounts for the two trailing null bytes. + */ + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; + /* This single document is too large for the command. */ + if (cur_size > max_cmd_size) { + if (op == _INSERT) { + _set_document_too_large(cur_size, max_bson_size); + } else { + PyObject* DocumentTooLarge = _error("DocumentTooLarge"); + if (DocumentTooLarge) { + /* + * There's nothing intelligent we can say + * about size for update and delete. + */ + PyErr_Format( + DocumentTooLarge, + "%s command document too large", + (op == _UPDATE) ? "update": "delete"); + Py_DECREF(DocumentTooLarge); + } } + goto fail; + } + enough_data = (idx >= 1 && + (pymongo_buffer_get_position(buffer) > max_split_size)); + if (enough_data) { /* * Roll the existing buffer back to the beginning * of the last document encoded. */ - buffer_update_position(buffer, sub_doc_begin); - - if (!buffer_write_bytes(buffer, "\x00\x00", 2)) - goto cmditerfail; - - result = _send_write_command(client, buffer, - lst_len_loc, cmd_len_loc, &errors); - - buffer_free(buffer); - buffer = new_buffer; - - if (!result) - goto cmditerfail; - -#if PY_MAJOR_VERSION >= 3 - result = Py_BuildValue("NN", - PyLong_FromLong(idx_offset), result); -#else - result = Py_BuildValue("NN", - PyInt_FromLong(idx_offset), result); -#endif - if (!result) - goto cmditerfail; - - PyList_Append(results, result); - Py_DECREF(result); - - if (errors && ordered) { - Py_DECREF(iterator); - buffer_free(buffer); - return results; - } - idx_offset += idx; - idx = 0; + pymongo_buffer_update_position(buffer, sub_doc_begin); + Py_CLEAR(doc); + break; + } + if (PyList_Append(to_publish, doc) < 0) { + goto fail; } + Py_CLEAR(doc); idx += 1; + /* We have enough documents, return this batch. 
*/ + if (idx == max_write_batch_size) { + break; + } } - Py_DECREF(iterator); + Py_CLEAR(iterator); if (PyErr_Occurred()) { - goto cmdfail; + goto fail; } - if (empty) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk write"); - Py_DECREF(InvalidOperation); - } - goto cmdfail; + if (!buffer_write_bytes(buffer, "\x00\x00", 2)) { + goto fail; } - if (!buffer_write_bytes(buffer, "\x00\x00", 2)) - goto cmdfail; - - result = _send_write_command(client, buffer, - lst_len_loc, cmd_len_loc, &errors); - if (!result) - goto cmdfail; - -#if PY_MAJOR_VERSION >= 3 - result = Py_BuildValue("NN", PyLong_FromLong(idx_offset), result); -#else - result = Py_BuildValue("NN", PyInt_FromLong(idx_offset), result); -#endif - if (!result) - goto cmdfail; + position = pymongo_buffer_get_position(buffer); + length = position - lst_len_loc - 1; + buffer_write_int32_at_position(buffer, lst_len_loc, (int32_t)length); + length = position - cmd_len_loc; + buffer_write_int32_at_position(buffer, cmd_len_loc, (int32_t)length); + return 1; - buffer_free(buffer); +fail: + Py_XDECREF(doc); + Py_XDECREF(iterator); + return 0; +} - PyList_Append(results, result); - Py_DECREF(result); - return results; +static PyObject* +_cbson_encode_batched_write_command(PyObject* self, PyObject* args) { + char *ns = NULL; + unsigned char op; + Py_ssize_t ns_len; + PyObject* command = NULL; + PyObject* docs = NULL; + PyObject* ctx = NULL; + PyObject* to_publish = NULL; + PyObject* result = NULL; + PyObject* options_obj = NULL; + codec_options_t options; + buffer_t buffer; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } -cmditerfail: - Py_DECREF(iterator); -cmdfail: - Py_DECREF(results); - buffer_free(buffer); - return NULL; + if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", + &ns, &ns_len, &op, &command, &docs, + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { + return NULL; + } + if (!(buffer = pymongo_buffer_new())) { + PyMem_Free(ns); + destroy_codec_options(&options); + return NULL; + } + if (!(to_publish = PyList_New(0))) { + goto fail; + } + + if (!_batched_write_command( + ns, + ns_len, + op, + command, + docs, + ctx, + to_publish, + options, + buffer, + state)) { + goto fail; + } + + result = Py_BuildValue("y#O", + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), + to_publish); +fail: + PyMem_Free(ns); + destroy_codec_options(&options); + pymongo_buffer_free(buffer); + Py_XDECREF(to_publish); + return result; } static PyMethodDef _CMessageMethods[] = { - {"_insert_message", _cbson_insert_message, METH_VARARGS, - _cbson_insert_message_doc}, - {"_update_message", _cbson_update_message, METH_VARARGS, - "create an update message to be sent to MongoDB"}, {"_query_message", _cbson_query_message, METH_VARARGS, "create a query message to be sent to MongoDB"}, {"_get_more_message", _cbson_get_more_message, METH_VARARGS, "create a get more message to be sent to MongoDB"}, - {"_do_batched_insert", _cbson_do_batched_insert, METH_VARARGS, - "insert a batch of documents, splitting the batch as needed"}, - {"_do_batched_write_command", _cbson_do_batched_write_command, METH_VARARGS, - "execute a batch of insert, update, or delete commands"}, + {"_op_msg", _cbson_op_msg, METH_VARARGS, + "create an OP_MSG message to be sent to MongoDB"}, + {"_encode_batched_write_command", _cbson_encode_batched_write_command, METH_VARARGS, + "Encode the next 
batched insert, update, or delete command"}, + {"_batched_op_msg", _cbson_batched_op_msg, METH_VARARGS, + "Create the next batched insert, update, or delete using OP_MSG"}, + {"_encode_batched_op_msg", _cbson_encode_batched_op_msg, METH_VARARGS, + "Encode the next batched insert, update, or delete using OP_MSG"}, {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 -#define INITERROR return NULL +#define INITERROR return -1; static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->_cbson); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_VISIT(state->_cbson); + Py_VISIT(state->_max_bson_size_str); + Py_VISIT(state->_max_message_size_str); + Py_VISIT(state->_max_split_size_str); + Py_VISIT(state->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->_cbson); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->_cbson); + Py_CLEAR(state->_max_bson_size_str); + Py_CLEAR(state->_max_message_size_str); + Py_CLEAR(state->_max_split_size_str); + Py_CLEAR(state->_max_write_batch_size_str); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cmessage", - NULL, - sizeof(struct module_state), - _CMessageMethods, - NULL, - _cmessage_traverse, - _cmessage_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cmessage(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cmessage(void) -#endif +/* Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. +*/ +static int +_cmessage_exec(PyObject *m) { - PyObject *_cbson; - PyObject *c_api_object; - PyObject *m; - struct module_state *state; + PyObject *_cbson = NULL; + PyObject *c_api_object = NULL; + struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some * of its functions */ _cbson = PyImport_ImportModule("bson._cbson"); if (_cbson == NULL) { - INITERROR; + goto fail; } /* Import C API of _cbson @@ -1258,37 +988,62 @@ init_cmessage(void) */ c_api_object = PyObject_GetAttrString(_cbson, "_C_API"); if (c_api_object == NULL) { - Py_DECREF(_cbson); - INITERROR; + goto fail; } -#if PY_VERSION_HEX >= 0x03010000 _cbson_API = (void **)PyCapsule_GetPointer(c_api_object, "_cbson._C_API"); -#else - _cbson_API = (void **)PyCObject_AsVoidPtr(c_api_object); -#endif if (_cbson_API == NULL) { - Py_DECREF(c_api_object); - Py_DECREF(_cbson); - INITERROR; - } - -#if PY_MAJOR_VERSION >= 3 - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("_cmessage", _CMessageMethods); -#endif - if (m == NULL) { - Py_DECREF(c_api_object); - Py_DECREF(_cbson); - INITERROR; + goto fail; } state = GETSTATE(m); + if (state == NULL) { + goto fail; + } state->_cbson = _cbson; + if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && + (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && + (state->_max_write_batch_size_str = PyUnicode_FromString("max_write_batch_size")) && + (state->_max_split_size_str = PyUnicode_FromString("max_split_size")))) { + goto fail; + } Py_DECREF(c_api_object); + return 0; -#if PY_MAJOR_VERSION >= 3 - return m; +fail: + Py_XDECREF(m); + Py_XDECREF(c_api_object); + Py_XDECREF(_cbson); + INITERROR; +} + + +static PyModuleDef_Slot _cmessage_slots[] = { + {Py_mod_exec, _cmessage_exec}, +#ifdef Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 
0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cmessage", + NULL, + sizeof(struct module_state), + _CMessageMethods, + _cmessage_slots, + _cmessage_traverse, + _cmessage_clear, + NULL +}; + +PyMODINIT_FUNC +PyInit__cmessage(void) +{ + return PyModuleDef_Init(&moduledef); } diff --git a/pymongo/_csot.py b/pymongo/_csot.py new file mode 100644 index 0000000000..ce72a66486 --- /dev/null +++ b/pymongo/_csot.py @@ -0,0 +1,167 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Internal helpers for CSOT.""" + +from __future__ import annotations + +import functools +import inspect +import time +from collections import deque +from contextlib import AbstractContextManager +from contextvars import ContextVar, Token +from typing import TYPE_CHECKING, Any, Callable, Deque, MutableMapping, Optional, TypeVar, cast + +if TYPE_CHECKING: + from pymongo.write_concern import WriteConcern + +TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) +RTT: ContextVar[float] = ContextVar("RTT", default=0.0) +DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) + + +def reset_all() -> None: + TIMEOUT.set(None) + RTT.set(0.0) + DEADLINE.set(float("inf")) + + +def get_timeout() -> Optional[float]: + return TIMEOUT.get(None) + + +def get_rtt() -> float: + return RTT.get() + + +def get_deadline() -> float: + return DEADLINE.get() + + +def set_rtt(rtt: float) -> None: + RTT.set(rtt) + + +def remaining() -> Optional[float]: + if not get_timeout(): + return None + return DEADLINE.get() - time.monotonic() + + +def clamp_remaining(max_timeout: float) -> float: + """Return the remaining timeout clamped to a max value.""" + timeout = remaining() + if timeout is None: + return max_timeout + return min(timeout, max_timeout) + + +class _TimeoutContext(AbstractContextManager[Any]): + """Internal timeout context manager. 
+ + Use :func:`pymongo.timeout` instead:: + + with pymongo.timeout(0.5): + client.test.test.insert_one({}) + """ + + def __init__(self, timeout: Optional[float]): + self._timeout = timeout + self._tokens: Optional[tuple[Token[Optional[float]], Token[float], Token[float]]] = None + + def __enter__(self) -> None: + timeout_token = TIMEOUT.set(self._timeout) + prev_deadline = DEADLINE.get() + next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") + deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) + rtt_token = RTT.set(0.0) + self._tokens = (timeout_token, deadline_token, rtt_token) + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + if self._tokens: + timeout_token, deadline_token, rtt_token = self._tokens + TIMEOUT.reset(timeout_token) + DEADLINE.reset(deadline_token) + RTT.reset(rtt_token) + + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def apply(func: F) -> F: + """Apply the client's timeoutMS to this operation. Can wrap both asynchronous and synchronous methods""" + if inspect.iscoroutinefunction(func): + + @functools.wraps(func) + async def csot_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return await func(self, *args, **kwargs) + return await func(self, *args, **kwargs) + else: + + @functools.wraps(func) + def csot_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) + + return cast(F, csot_wrapper) + + +def apply_write_concern( + cmd: MutableMapping[str, Any], write_concern: Optional[WriteConcern] +) -> None: + """Apply the given write concern to a command.""" + if not write_concern or write_concern.is_server_default: + return + wc = write_concern.document + if get_timeout() is not None: + wc.pop("wtimeout", None) + if wc: + cmd["writeConcern"] = wc + + +_MAX_RTT_SAMPLES: int = 10 +_MIN_RTT_SAMPLES: int = 2 + + +class MovingMinimum: + """Tracks a minimum RTT within the last 10 RTT samples.""" + + samples: Deque[float] + + def __init__(self) -> None: + self.samples = deque(maxlen=_MAX_RTT_SAMPLES) + + def add_sample(self, sample: float) -> None: + if sample < 0: + raise ValueError(f"duration cannot be negative {sample}") + self.samples.append(sample) + + def get(self) -> float: + """Get the min, or 0.0 if there aren't enough samples yet.""" + if len(self.samples) >= _MIN_RTT_SAMPLES: + return min(self.samples) + return 0.0 + + def reset(self) -> None: + self.samples.clear() diff --git a/pymongo/_gcp_helpers.py b/pymongo/_gcp_helpers.py new file mode 100644 index 0000000000..7979d1e807 --- /dev/null +++ b/pymongo/_gcp_helpers.py @@ -0,0 +1,40 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
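A usage sketch of the deadline arithmetic in _TimeoutContext above: each entry stores min(previous deadline, now + timeout), so nested scopes can only shrink the effective deadline, and the saved tokens restore the outer values on exit. Assumes the module is importable as pymongo._csot.

from pymongo import _csot

with _csot._TimeoutContext(10.0):
    outer_deadline = _csot.get_deadline()
    with _csot._TimeoutContext(0.5):
        # Inner scope: deadline = min(outer deadline, now + 0.5).
        assert _csot.get_deadline() <= outer_deadline
        seconds_left = _csot.remaining()
        assert seconds_left is not None and seconds_left <= 0.5
    # On exit the tokens restore the outer deadline.
    assert _csot.get_deadline() == outer_deadline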
+ +"""GCP helpers.""" +from __future__ import annotations + +from typing import Any + + +def _get_gcp_response(resource: str, timeout: float = 5) -> dict[str, Any]: + from urllib.request import Request, urlopen + + url = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity" + url += f"?audience={resource}" + headers = {"Metadata-Flavor": "Google"} + request = Request(url, headers=headers) # noqa: S310 + try: + with urlopen(request, timeout=timeout) as response: # noqa: S310 + status = response.status + body = response.read().decode("utf8") + except Exception as e: + msg = "Failed to acquire IMDS access token: %s" % e + raise ValueError(msg) from None + + if status != 200: + msg = "Failed to acquire IMDS access token." + raise ValueError(msg) + + return dict(access_token=body) diff --git a/pymongo/_version.py b/pymongo/_version.py new file mode 100644 index 0000000000..c6ba82ab13 --- /dev/null +++ b/pymongo/_version.py @@ -0,0 +1,43 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Current version of PyMongo.""" +from __future__ import annotations + +import re +from typing import List, Tuple, Union + +__version__ = "4.16.0.dev0" + + +def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: + pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" + match = re.match(pattern, version) + if match: + parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]] + if match["rest"]: + parts.append(match["rest"]) + elif re.match(r"\d+.\d+", version): + parts = [int(part) for part in version.split(".")] + else: + raise ValueError("Could not parse version") + return tuple(parts) + + +version_tuple = get_version_tuple(__version__) +version = __version__ + + +def get_version_string() -> str: + return __version__ diff --git a/pymongo/asynchronous/__init__.py b/pymongo/asynchronous/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pymongo/asynchronous/aggregation.py b/pymongo/asynchronous/aggregation.py new file mode 100644 index 0000000000..6ca60ad9c3 --- /dev/null +++ b/pymongo/asynchronous/aggregation.py @@ -0,0 +1,254 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo import common +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ConfigurationError +from pymongo.read_preferences import ReadPreference, _AggWritePref + +if TYPE_CHECKING: + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.command_cursor import AsyncCommandCursor + from pymongo.asynchronous.database import AsyncDatabase + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _DocumentType, _Pipeline + +_IS_SYNC = False + + +class _AggregationCommand: + """The internal abstract base class for aggregation cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.asynchronous.collection.AsyncCollection.aggregate`, or + :meth:`pymongo.asynchronous.database.AsyncDatabase.aggregate` instead. + """ + + def __init__( + self, + target: Union[AsyncDatabase[Any], AsyncCollection[Any]], + cursor_class: type[AsyncCommandCursor[Any]], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], AsyncConnection], None]] = None, + comment: Any = None, + ) -> None: + if "explain" in options: + raise ConfigurationError( + "The explain option is not supported. Use AsyncDatabase.command instead." + ) + + self._target = target + + pipeline = common.validate_list("pipeline", pipeline) + self._pipeline = pipeline + self._performs_write = False + if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): + self._performs_write = True + + common.validate_is_mapping("options", options) + if let is not None: + common.validate_is_mapping("let", let) + options["let"] = let + if comment is not None: + options["comment"] = comment + + self._options = options + + # This is the batchSize that will be used for setting the initial + # batchSize for the cursor, as well as the subsequent getMores. + self._batch_size = common.validate_non_negative_integer_or_none( + "batchSize", self._options.pop("batchSize", None) + ) + + # If the cursor option is already specified, avoid overriding it. + self._options.setdefault("cursor", {}) + # If the pipeline performs a write, we ignore the initial batchSize + # since the server doesn't return results in this case. 
+ if self._batch_size is not None and not self._performs_write: + self._options["cursor"]["batchSize"] = self._batch_size + + self._cursor_class = cursor_class + self._user_fields = user_fields + self._result_processor = result_processor + + self._collation = validate_collation_or_none(options.pop("collation", None)) + + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) + self._write_preference: Optional[_AggWritePref] = None + + @property + def _aggregation_target(self) -> Union[str, int]: + """The argument to pass to the aggregate command.""" + raise NotImplementedError + + @property + def _cursor_namespace(self) -> str: + """The namespace in which the aggregate command is run.""" + raise NotImplementedError + + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + raise NotImplementedError + + @property + def _database(self) -> AsyncDatabase[Any]: + """The database against which the aggregation command is run.""" + raise NotImplementedError + + def get_read_preference( + self, session: Optional[AsyncClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) # type: ignore[assignment] + return pref + + async def get_cursor( + self, + session: Optional[AsyncClientSession], + server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[_DocumentType]: + # Serialize command. + cmd = {"aggregate": self._aggregation_target, "pipeline": self._pipeline} + cmd.update(self._options) + + # Apply this target's read concern if: + # readConcern has not been specified as a kwarg and either + # - server version is >= 4.2 or + # - server version is >= 3.2 and pipeline doesn't use $out + if ("readConcern" not in cmd) and ( + not self._performs_write or (conn.max_wire_version >= 8) + ): + read_concern = self._target.read_concern + else: + read_concern = None + + # Apply this target's write concern if: + # writeConcern has not been specified as a kwarg and pipeline doesn't + # perform a write operation + if "writeConcern" not in cmd and self._performs_write: + write_concern = self._target._write_concern_for(session) + else: + write_concern = None + + # Run command. + result = await conn.command( + self._database.name, + cmd, + read_preference, + self._target.codec_options, + parse_write_concern_error=True, + read_concern=read_concern, + write_concern=write_concern, + collation=self._collation, + session=session, + client=self._database.client, + user_fields=self._user_fields, + ) + + if self._result_processor: + self._result_processor(result, conn) + + # Extract cursor from result or mock/fake one if necessary. + if "cursor" in result: + cursor = result["cursor"] + else: + # Unacknowledged $out/$merge write. Fake a cursor. + cursor = { + "id": 0, + "firstBatch": result.get("result", []), + "ns": self._cursor_namespace, + } + + # Create and return cursor instance. 
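The two reply shapes get_cursor handles above, sketched with illustrative payloads: a normal reply carries a cursor subdocument, while an unacknowledged $out/$merge write is wrapped in a synthetic, already-exhausted cursor.

def as_cursor_doc(result: dict, cursor_namespace: str) -> dict:
    if "cursor" in result:
        return result["cursor"]
    # Unacknowledged $out/$merge: fake a cursor with nothing left to fetch.
    return {"id": 0, "firstBatch": result.get("result", []), "ns": cursor_namespace}

assert as_cursor_doc({}, "db.coll") == {"id": 0, "firstBatch": [], "ns": "db.coll"}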
+ cmd_cursor = self._cursor_class( + self._cursor_collection(cursor), + cursor, + conn.address, + batch_size=self._batch_size or 0, + max_await_time_ms=self._max_await_time_ms, + session=session, + comment=self._options.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + +class _CollectionAggregationCommand(_AggregationCommand): + _target: AsyncCollection[Any] + + @property + def _aggregation_target(self) -> str: + return self._target.name + + @property + def _cursor_namespace(self) -> str: + return self._target.full_name + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + return self._target + + @property + def _database(self) -> AsyncDatabase[Any]: + return self._target.database + + +class _CollectionRawAggregationCommand(_CollectionAggregationCommand): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + # For raw-batches, we set the initial batchSize for the cursor to 0. + if not self._performs_write: + self._options["cursor"]["batchSize"] = 0 + + +class _DatabaseAggregationCommand(_AggregationCommand): + _target: AsyncDatabase[Any] + + @property + def _aggregation_target(self) -> int: + return 1 + + @property + def _cursor_namespace(self) -> str: + return f"{self._target.name}.$cmd.aggregate" + + @property + def _database(self) -> AsyncDatabase[Any]: + return self._target + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + # AsyncCollection level aggregate may not always return the "ns" field + # according to our MockupDB tests. Let's handle that case for db level + # aggregate too by defaulting to the .$cmd.aggregate namespace. + _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) + return self._database[collname] diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py new file mode 100644 index 0000000000..c1321f1d90 --- /dev/null +++ b/pymongo/asynchronous/auth.py @@ -0,0 +1,455 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
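How the database-level subclass above recovers a collection name from the cursor's "ns" field (falling back to the .$cmd.aggregate namespace): one split on the first dot, illustrated here with a hypothetical database name.

ns = "test.$cmd.aggregate"
db_name, coll_name = ns.split(".", 1)
assert (db_name, coll_name) == ("test", "$cmd.aggregate")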
+ +"""Authentication helpers.""" +from __future__ import annotations + +import functools +import hashlib +import hmac +import socket +from base64 import standard_b64decode, standard_b64encode +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Coroutine, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote + +from bson.binary import Binary +from pymongo.asynchronous.auth_aws import _authenticate_aws +from pymongo.asynchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, +) +from pymongo.asynchronous.helpers import _getaddrinfo +from pymongo.auth_shared import ( + MongoCredential, + _authenticate_scram_start, + _parse_scram_response, + _xor, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.hello import Hello + +HAVE_KERBEROS = True +_USE_PRINCIPAL = False +try: + import winkerberos as kerberos # type:ignore[import] + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): + _USE_PRINCIPAL = True +except ImportError: + try: + import kerberos # type:ignore[import] + except ImportError: + HAVE_KERBEROS = False + + +_IS_SYNC = False + + +async def _authenticate_scram( + credentials: MongoCredential, conn: AsyncConnection, mechanism: str +) -> None: + """Authenticate using SCRAM.""" + username = credentials.username + if mechanism == "SCRAM-SHA-256": + digest = "sha256" + digestmod = hashlib.sha256 + data = saslprep(credentials.password).encode("utf-8") + else: + digest = "sha1" + digestmod = hashlib.sha1 + data = _password_digest(username, credentials.password).encode("utf-8") + source = credentials.source + cache = credentials.cache + + # Make local + _hmac = hmac.HMAC + + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = await conn.command(source, cmd) + + assert res is not None + server_first = res["payload"] + parsed = _parse_scram_response(server_first) + iterations = int(parsed[b"i"]) + if iterations < 4096: + raise OperationFailure("Server returned an invalid iteration count.") + salt = parsed[b"s"] + rnonce = parsed[b"r"] + if not rnonce.startswith(nonce): + raise OperationFailure("Server returned an invalid nonce.") + + without_proof = b"c=biws,r=" + rnonce + if cache.data: + client_key, server_key, csalt, citerations = cache.data + else: + client_key, server_key, csalt, citerations = None, None, None, None + + # Salt and / or iterations could change for a number of different + # reasons. Either changing invalidates the cache. 
+ if not client_key or salt != csalt or iterations != citerations: + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) + client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() + server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() + cache.data = (client_key, server_key, salt, iterations) + stored_key = digestmod(client_key).digest() + auth_msg = b",".join((first_bare, server_first, without_proof)) + client_sig = _hmac(stored_key, auth_msg, digestmod).digest() + client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) + client_final = b",".join((without_proof, client_proof)) + + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) + + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } + res = await conn.command(source, cmd) + + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): + raise OperationFailure("Server returned an invalid signature.") + + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } + res = await conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") + if len(password) == 0: + raise ValueError("password can't be empty") + if not isinstance(username, str): + raise TypeError(f"username must be an instance of str, not {type(username)}") + + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" + digest = _password_digest(username, password) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +async def _canonicalize_hostname(hostname: str, option: str | bool) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + + af, socktype, proto, canonname, sockaddr = ( + await _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] + + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + +async def _authenticate_gssapi(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using GSSAPI.""" + if not HAVE_KERBEROS: + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' 
+ ) + + try: + username = credentials.username + password = credentials.password + props = credentials.mechanism_properties + # Starting here and continuing through the while loop below - establish + # the security context. See RFC 4752, Section 3.1, first paragraph. + host = props.service_host or conn.address[0] + host = await _canonicalize_hostname(host, props.canonicalize_host_name) + service = props.service_name + "@" + host + if props.service_realm is not None: + service = service + "@" + props.service_realm + + if password is not None: + if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. + principal = ":".join((quote(username), quote(password))) + result, ctx = kerberos.authGSSClientInit( + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) + else: + if "@" in username: + user, domain = username.split("@", 1) + else: + user, domain = username, None + result, ctx = kerberos.authGSSClientInit( + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) + else: + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + + if result != kerberos.AUTH_GSS_COMPLETE: + raise OperationFailure("Kerberos context failed to initialize.") + + try: + # pykerberos uses a weird mix of exceptions and return values + # to indicate errors. + # 0 == continue, 1 == complete, -1 == error + # Only authGSSClientStep can return 0. + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") + + # Start a SASL conversation with mongod/s + # Note: pykerberos deals with base64 encoded byte strings. + # Since mongo accepts base64 strings as the payload we don't + # have to use bson.binary.Binary. + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } + response = await conn.command("$external", cmd) + + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) + if result == -1: + raise OperationFailure("Unknown kerberos failure in step function.") + + payload = kerberos.authGSSClientResponse(ctx) or "" + + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + response = await conn.command("$external", cmd) + + if result == kerberos.AUTH_GSS_COMPLETE: + break + else: + raise OperationFailure("Kerberos authentication failed to complete.") + + # Once the security context is established actually authenticate. + # See RFC 4752, Section 3.1, last two paragraphs. 
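+            # Per RFC 4752, the server's final token must be unwrapped, and the
+            # reply wrapped together with the authorization identity (the
+            # username), before the closing saslContinue below.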
+ if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") + + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") + + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + await conn.command("$external", cmd) + + finally: + kerberos.authGSSClientClean(ctx) + + except kerberos.KrbError as exc: + raise OperationFailure(str(exc)) from None + + +async def _authenticate_plain(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" + source = credentials.source + username = credentials.username + password = credentials.password + payload = (f"\x00{username}\x00{password}").encode() + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } + await conn.command(source, cmd) + + +async def _authenticate_x509(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + await conn.command("$external", cmd) + + +async def _authenticate_default(credentials: MongoCredential, conn: AsyncConnection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = (await conn.command(source, cmd, publish_events=False)).get( + "saslSupportedMechs", [] + ) + if "SCRAM-SHA-256" in mechs: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-256") + else: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + else: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., Coroutine[Any, Any, None]]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, +} + + +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: 
tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.get_spec_auth_cmd() + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} + + +async def authenticate( + credentials: MongoCredential, conn: AsyncConnection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + await _authenticate_oidc(credentials, conn, reauthenticate) + else: + await auth_func(credentials, conn) diff --git a/pymongo/asynchronous/auth_aws.py b/pymongo/asynchronous/auth_aws.py new file mode 100644 index 0000000000..210d306046 --- /dev/null +++ b/pymongo/asynchronous/auth_aws.py @@ -0,0 +1,100 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Type + +import bson +from bson.binary import Binary +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from bson.typings import _ReadableBuffer + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.auth_shared import MongoCredential + +_IS_SYNC = False + + +async def _authenticate_aws(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using MONGODB-AWS.""" + try: + import pymongo_auth_aws # type:ignore[import] + except ImportError as e: + raise ConfigurationError( + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'" + ) from e + # Delayed import. 
+ from pymongo_auth_aws.auth import ( # type:ignore[import] + set_cached_credentials, + set_use_cached_credentials, + ) + + set_use_cached_credentials(True) + + if conn.max_wire_version < 9: + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + class AwsSaslContext(pymongo_auth_aws.AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + + try: + ctx = AwsSaslContext( + pymongo_auth_aws.AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) + client_payload = ctx.step(None) + client_first = {"saslStart": 1, "mechanism": "MONGODB-AWS", "payload": client_payload} + server_first = await conn.command("$external", client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res["payload"]) + cmd = { + "saslContinue": 1, + "conversationId": server_first["conversationId"], + "payload": client_payload, + } + res = await conn.command("$external", cmd) + if res["done"]: + # SASL complete. + break + except pymongo_auth_aws.PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure( + f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})" + ) from None + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py new file mode 100644 index 0000000000..f8f046bd94 --- /dev/null +++ b/pymongo/asynchronous/auth_oidc.py @@ -0,0 +1,305 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import asyncio +import threading +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union + +import bson +from bson.binary import Binary +from pymongo._csot import remaining +from pymongo.auth_oidc_shared import ( + CALLBACK_VERSION, + HUMAN_CALLBACK_TIMEOUT_SECONDS, + MACHINE_CALLBACK_TIMEOUT_SECONDS, + TIME_BETWEEN_CALLS_SECONDS, + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + _OIDCProperties, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _async_create_lock + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.auth_shared import MongoCredential + +_IS_SYNC = False + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if properties.human_callback is not None: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + token_gen_id: int = field(default=0) + if not _IS_SYNC: + lock: Lock = field(default_factory=_async_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_async_create_lock) # type: ignore[assignment, no-redef] + + last_call_time: float = field(default=0) + + async def reauthenticate(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return await self._authenticate_machine(conn) + return await self._authenticate_human(conn) + + async def authenticate(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. 
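+        # properties.callback is set for machine (workload identity) flows and
+        # properties.human_callback for human (workforce identity) flows; the
+        # two branches below differ in their token caching and retry behavior.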
+        if self.properties.callback:
+            return await self._authenticate_machine(conn)
+        return await self._authenticate_human(conn)
+
+    def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]:
+        """Get the appropriate speculative auth command."""
+        if not self.access_token:
+            return None
+        return self._get_start_command({"jwt": self.access_token})
+
+    async def _authenticate_machine(self, conn: AsyncConnection) -> Mapping[str, Any]:
+        # If there is a cached access token, try to authenticate with it. If
+        # authentication fails with error code 18, invalidate the access token,
+        # fetch a new access token, and try to authenticate again. If authentication
+        # fails for any other reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return await self._authenticate_machine(conn)
+                raise
+        return await self._sasl_start_jwt(conn)
+
+    async def _authenticate_human(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]:
+        # If we have a cached access token, try a JwtStepRequest. If
+        # authentication fails with error code 18, invalidate the access token,
+        # and try to authenticate again. If authentication fails for any other
+        # reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return await self._authenticate_human(conn)
+                raise
+
+        # If we have a cached refresh token, try a JwtStepRequest with that.
+        # If authentication fails with error code 18, invalidate the access and
+        # refresh tokens, and try to authenticate again. If authentication fails for
+        # any other reason, raise the error to the user.
+        if self.refresh_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    self.refresh_token = None
+                    return await self._authenticate_human(conn)
+                raise
+
+        # Start a new Two-Step SASL conversation.
+        # Run a PrincipalStepRequest to get the IdpInfo.
+        cmd = self._get_start_command(None)
+        start_resp = await self._run_command(conn, cmd)
+        # Attempt to authenticate with a JwtStepRequest.
+        return await self._sasl_continue_jwt(conn, start_resp)
+
+    async def _get_access_token(self) -> Optional[str]:
+        properties = self.properties
+        cb: Union[None, OIDCCallback] = None
+        resp: OIDCCallbackResult
+
+        is_human = properties.human_callback is not None
+        if is_human and self.idp_info is None:
+            return None
+
+        if properties.callback:
+            cb = properties.callback
+        if properties.human_callback:
+            cb = properties.human_callback
+
+        prev_token = self.access_token
+        if prev_token:
+            return prev_token
+
+        if cb is None and not prev_token:
+            return None
+
+        if not prev_token and cb is not None:
+            async with self.lock:  # type: ignore[attr-defined]
+                # See if the token was changed while we were waiting for the
+                # lock.
+                new_token = self.access_token
+                if new_token != prev_token:
+                    return new_token
+
+                # Ensure that we are waiting a min time between callback invocations.
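+                # Sleep out the remainder of the minimum interval between
+                # callback invocations so repeated reauthentication attempts
+                # cannot hammer the identity provider.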
+ delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + await asyncio.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + if not _IS_SYNC: + resp = await asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token + self.token_gen_id += 1 + + return self.access_token + + async def _run_command( + self, conn: AsyncConnection, cmd: MutableMapping[str, Any] + ) -> Mapping[str, Any]: + try: + return await conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) + raise + + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: AsyncConnection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return + self.access_token = None + + async def _sasl_continue_jwt( + self, conn: AsyncConnection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = await self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return await self._run_command(conn, cmd) + + async def _sasl_start_jwt(self, conn: AsyncConnection) -> Mapping[str, Any]: + access_token = await self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_start_command({"jwt": access_token}) + return await self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } + + +async def _authenticate_oidc( + credentials: MongoCredential, conn: AsyncConnection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return await authenticator.reauthenticate(conn) + else: + return await 
authenticator.authenticate(conn) diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py new file mode 100644 index 0000000000..4a54f9eb3f --- /dev/null +++ b/pymongo/asynchronous/bulk.py @@ -0,0 +1,753 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The bulk write operations interface. + +.. versionadded:: 2.7 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.asynchronous.client_session import AsyncClientSession, _validate_session_write_concern +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.bulk_shared import ( + _COMMANDS, + _DELETE_ALL, + _merge_command, + _raise_bulk_write_error, + _Run, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + InvalidOperation, + NotPrimaryError, + OperationFailure, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _convert_exception, + _convert_write_result, + _EncryptedBulkWriteContext, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline + +_IS_SYNC = False + + +class _AsyncBulk: + """The private guts of the bulk write API.""" + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + ordered: bool, + bypass_document_validation: Optional[bool], + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: + """Initialize a _AsyncBulk instance.""" + self.collection = collection.with_options( + codec_options=collection.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.comment: Optional[str] = comment + self.ordered = ordered + self.ops: list[tuple[int, Mapping[str, Any]]] = [] + self.executed = False + self.bypass_doc_val = bypass_document_validation + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + self.uses_sort = False + self.is_retryable = True + self.retrying = False + self.started_retryable_write = False + # Extra state so that we know where to pick up on 
a retry attempt. + self.current_run = None + self.next_run = None + self.is_encrypted = False + + @property + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: + encrypter = self.collection.database.client._encrypter + if encrypter and not encrypter._bypass_auto_encryption: + self.is_encrypted = True + return _EncryptedBulkWriteContext + else: + self.is_encrypted = False + return _BulkWriteContext + + def add_insert(self, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + self.ops.append((_INSERT, document)) + + def add_update( + self, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append((_UPDATE, cmd)) + + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + self.ops.append((_UPDATE, cmd)) + + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd: dict[str, Any] = {"q": selector, "limit": limit} + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if limit == _DELETE_ALL: + # A bulk_write containing a delete_many is not retryable. + self.is_retryable = False + self.ops.append((_DELETE, cmd)) + + def gen_ordered(self) -> Iterator[Optional[_Run]]: + """Generate batches of operations, batched by type of + operation, in the order **provided**. 
+ """ + run = None + for idx, (op_type, operation) in enumerate(self.ops): + if run is None: + run = _Run(op_type) + elif run.op_type != op_type: + yield run + run = _Run(op_type) + run.add(idx, operation) + yield run + + def gen_unordered(self) -> Iterator[_Run]: + """Generate batches of operations, batched by type of + operation, in arbitrary order. + """ + operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] + for idx, (op_type, operation) in enumerate(self.ops): + operations[op_type].add(idx, operation) + + for run in operations: + if run.ops: + yield run + + @_handle_reauth + async def write_command( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> dict[str, Any]: + """A proxy for SocketInfo.write_command that handles event publishing.""" + cmd[bwc.field] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, docs) + try: + reply = await bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + await client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Process the response from the server. 
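+            # NotPrimaryError and OperationFailure still carry a server reply in
+            # exc.details; handing it to _process_response keeps cluster time
+            # and session state current before the exception propagates.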
+ if isinstance(exc, (NotPrimaryError, OperationFailure)): + await client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + raise + return reply # type: ignore[return-value] + + async def unack_write( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + max_doc_size: int, + docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for AsyncConnection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, docs) + try: + result = await bwc.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + raise + return result # type: ignore[return-value] + + async def _execute_batch_unack( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> list[Mapping[str, Any]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + await bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + write_concern=WriteConcern(w=0), + session=bwc.session, # type: ignore[arg-type] + 
client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + # Though this isn't strictly a "legacy" write, the helper + # handles publishing commands and sending our message + # without receiving a result. Send 0 for max_doc_size + # to disable size checking. Size checking is handled while + # the documents are encoded to BSON. + await self.unack_write(bwc, cmd, request_id, msg, 0, to_send, client) # type: ignore[arg-type] + + return to_send + + async def _execute_batch( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + result = await bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + codec_options=bwc.codec, + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + result = await self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] + + return result, to_send # type: ignore[return-value] + + async def _execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + + if not self.current_run: + self.current_run = next(generator) + self.next_run = None + run = self.current_run + + # AsyncConnection.command validates the session, but we use + # AsyncConnection.write_command + conn.validate_session(client, session) + last_run = False + + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + + cmd = {cmd_name: self.collection.name, "ordered": self.ordered} + if self.comment: + cmd["comment"] = self.comment + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(client, cmd) + ops = islice(run.ops, run.idx_offset, None) + + # Run as many ops as possible in one command. 
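+                # batch_command() packs only as many ops as fit under the
+                # server's batch and message size limits; any remainder is sent
+                # on a later pass through this loop via run.idx_offset.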
+ if write_concern.acknowledged: + result, to_send = await self._execute_batch(bwc, cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = await self._execute_batch_unack(bwc, cmd, ops, client) + + run.idx_offset += len(to_send) + + # We're supposed to continue if errors are + # at the write concern level (e.g. wtimeout) + if self.ordered and full_result["writeErrors"]: + break + # Reset our state + self.current_run = run = self.next_run + + async def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + operation: str, + ) -> dict[str, Any]: + """Execute using write commands.""" + # nModified is only reported for write commands, not legacy ops. + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + op_id = _randint() + + async def retryable_bulk( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable: bool + ) -> None: + await self._execute_command( + generator, + write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + client = self.collection.database.client + _ = await client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, # type: ignore[arg-type] + operation_id=op_id, + ) + + if full_result["writeErrors"] or full_result["writeConcernErrors"]: + _raise_bulk_write_error(full_result) + return full_result + + async def execute_op_msg_no_results( + self, conn: AsyncConnection, generator: Iterator[Any] + ) -> None: + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + op_id = _randint() + + if not self.current_run: + self.current_run = next(generator) + run = self.current_run + + while run: + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + cmd = { + cmd_name: self.collection.name, + "ordered": False, + "writeConcern": {"w": 0}, + } + conn.add_server_api(cmd) + ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible. 
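+                # w=0 writes are fire-and-forget, but oversized batches are
+                # still split, so idx_offset advances by the number of ops
+                # actually sent.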
+ to_send = await self._execute_batch_unack(bwc, cmd, ops, client) + run.idx_offset += len(to_send) + self.current_run = run = next(generator, None) + + async def execute_command_no_results( + self, + conn: AsyncConnection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. + initial_write_concern = WriteConcern() + op_id = _randint() + try: + await self._execute_command( + generator, + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + write_concern, + ) + except OperationFailure: + pass + + async def execute_no_results( + self, + conn: AsyncConnection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if unack and self.uses_hint_update and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + # Cannot have both unacknowledged writes and bypass document validation. 
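+        # Rejected client-side: with w=0 the server never replies, so a failed
+        # or bypassed validation could never be reported back to the caller.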
+ if self.bypass_doc_val: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return await self.execute_command_no_results(conn, generator, write_concern) + return await self.execute_op_msg_no_results(conn, generator) + + async def execute( + self, + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + write_concern = write_concern or self.collection.write_concern + session = _validate_session_write_concern(session, write_concern) + + if self.ordered: + generator = self.gen_ordered() + else: + generator = self.gen_unordered() + + client = self.collection.database.client + if not write_concern.acknowledged: + async with await client._conn_for_writes(session, operation) as connection: + await self.execute_no_results(connection, generator, write_concern) + return None + else: + return await self.execute_command(generator, write_concern, session, operation) diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py new file mode 100644 index 0000000000..b2b78b0660 --- /dev/null +++ b/pymongo/asynchronous/change_stream.py @@ -0,0 +1,496 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union + +from bson import CodecOptions, _bson_to_dict +from bson.raw_bson import RawBSONDocument +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.asynchronous.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) +from pymongo.operations import _Op +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline + +_IS_SYNC = False + +# The change streams spec considers the following server errors from the +# getMore command non-resumable. All other getMore errors are resumable. 
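+# Servers at wire version 9+ (MongoDB 4.4+) instead label resumable errors
+# with the "ResumableChangeStreamError" error label; the code allowlist below
+# only applies to older servers (see _resumable()).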
+_RESUMABLE_GETMORE_ERRORS = frozenset(
+    [
+        6,  # HostUnreachable
+        7,  # HostNotFound
+        89,  # NetworkTimeout
+        91,  # ShutdownInProgress
+        189,  # PrimarySteppedDown
+        262,  # ExceededTimeLimit
+        9001,  # SocketException
+        10107,  # NotWritablePrimary
+        11600,  # InterruptedAtShutdown
+        11602,  # InterruptedDueToReplStateChange
+        13435,  # NotPrimaryNoSecondaryOk
+        13436,  # NotPrimaryOrSecondary
+        63,  # StaleShardVersion
+        150,  # StaleEpoch
+        13388,  # StaleConfig
+        234,  # RetryChangeStream
+        133,  # FailedToSatisfyReadPreference
+    ]
+)
+
+
+if TYPE_CHECKING:
+    from pymongo.asynchronous.client_session import AsyncClientSession
+    from pymongo.asynchronous.collection import AsyncCollection
+    from pymongo.asynchronous.database import AsyncDatabase
+    from pymongo.asynchronous.mongo_client import AsyncMongoClient
+    from pymongo.asynchronous.pool import AsyncConnection
+
+
+def _resumable(exc: PyMongoError) -> bool:
+    """Return True if given a resumable change stream error."""
+    if isinstance(exc, (ConnectionFailure, CursorNotFound)):
+        return True
+    if isinstance(exc, OperationFailure):
+        if exc._max_wire_version is None:
+            return False
+        return (
+            exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError")
+        ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS)
+    return False
+
+
+class AsyncChangeStream(Generic[_DocumentType]):
+    """The internal abstract base class for change stream cursors.
+
+    Should not be called directly by application developers. Use
+    :meth:`pymongo.asynchronous.collection.AsyncCollection.watch`,
+    :meth:`pymongo.asynchronous.database.AsyncDatabase.watch`, or
+    :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.watch` instead.
+
+    .. versionadded:: 3.6
+    .. seealso:: The MongoDB documentation on `changeStreams <https://mongodb.com/docs/manual/changeStreams/>`_.
+    """
+
+    def __init__(
+        self,
+        target: Union[
+            AsyncMongoClient[_DocumentType],
+            AsyncDatabase[_DocumentType],
+            AsyncCollection[_DocumentType],
+        ],
+        pipeline: Optional[_Pipeline],
+        full_document: Optional[str],
+        resume_after: Optional[Mapping[str, Any]],
+        max_await_time_ms: Optional[int],
+        batch_size: Optional[int],
+        collation: Optional[_CollationIn],
+        start_at_operation_time: Optional[Timestamp],
+        session: Optional[AsyncClientSession],
+        start_after: Optional[Mapping[str, Any]],
+        comment: Optional[Any] = None,
+        full_document_before_change: Optional[str] = None,
+        show_expanded_events: Optional[bool] = None,
+    ) -> None:
+        if pipeline is None:
+            pipeline = []
+        pipeline = common.validate_list("pipeline", pipeline)
+        common.validate_string_or_none("full_document", full_document)
+        validate_collation_or_none(collation)
+        common.validate_non_negative_integer_or_none("batchSize", batch_size)
+
+        self._decode_custom = False
+        self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options
+        if target.codec_options.type_registry._decoder_map:
+            self._decode_custom = True
+            # Keep the type registry so that we support encoding custom types
+            # in the pipeline.
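+            # Reading with RawBSONDocument defers decoding: each raw change
+            # document is later re-decoded with the original codec options so
+            # custom type decoders still apply to the returned changes.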
+ self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) + else: + self._target = target + + self._pipeline = copy.deepcopy(pipeline) + self._full_document = full_document + self._full_document_before_change = full_document_before_change + self._uses_start_after = start_after is not None + self._uses_resume_after = resume_after is not None + self._resume_token = copy.deepcopy(start_after or resume_after) + self._max_await_time_ms = max_await_time_ms + self._batch_size = batch_size + self._collation = collation + self._start_at_operation_time = start_at_operation_time + self._session = session + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events + + async def _initialize_cursor(self) -> None: + # Initialize cursor. + self._cursor = await self._create_cursor() + + @property + def _aggregation_command_class(self) -> Type[_AggregationCommand]: + """The aggregation command class to be used.""" + raise NotImplementedError + + @property + def _client(self) -> AsyncMongoClient: # type: ignore[type-arg] + """The client against which the aggregation commands for + this AsyncChangeStream will be run. + """ + raise NotImplementedError + + def _change_stream_options(self) -> dict[str, Any]: + """Return the options dict for the $changeStream pipeline stage.""" + options: dict[str, Any] = {} + if self._full_document is not None: + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + + resume_token = self.resume_token + if resume_token is not None: + if self._uses_start_after: + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token + + elif self._start_at_operation_time is not None: + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + + return options + + def _command_options(self) -> dict[str, Any]: + """Return the options dict for the aggregation command.""" + options = {} + if self._max_await_time_ms is not None: + options["maxAwaitTimeMS"] = self._max_await_time_ms + if self._batch_size is not None: + options["batchSize"] = self._batch_size + return options + + def _aggregation_pipeline(self) -> list[dict[str, Any]]: + """Return the full aggregation pipeline for this AsyncChangeStream.""" + options = self._change_stream_options() + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] + full_pipeline.extend(self._pipeline) + return full_pipeline + + def _process_result(self, result: Mapping[str, Any], conn: AsyncConnection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. + + This is implemented as a callback because we need access to the wire + version in order to determine whether to cache this value. 
+ """ + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + async def _run_aggregation_cmd( + self, session: Optional[AsyncClientSession] + ) -> AsyncCommandCursor: # type: ignore[type-arg] + """Run the full aggregation pipeline for this AsyncChangeStream and return + the corresponding AsyncCommandCursor. + """ + cmd = self._aggregation_command_class( + self._target, + AsyncCommandCursor, + self._aggregation_pipeline(), + self._command_options(), + result_processor=self._process_result, + comment=self._comment, + ) + return await self._client._retryable_read( + cmd.get_cursor, + self._target._read_preference_for(session), + session, + operation=_Op.AGGREGATE, + ) + + async def _create_cursor(self) -> AsyncCommandCursor: # type: ignore[type-arg] + async with self._client._tmp_session(self._session) as s: + return await self._run_aggregation_cmd(session=s) + + async def _resume(self) -> None: + """Reestablish this change stream after a resumable error.""" + try: + await self._cursor.close() + except PyMongoError: + pass + self._cursor = await self._create_cursor() + + async def close(self) -> None: + """Close this AsyncChangeStream.""" + self._closed = True + await self._cursor.close() + + def __aiter__(self) -> AsyncChangeStream[_DocumentType]: + return self + + @property + def resume_token(self) -> Optional[Mapping[str, Any]]: + """The cached resume token that will be used to resume after the most + recently returned change. + + .. versionadded:: 3.9 + """ + return copy.deepcopy(self._resume_token) + + @_csot.apply + async def next(self) -> _DocumentType: + """Advance the cursor. + + This method blocks until the next change document is returned or an + unrecoverable error is raised. This method is used when iterating over + all changes in the cursor. For example:: + + try: + resume_token = None + pipeline = [{'$match': {'operationType': 'insert'}}] + async with await db.collection.watch(pipeline) as stream: + async for insert_change in stream: + print(insert_change) + resume_token = stream.resume_token + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + if resume_token is None: + # There is no usable resume token because there was a + # failure during AsyncChangeStream initialization. + logging.error('...') + else: + # Use the interrupted AsyncChangeStream's resume token to create + # a new AsyncChangeStream. The new stream will continue from the + # last seen insert change without missing any events. + async with await db.collection.watch( + pipeline, resume_after=resume_token) as stream: + async for insert_change in stream: + print(insert_change) + + Raises :exc:`StopIteration` if this AsyncChangeStream is closed. 
+        """
+        while self.alive:
+            doc = await self.try_next()
+            if doc is not None:
+                return doc
+
+        raise StopAsyncIteration
+
+    __anext__ = next
+
+    @property
+    def alive(self) -> bool:
+        """Does this cursor have the potential to return more data?
+
+        .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise
+          :exc:`StopAsyncIteration` and :meth:`try_next` can return ``None``.
+
+        .. versionadded:: 3.8
+        """
+        return not self._closed
+
+    @_csot.apply
+    async def try_next(self) -> Optional[_DocumentType]:
+        """Advance the cursor without blocking indefinitely.
+
+        This method returns the next change document without waiting
+        indefinitely for the next change. For example::
+
+            async with await db.collection.watch() as stream:
+                while stream.alive:
+                    change = await stream.try_next()
+                    # Note that the AsyncChangeStream's resume token may be updated
+                    # even when no changes are returned.
+                    print("Current resume token: %r" % (stream.resume_token,))
+                    if change is not None:
+                        print("Change document: %r" % (change,))
+                        continue
+                    # We end up here when there are no recent changes.
+                    # Sleep for a while before trying again to avoid flooding
+                    # the server with getMore requests when no changes are
+                    # available.
+                    await asyncio.sleep(10)
+
+        If no change document is cached locally then this method runs a single
+        getMore command. If the getMore yields any documents, the next
+        document is returned; otherwise, if the getMore returns no documents
+        (because there have been no changes) then ``None`` is returned.
+
+        :return: The next change document or ``None`` when no document is available
+          after running a single getMore or when the cursor is closed.
+
+        .. versionadded:: 3.8
+        """
+        if not self._closed and not self._cursor.alive:
+            await self._resume()
+
+        # Attempt to get the next change with at most one getMore and at most
+        # one resume attempt.
+        try:
+            try:
+                change = await self._cursor._try_next(True)
+            except PyMongoError as exc:
+                if not _resumable(exc):
+                    raise
+                await self._resume()
+                change = await self._cursor._try_next(False)
+        except PyMongoError as exc:
+            # Close the stream after a fatal error.
+            if not _resumable(exc) and not exc.timeout:
+                await self.close()
+            raise
+        # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+        except BaseException:
+            await self.close()
+            raise
+
+        # Check if the cursor was invalidated.
+        if not self._cursor.alive:
+            self._closed = True
+
+        # If no changes are available.
+        if change is None:
+            # We have either iterated over all documents in the cursor,
+            # OR the most-recently returned batch is empty. In either case,
+            # update the cached resume token with the postBatchResumeToken if
+            # one was returned. We also clear the startAtOperationTime.
+            if self._cursor._post_batch_resume_token is not None:
+                self._resume_token = self._cursor._post_batch_resume_token
+                self._start_at_operation_time = None
+            return change
+
+        # Else, changes are available.
+        try:
+            resume_token = change["_id"]
+        except KeyError:
+            await self.close()
+            raise InvalidOperation(
+                "Cannot provide resume functionality when the resume token is missing."
+            ) from None
+
+        # If this is the last change document from the current batch, cache the
+        # postBatchResumeToken.
+        if not self._cursor._has_next() and self._cursor._post_batch_resume_token:
+            resume_token = self._cursor._post_batch_resume_token
+
+        # Hereafter, don't use startAfter; instead use resumeAfter.
+        self._uses_start_after = False
+        self._uses_resume_after = True
+
+        # Cache the resume token and clear startAtOperationTime.
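+        # (This cached token is what the resume_token property exposes and
+        # what a resumed stream sends as resumeAfter.)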
+ self._resume_token = resume_token + self._start_at_operation_time = None + + if self._decode_custom: + return _bson_to_dict(change.raw, self._orig_codec_options) + return change + + async def __aenter__(self) -> AsyncChangeStream[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + +class AsyncCollectionChangeStream(AsyncChangeStream[_DocumentType]): + """A change stream that watches changes on a single collection. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.collection.AsyncCollection.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: AsyncCollection[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: + return _CollectionAggregationCommand + + @property + def _client(self) -> AsyncMongoClient[_DocumentType]: + return self._target.database.client + + +class AsyncDatabaseChangeStream(AsyncChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in a database. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.database.AsyncDatabase.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: AsyncDatabase[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: + return _DatabaseAggregationCommand + + @property + def _client(self) -> AsyncMongoClient[_DocumentType]: + return self._target.client + + +class AsyncClusterChangeStream(AsyncDatabaseChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in the cluster. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.watch` instead. + + .. versionadded:: 3.7 + """ + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() + options["allChangesForCluster"] = True + return options diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py new file mode 100644 index 0000000000..151942c8a8 --- /dev/null +++ b/pymongo/asynchronous/client_bulk.py @@ -0,0 +1,756 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. 
versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.asynchronous.client_session import AsyncClientSession, _validate_session_write_concern +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class _AsyncClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: AsyncMongoClient[Any], + write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _AsyncClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + self.executed = False + self.uses_collation = False + self.uses_array_filters = False + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. 
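+        # (RawBSONDocument is immutable, so it is never modified here; for
+        # other documents the _id is filled in client side so results can
+        # report it without a server round trip.)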
+        if not (isinstance(document, RawBSONDocument) or "_id" in document):
+            document["_id"] = ObjectId()
+        cmd = {"insert": -1, "document": document}
+        self.ops.append(("insert", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_update(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        update: Union[Mapping[str, Any], _Pipeline],
+        multi: bool,
+        upsert: Optional[bool] = None,
+        collation: Optional[Mapping[str, Any]] = None,
+        array_filters: Optional[list[Mapping[str, Any]]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+        sort: Optional[Mapping[str, Any]] = None,
+    ) -> None:
+        """Create an update document and add it to the list of ops."""
+        validate_ok_for_update(update)
+        cmd = {
+            "update": -1,
+            "filter": selector,
+            "updateMods": update,
+            "multi": multi,
+        }
+        if upsert is not None:
+            cmd["upsert"] = upsert
+        if array_filters is not None:
+            self.uses_array_filters = True
+            cmd["arrayFilters"] = array_filters
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if sort is not None:
+            cmd["sort"] = sort
+        if multi:
+            # A bulk_write containing an update_many is not retryable.
+            self.is_retryable = False
+        self.ops.append(("update", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_replace(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        replacement: Mapping[str, Any],
+        upsert: Optional[bool] = None,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+        sort: Optional[Mapping[str, Any]] = None,
+    ) -> None:
+        """Create a replace document and add it to the list of ops."""
+        validate_ok_for_replace(replacement)
+        cmd = {
+            "update": -1,
+            "filter": selector,
+            "updateMods": replacement,
+            "multi": False,
+        }
+        if upsert is not None:
+            cmd["upsert"] = upsert
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if sort is not None:
+            cmd["sort"] = sort
+        self.ops.append(("replace", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_delete(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        multi: bool,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+    ) -> None:
+        """Create a delete document and add it to the list of ops."""
+        cmd = {"delete": -1, "filter": selector, "multi": multi}
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if multi:
+            # A bulk_write containing a delete_many is not retryable.
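+            # (The retryable writes spec excludes multi-document update/delete
+            # ops, so one such op disables retries for the whole bulk write.)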
+ self.is_retryable = False + self.ops.append(("delete", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + @_handle_reauth + async def write_command( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: Union[bytes, dict[str, Any]], + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> dict[str, Any]: + """A proxy for AsyncConnection.write_command that handles event publishing.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, op_docs, ns_docs) + try: + reply = await bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc, arg-type] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + # Process the response from the server. + await self.client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + # Process the response from the server. 
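+            # (Even a failure reply may carry $clusterTime and session state
+            # that must be recorded.)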
+ if isinstance(exc, OperationFailure): + await self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + await self.client._process_response({}, bwc.session) # type: ignore[arg-type] + return reply # type: ignore[return-value] + + async def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for AsyncConnection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = await bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. 
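+            # Surface the failure through the reply instead of raising here.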
+ reply = {"error": exc} + return reply + + async def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + await self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + async def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + result = await self.write_command( + bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client + ) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + async def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: AsyncConnection, + session: Optional[AsyncClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + if session: + session._leave_alive = True + coll = AsyncCollection( + database=AsyncDatabase(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = AsyncCommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + comment=self.comment, + ) + await cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. + try: + async for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. 
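+                # (The stored error is later merged into the full bulk result,
+                # which halts any further batches.)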
+ if cmd_cursor.alive: + await cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + async def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # AsyncConnection.command validates the session, but we use + # AsyncConnection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = await self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result + + # Top-level server/network error. + if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + hasattr(error, "details") + and isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. 
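+                # An ok: 0 reply fails the entire bulkWrite command, so record
+                # it and stop dispatching batches.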
+ if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + await self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + async def execute_command( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + async def retryable_bulk( + session: Optional[AsyncClientSession], + conn: AsyncConnection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + await self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + async def execute_command_unack( + self, + conn: AsyncConnection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. 
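+            # With w:0 the server returns no per-op results, so the options
+            # are fixed: unordered, errorsOnly, and writeConcern {"w": 0}.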
+ cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = True + cmd["ordered"] = False + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + async def execute_no_results( + self, + conn: AsyncConnection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + return await self.execute_command_unack(conn) + + async def execute( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + async with await self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = await self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py new file mode 100644 index 0000000000..8674e98447 --- /dev/null +++ b/pymongo/asynchronous/client_session.py @@ -0,0 +1,1187 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Logical sessions for ordering sequential operations. + +.. versionadded:: 3.6 + +Causally Consistent Reads +========================= + +.. 
code-block:: python
+
+    async with client.start_session(causal_consistency=True) as session:
+        collection = client.db.collection
+        await collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session)
+        secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY)
+
+        # A secondary read waits for replication of the write.
+        await secondary_c.find_one({"_id": 1}, session=session)
+
+If `causal_consistency` is True (the default), read operations that use
+the session are causally ordered after previous read and write operations.
+Using a causally consistent session, an application can read its own writes
+and is guaranteed monotonic reads, even when reading from replica set
+secondaries.
+
+.. seealso:: The MongoDB documentation on `causal-consistency
+   <https://www.mongodb.com/docs/manual/core/read-isolation-consistency-recency/#causal-consistency>`_.
+
+.. _async-transactions-ref:
+
+Transactions
+============
+
+.. versionadded:: 3.7
+
+MongoDB 4.0 adds support for transactions on replica set primaries. A
+transaction is associated with an :class:`AsyncClientSession`. To start a transaction
+on a session, use :meth:`AsyncClientSession.start_transaction` in a with-statement.
+Then, execute an operation within the transaction by passing the session to the
+operation:
+
+.. code-block:: python
+
+    orders = client.db.orders
+    inventory = client.db.inventory
+    async with client.start_session() as session:
+        async with await session.start_transaction():
+            await orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
+            await inventory.update_one(
+                {"sku": "abc123", "qty": {"$gte": 100}},
+                {"$inc": {"qty": -100}},
+                session=session,
+            )
+
+Upon normal completion of the ``async with await session.start_transaction()`` block,
+the transaction automatically calls :meth:`AsyncClientSession.commit_transaction`.
+If the block exits with an exception, the transaction automatically calls
+:meth:`AsyncClientSession.abort_transaction`.
+
+In general, multi-document transactions only support read/write (CRUD)
+operations on existing collections. However, MongoDB 4.4 adds support for
+creating collections and indexes with some limitations, including an
+insert operation that would result in the creation of a new collection.
+For a complete description of all the supported and unsupported operations
+see the `MongoDB server's documentation for transactions
+<https://www.mongodb.com/docs/manual/core/transactions/>`_.
+
+A session may only have a single active transaction at a time; however,
+multiple transactions on the same session can be executed in sequence.
+
+Sharded Transactions
+^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.9
+
+PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB
+>=4.2. Sharded transactions have the same API as replica set transactions.
+When running a transaction against a sharded cluster, the session is
+pinned to the mongos server selected for the first operation in the
+transaction. All subsequent operations that are part of the same transaction
+are routed to the same mongos server. When the transaction is completed, by
+running either commitTransaction or abortTransaction, the session is unpinned.
+
+.. seealso:: The MongoDB documentation on `transactions
+   <https://www.mongodb.com/docs/manual/core/transactions/>`_.
+
+.. _async-snapshot-reads-ref:
+
+Snapshot Reads
+==============
+
+.. versionadded:: 3.12
+
+MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by
+passing the ``snapshot`` option to
+:meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.start_session`.
+If ``snapshot`` is True, all read operations that use this session read data
+from the same snapshot timestamp.
The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. + async with client.start_session(snapshot=True) as session: + order = await orders.find_one({"sku": "abc123"}, session=session) + inventory = await inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.count_documents` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` (on unsharded collections) + +Classes +======= +""" + +from __future__ import annotations + +import collections +import time +import uuid +from collections.abc import Mapping as _Mapping +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Coroutine, + Mapping, + MutableMapping, + NoReturn, + Optional, + Type, + TypeVar, +) + +from bson.binary import Binary +from bson.int64 import Int64 +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.asynchronous.cursor import _ConnectionManager +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_type import SERVER_TYPE +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = False + + +class SessionOptions: + """Options for a new :class:`AsyncClientSession`. + + :param causal_consistency: If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. + :param default_transaction_options: The default + TransactionOptions to use for transactions started on this session. + :param snapshot: If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. 
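+
+    For example (an illustrative sketch, assuming ``client`` is an existing
+    :class:`AsyncMongoClient`)::
+
+        opts = TransactionOptions(read_concern=ReadConcern("majority"))
+        async with client.start_session(
+            causal_consistency=True, default_transaction_options=opts
+        ) as session:
+            ...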
+    """
+
+    def __init__(
+        self,
+        causal_consistency: Optional[bool] = None,
+        default_transaction_options: Optional[TransactionOptions] = None,
+        snapshot: Optional[bool] = False,
+    ) -> None:
+        if snapshot:
+            if causal_consistency:
+                raise ConfigurationError("snapshot reads do not support causal_consistency=True")
+            causal_consistency = False
+        elif causal_consistency is None:
+            causal_consistency = True
+        self._causal_consistency = causal_consistency
+        if default_transaction_options is not None:
+            if not isinstance(default_transaction_options, TransactionOptions):
+                raise TypeError(
+                    "default_transaction_options must be an instance of "
+                    "pymongo.client_session.TransactionOptions, not: {!r}".format(
+                        default_transaction_options
+                    )
+                )
+        self._default_transaction_options = default_transaction_options
+        self._snapshot = snapshot
+
+    @property
+    def causal_consistency(self) -> bool:
+        """Whether causal consistency is configured."""
+        return self._causal_consistency
+
+    @property
+    def default_transaction_options(self) -> Optional[TransactionOptions]:
+        """The default TransactionOptions to use for transactions started on
+        this session.
+
+        .. versionadded:: 3.7
+        """
+        return self._default_transaction_options
+
+    @property
+    def snapshot(self) -> Optional[bool]:
+        """Whether snapshot reads are configured.
+
+        .. versionadded:: 3.12
+        """
+        return self._snapshot
+
+
+class TransactionOptions:
+    """Options for :meth:`AsyncClientSession.start_transaction`.
+
+    :param read_concern: The
+        :class:`~pymongo.read_concern.ReadConcern` to use for this transaction.
+        If ``None`` (the default) the :attr:`read_concern` of
+        the :class:`AsyncMongoClient` is used.
+    :param write_concern: The
+        :class:`~pymongo.write_concern.WriteConcern` to use for this
+        transaction. If ``None`` (the default) the :attr:`write_concern` of
+        the :class:`AsyncMongoClient` is used.
+    :param read_preference: The read preference to use. If
+        ``None`` (the default) the :attr:`read_preference` of this
+        :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences`
+        for options. Transactions which read must use
+        :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
+    :param max_commit_time_ms: The maximum amount of time to allow a
+        single commitTransaction command to run. This option is an alias for
+        the maxTimeMS option on the commitTransaction command. If ``None`` (the
+        default) maxTimeMS is not used.
+
+    .. versionchanged:: 3.9
+       Added the ``max_commit_time_ms`` option.
+
+    .. 
versionadded:: 3.7 + """ + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: + self._read_concern = read_concern + self._write_concern = write_concern + self._read_preference = read_preference + self._max_commit_time_ms = max_commit_time_ms + if read_concern is not None: + if not isinstance(read_concern, ReadConcern): + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) + if write_concern is not None: + if not isinstance(write_concern, WriteConcern): + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) + if not write_concern.acknowledged: + raise ConfigurationError( + "transactions do not support unacknowledged write concern" + f": {write_concern!r}" + ) + if read_preference is not None: + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) + + @property + def read_concern(self) -> Optional[ReadConcern]: + """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" + return self._read_concern + + @property + def write_concern(self) -> Optional[WriteConcern]: + """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" + return self._write_concern + + @property + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" + return self._read_preference + + @property + def max_commit_time_ms(self) -> Optional[int]: + """The maxTimeMS to use when running a commitTransaction command. + + .. versionadded:: 3.9 + """ + return self._max_commit_time_ms + + +def _validate_session_write_concern( + session: Optional[AsyncClientSession], write_concern: Optional[WriteConcern] +) -> Optional[AsyncClientSession]: + """Validate that an explicit session is not used with an unack'ed write. + + Returns the session to use for the next operation. + """ + if session: + if write_concern is not None and not write_concern.acknowledged: + # For unacknowledged writes without an explicit session, + # drivers SHOULD NOT use an implicit session. If a driver + # creates an implicit session for unacknowledged writes + # without an explicit session, the driver MUST NOT send the + # session ID. 
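+            # Dropping the implicit session below satisfies that requirement.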
+ if session._implicit: + return None + else: + raise ConfigurationError( + "Explicit sessions are incompatible with " + f"unacknowledged write concern: {write_concern!r}" + ) + return session + + +class _TransactionContext: + """Internal transaction context manager for start_transaction.""" + + def __init__(self, session: AsyncClientSession): + self.__session = session + + async def __aenter__(self) -> _TransactionContext: + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self.__session.in_transaction: + if exc_val is None: + await self.__session.commit_transaction() + else: + await self.__session.abort_transaction() + + +class _TxnState: + NONE = 1 + STARTING = 2 + IN_PROGRESS = 3 + COMMITTED = 4 + COMMITTED_EMPTY = 5 + ABORTED = 6 + + +class _Transaction: + """Internal class to hold transaction information in a AsyncClientSession.""" + + def __init__(self, opts: Optional[TransactionOptions], client: AsyncMongoClient[Any]): + self.opts = opts + self.state = _TxnState.NONE + self.sharded = False + self.pinned_address: Optional[_Address] = None + self.conn_mgr: Optional[_ConnectionManager] = None + self.recovery_token = None + self.attempt = 0 + self.client = client + + def active(self) -> bool: + return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) + + def starting(self) -> bool: + return self.state == _TxnState.STARTING + + @property + def pinned_conn(self) -> Optional[AsyncConnection]: + if self.active() and self.conn_mgr: + return self.conn_mgr.conn + return None + + def pin(self, server: Server, conn: AsyncConnection) -> None: + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + conn.pin_txn() + self.conn_mgr = _ConnectionManager(conn, False) + + async def unpin(self) -> None: + self.pinned_address = None + if self.conn_mgr: + await self.conn_mgr.close() + self.conn_mgr = None + + async def reset(self) -> None: + await self.unpin() + self.state = _TxnState.NONE + self.sharded = False + self.recovery_token = None + self.attempt = 0 + + def __del__(self) -> None: + if self.conn_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.conn_mgr) + self.conn_mgr = None + + +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: + """Re-raise an exception with the UnknownTransactionCommitResult label.""" + exc._add_error_label("UnknownTransactionCommitResult") + raise + + +def _max_time_expired_error(exc: PyMongoError) -> bool: + """Return true if exc is a MaxTimeMSExpired error.""" + return isinstance(exc, OperationFailure) and exc.code == 50 + + +# From the transactions spec, all the retryable writes errors plus +# WriteConcernTimeout. +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( # type: ignore[type-arg] + [ + 64, # WriteConcernTimeout + 50, # MaxTimeMSExpired + ] +) + +# From the Convenient API for Transactions spec, with_transaction must +# halt retries after 120 seconds. +# This limit is non-configurable and was chosen to be twice the 60 second +# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. 
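+# The clock starts when with_transaction is entered, not per attempt.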
+_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120
+
+
+def _within_time_limit(start_time: float) -> bool:
+    """Are we within the with_transaction retry limit?"""
+    return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT
+
+
+_T = TypeVar("_T")
+
+if TYPE_CHECKING:
+    from pymongo.asynchronous.mongo_client import AsyncMongoClient
+
+
+class AsyncClientSession:
+    """A session for ordering sequential operations.
+
+    :class:`AsyncClientSession` instances are **not thread-safe or fork-safe**.
+    They can only be used by one thread or process at a time. A single
+    :class:`AsyncClientSession` cannot be used to run multiple operations
+    concurrently.
+
+    Should not be initialized directly by application developers; to create a
+    :class:`AsyncClientSession`, call
+    :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.start_session`.
+    """
+
+    def __init__(
+        self,
+        client: AsyncMongoClient[Any],
+        server_session: Any,
+        options: SessionOptions,
+        implicit: bool,
+    ) -> None:
+        # An AsyncMongoClient, a _ServerSession, a SessionOptions, and an
+        # implicit flag.
+        self._client: AsyncMongoClient[Any] = client
+        self._server_session = server_session
+        self._options = options
+        self._cluster_time: Optional[Mapping[str, Any]] = None
+        self._operation_time: Optional[Timestamp] = None
+        self._snapshot_time = None
+        # Is this an implicitly created session?
+        self._implicit = implicit
+        self._transaction = _Transaction(None, client)
+        # Is this session attached to a cursor?
+        self._attached_to_cursor = False
+        # Should we leave the session alive when the cursor is closed?
+        self._leave_alive = False
+
+    async def end_session(self) -> None:
+        """Finish this session. If a transaction has started, abort it.
+
+        It is an error to use the session after the session has ended.
+        """
+        await self._end_session(lock=True)
+
+    async def _end_session(self, lock: bool) -> None:
+        if self._server_session is not None:
+            try:
+                if self.in_transaction:
+                    await self.abort_transaction()
+                # It's possible we're still pinned here when the transaction
+                # is in the committed state when the session is discarded.
+                await self._unpin()
+            finally:
+                self._client._return_server_session(self._server_session)
+                self._server_session = None
+
+    def _end_implicit_session(self) -> None:
+        # Implicit sessions can't be part of transactions or pinned connections
+        if not self._leave_alive and self._server_session is not None:
+            self._client._return_server_session(self._server_session)
+            self._server_session = None
+
+    def _check_ended(self) -> None:
+        if self._server_session is None:
+            raise InvalidOperation("Cannot use ended session")
+
+    async def __aenter__(self) -> AsyncClientSession:
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        await self._end_session(lock=True)
+
+    @property
+    def client(self) -> AsyncMongoClient[Any]:
+        """The :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` this session was
+        created from.
+ """ + return self._client + + @property + def options(self) -> SessionOptions: + """The :class:`SessionOptions` this session was created with.""" + return self._options + + @property + def session_id(self) -> Mapping[str, Any]: + """A BSON document, the opaque server session identifier.""" + self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.session_id + + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + """The cluster time returned by the last operation executed + in this session. + """ + return self._cluster_time + + @property + def operation_time(self) -> Optional[Timestamp]: + """The operation time returned by the last operation executed + in this session. + """ + return self._operation_time + + def _inherit_option(self, name: str, val: _T) -> _T: + """Return the inherited TransactionOption value.""" + if val: + return val + txn_opts = self.options.default_transaction_options + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val + return getattr(self.client, name) + + async def with_transaction( + self, + callback: Callable[[AsyncClientSession], Coroutine[Any, Any, _T]], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: + """Execute a callback in a transaction. + + This method starts a transaction on this session, executes ``callback`` + once, and then commits the transaction. For example:: + + async def callback(session): + orders = session.client.db.orders + inventory = session.client.db.inventory + await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + await inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, session=session) + + async with client.start_session() as session: + await session.with_transaction(callback) + + To pass arbitrary arguments to the ``callback``, wrap your callable + with a ``lambda`` like this:: + + async def callback(session, custom_arg, custom_kwarg=None): + # Transaction operations... + + async with client.start_session() as session: + await session.with_transaction( + lambda s: callback(s, "custom_arg", custom_kwarg=1)) + + In the event of an exception, ``with_transaction`` may retry the commit + or the entire transaction, therefore ``callback`` may be invoked + multiple times by a single call to ``with_transaction``. Developers + should be mindful of this possibility when writing a ``callback`` that + modifies application state or has any other side-effects. + Note that even when the ``callback`` is invoked multiple times, + ``with_transaction`` ensures that the transaction will be committed + at-most-once on the server. + + The ``callback`` should not attempt to start new transactions, but + should simply run operations meant to be contained within a + transaction. The ``callback`` should also not commit the transaction; + this is handled automatically by ``with_transaction``. If the + ``callback`` does commit or abort the transaction without error, + however, ``with_transaction`` will return without taking further + action. 
+
+        :class:`AsyncClientSession` instances are **not thread-safe or fork-safe**.
+        Consequently, the ``callback`` must not attempt to execute multiple
+        operations concurrently.
+
+        When ``callback`` raises an exception, ``with_transaction``
+        automatically aborts the current transaction. When ``callback`` or
+        :meth:`~AsyncClientSession.commit_transaction` raises an exception that
+        includes the ``"TransientTransactionError"`` error label,
+        ``with_transaction`` starts a new transaction and re-executes
+        the ``callback``.
+
+        The ``callback`` MUST NOT silently handle command errors
+        without allowing such errors to propagate. Command errors may abort the
+        transaction on the server, and an attempt to commit the transaction will
+        be rejected with a ``NoSuchTransaction`` error. For more information see
+        the `transactions specification`_.
+
+        When :meth:`~AsyncClientSession.commit_transaction` raises an exception with
+        the ``"UnknownTransactionCommitResult"`` error label,
+        ``with_transaction`` retries the commit until the result of the
+        transaction is known.
+
+        This method will cease retrying after 120 seconds has elapsed. This
+        timeout is not configurable and any exception raised by the
+        ``callback`` or by :meth:`AsyncClientSession.commit_transaction` after the
+        timeout is reached will be re-raised. Applications that desire a
+        different timeout duration should not use this method.
+
+        :param callback: The callable ``callback`` to run inside a transaction.
+            The callable must accept a single argument, this session. Note,
+            under certain error conditions the callback may be run multiple
+            times.
+        :param read_concern: The
+            :class:`~pymongo.read_concern.ReadConcern` to use for this
+            transaction.
+        :param write_concern: The
+            :class:`~pymongo.write_concern.WriteConcern` to use for this
+            transaction.
+        :param read_preference: The read preference to use for this
+            transaction. If ``None`` (the default) the read preference is
+            inherited from this session's default transaction options or,
+            failing that, from its :class:`AsyncMongoClient`. See
+            :mod:`~pymongo.read_preferences` for options.
+
+        :return: The return value of the ``callback``.
+
+        .. versionadded:: 3.9
+
+        .. _transactions specification:
+            https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback
+        """
+        start_time = time.monotonic()
+        while True:
+            await self.start_transaction(
+                read_concern, write_concern, read_preference, max_commit_time_ms
+            )
+            try:
+                ret = await callback(self)
+            # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+            except BaseException as exc:
+                if self.in_transaction:
+                    await self.abort_transaction()
+                if (
+                    isinstance(exc, PyMongoError)
+                    and exc.has_error_label("TransientTransactionError")
+                    and _within_time_limit(start_time)
+                ):
+                    # Retry the entire transaction.
+                    continue
+                raise
+
+            if not self.in_transaction:
+                # Assume callback intentionally ended the transaction.
+                return ret
+
+            while True:
+                try:
+                    await self.commit_transaction()
+                except PyMongoError as exc:
+                    if (
+                        exc.has_error_label("UnknownTransactionCommitResult")
+                        and _within_time_limit(start_time)
+                        and not _max_time_expired_error(exc)
+                    ):
+                        # Retry the commit.
+                        continue
+
+                    if exc.has_error_label("TransientTransactionError") and _within_time_limit(
+                        start_time
+                    ):
+                        # Retry the entire transaction.
+                        break
+                    raise
+
+                # Commit succeeded.
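+                # (Returning here exits both retry loops with the callback's
+                # return value.)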
+                return ret
+
+    async def start_transaction(
+        self,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_preference: Optional[_ServerMode] = None,
+        max_commit_time_ms: Optional[int] = None,
+    ) -> AsyncContextManager[Any]:
+        """Start a multi-statement transaction.
+
+        Takes the same arguments as :class:`TransactionOptions`.
+
+        .. versionchanged:: 3.9
+           Added the ``max_commit_time_ms`` option.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        if self.options.snapshot:
+            raise InvalidOperation("Transactions are not supported in snapshot sessions")
+
+        if self.in_transaction:
+            raise InvalidOperation("Transaction already in progress")
+
+        read_concern = self._inherit_option("read_concern", read_concern)
+        write_concern = self._inherit_option("write_concern", write_concern)
+        read_preference = self._inherit_option("read_preference", read_preference)
+        if max_commit_time_ms is None:
+            opts = self.options.default_transaction_options
+            if opts:
+                max_commit_time_ms = opts.max_commit_time_ms
+
+        self._transaction.opts = TransactionOptions(
+            read_concern, write_concern, read_preference, max_commit_time_ms
+        )
+        await self._transaction.reset()
+        self._transaction.state = _TxnState.STARTING
+        self._start_retryable_write()
+        return _TransactionContext(self)
+
+    async def commit_transaction(self) -> None:
+        """Commit a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
+            # Server transaction was never started, no need to send a command.
+            self._transaction.state = _TxnState.COMMITTED_EMPTY
+            return
+        elif state is _TxnState.ABORTED:
+            raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction")
+        elif state is _TxnState.COMMITTED:
+            # We're explicitly retrying the commit; move the state back to
+            # "in progress" so that in_transaction returns true.
+            self._transaction.state = _TxnState.IN_PROGRESS
+
+        try:
+            await self._finish_transaction_with_retry("commitTransaction")
+        except ConnectionFailure as exc:
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern; set the
+            # unknown commit error label.
+            exc._remove_error_label("TransientTransactionError")
+            _reraise_with_unknown_commit(exc)
+        except WTimeoutError as exc:
+            # We do not know if the commit has satisfied the provided write
+            # concern; add the unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        except OperationFailure as exc:
+            if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
+                # The server reports errorLabels in this case.
+                raise
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern; set the
+            # unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        finally:
+            self._transaction.state = _TxnState.COMMITTED
+
+    async def abort_transaction(self) -> None:
+        """Abort a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state is _TxnState.STARTING:
+            # Server transaction was never started, no need to send a command.
+ self._transaction.state = _TxnState.ABORTED + return + elif state is _TxnState.ABORTED: + raise InvalidOperation("Cannot call abortTransaction twice") + elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") + + try: + await self._finish_transaction_with_retry("abortTransaction") + except (OperationFailure, ConnectionFailure): + # The transactions spec says to ignore abortTransaction errors. + pass + finally: + self._transaction.state = _TxnState.ABORTED + await self._unpin() + + async def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: + """Run commit or abort with one retry after any retryable error. + + :param command_name: Either "commitTransaction" or "abortTransaction". + """ + + async def func( + _session: Optional[AsyncClientSession], conn: AsyncConnection, _retryable: bool + ) -> dict[str, Any]: + return await self._finish_transaction(conn, command_name) + + return await self._client._retry_internal( + func, self, None, retryable=True, operation=command_name + ) + + async def _finish_transaction(self, conn: AsyncConnection, command_name: str) -> dict[str, Any]: + self._transaction.attempt += 1 + opts = self._transaction.opts + assert opts + wc = opts.write_concern + cmd = {command_name: 1} + if command_name == "commitTransaction": + if opts.max_commit_time_ms and _csot.get_timeout() is None: + cmd["maxTimeMS"] = opts.max_commit_time_ms + + # Transaction spec says that after the initial commit attempt, + # subsequent commitTransaction commands should be upgraded to use + # w:"majority" and set a default value of 10 seconds for wtimeout. + if self._transaction.attempt > 1: + assert wc + wc_doc = wc.document + wc_doc["w"] = "majority" + wc_doc.setdefault("wtimeout", 10000) + wc = WriteConcern(**wc_doc) + + if self._transaction.recovery_token: + cmd["recoveryToken"] = self._transaction.recovery_token + + return await self._client.admin._command( + conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) + + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + """Internal cluster time helper.""" + if self._cluster_time is None: + self._cluster_time = cluster_time + elif cluster_time is not None: + if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: + self._cluster_time = cluster_time + + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: + """Update the cluster time for this session. + + :param cluster_time: The + :data:`~pymongo.asynchronous.client_session.AsyncClientSession.cluster_time` from + another `AsyncClientSession` instance. + """ + if not isinstance(cluster_time, _Mapping): + raise TypeError( + f"cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}" + ) + if not isinstance(cluster_time.get("clusterTime"), Timestamp): + raise ValueError("Invalid cluster_time") + self._advance_cluster_time(cluster_time) + + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: + """Internal operation time helper.""" + if self._operation_time is None: + self._operation_time = operation_time + elif operation_time is not None: + if operation_time > self._operation_time: + self._operation_time = operation_time + + def advance_operation_time(self, operation_time: Timestamp) -> None: + """Update the operation time for this session. 
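+
+        This can be used, for example, to make reads in one session causally
+        consistent with writes performed in another session. A minimal sketch
+        (the ``client``, ``db``, and ``coll`` names here are illustrative)::
+
+            async with client.start_session(causal_consistency=True) as s1:
+                await client.db.coll.insert_one({"x": 1}, session=s1)
+                op_time = s1.operation_time
+
+            async with client.start_session(causal_consistency=True) as s2:
+                s2.advance_operation_time(op_time)
+                # This read reflects the state at or after op_time.
+                await client.db.coll.find_one({"x": 1}, session=s2)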
+ + :param operation_time: The + :data:`~pymongo.asynchronous.client_session.AsyncClientSession.operation_time` from + another `AsyncClientSession` instance. + """ + if not isinstance(operation_time, Timestamp): + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) + self._advance_operation_time(operation_time) + + def _process_response(self, reply: Mapping[str, Any]) -> None: + """Process a response to a command that was run with this session.""" + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct + if self.in_transaction and self._transaction.sharded: + recovery_token = reply.get("recoveryToken") + if recovery_token: + self._transaction.recovery_token = recovery_token + + @property + def has_ended(self) -> bool: + """True if this session is finished.""" + return self._server_session is None + + @property + def in_transaction(self) -> bool: + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ + return self._transaction.active() + + @property + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: + """The mongos address this transaction was created on.""" + if self._transaction.active(): + return self._transaction.pinned_address + return None + + @property + def _pinned_connection(self) -> Optional[AsyncConnection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server: Server, conn: AsyncConnection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) + + async def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + await self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: + """Return read preference of this transaction or None.""" + if self.in_transaction: + assert self._transaction.opts + return self._transaction.opts.read_preference + return None + + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: AsyncConnection, + ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return + self._check_ended() + self._materialize(conn.logical_session_timeout_minutes) + if self.options.snapshot: + self._update_read_concern(command, conn) + + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id + + if is_retryable: + command["txnNumber"] = self._server_session.transaction_id + return + + if self.in_transaction: + if read_preference != ReadPreference.PRIMARY: + raise InvalidOperation( + f"read preference in a 
transaction must be primary, not: {read_preference!r}"
+                )
+
+            if self._transaction.state == _TxnState.STARTING:
+                # First command begins a new transaction.
+                self._transaction.state = _TxnState.IN_PROGRESS
+                command["startTransaction"] = True
+
+                assert self._transaction.opts
+                if self._transaction.opts.read_concern:
+                    rc = self._transaction.opts.read_concern.document
+                    if rc:
+                        command["readConcern"] = rc
+                self._update_read_concern(command, conn)
+
+            command["txnNumber"] = self._server_session.transaction_id
+            command["autocommit"] = False
+
+    def _start_retryable_write(self) -> None:
+        self._check_ended()
+        self._server_session.inc_transaction_id()
+
+    def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: AsyncConnection) -> None:
+        if self.options.causal_consistency and self.operation_time is not None:
+            cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time
+        if self.options.snapshot:
+            if conn.max_wire_version < 13:
+                raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later")
+            rc = cmd.setdefault("readConcern", {})
+            rc["level"] = "snapshot"
+            if self._snapshot_time is not None:
+                rc["atClusterTime"] = self._snapshot_time
+
+    def __copy__(self) -> NoReturn:
+        raise TypeError("An AsyncClientSession cannot be copied, create a new session instead")
+
+
+class _EmptyServerSession:
+    __slots__ = "dirty", "started_retryable_write"
+
+    def __init__(self) -> None:
+        self.dirty = False
+        self.started_retryable_write = False
+
+    def mark_dirty(self) -> None:
+        self.dirty = True
+
+    def inc_transaction_id(self) -> None:
+        self.started_retryable_write = True
+
+
+class _ServerSession:
+    def __init__(self, generation: int):
+        # Ensure id is type 4, regardless of CodecOptions.uuid_representation.
+        self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)}
+        self.last_use = time.monotonic()
+        self._transaction_id = 0
+        self.dirty = False
+        self.generation = generation
+
+    def mark_dirty(self) -> None:
+        """Mark this session as dirty.
+
+        A server session is marked dirty when a command fails with a network
+        error. Dirty sessions are later discarded from the server session pool.
+        """
+        self.dirty = True
+
+    def timed_out(self, session_timeout_minutes: Optional[int]) -> bool:
+        if session_timeout_minutes is None:
+            return False
+
+        idle_seconds = time.monotonic() - self.last_use
+
+        # Timed out if we have less than a minute to live.
+        return idle_seconds > (session_timeout_minutes - 1) * 60
+
+    @property
+    def transaction_id(self) -> Int64:
+        """Positive 64-bit integer."""
+        return Int64(self._transaction_id)
+
+    def inc_transaction_id(self) -> None:
+        self._transaction_id += 1
+
+
+class _ServerSessionPool(collections.deque):  # type: ignore[type-arg]
+    """Pool of _ServerSession objects.
+
+    This class is thread-safe.
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any):
+        super().__init__(*args, **kwargs)
+        self.generation = 0
+
+    def reset(self) -> None:
+        self.generation += 1
+        self.clear()
+
+    def pop_all(self) -> list[_ServerSession]:
+        ids = []
+        while True:
+            try:
+                ids.append(self.pop().session_id)
+            except IndexError:
+                break
+        return ids
+
+    def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession:
+        # Although the Driver Sessions Spec says we only clear stale sessions
+        # in return_server_session, PyMongo can't take a lock when returning
+        # sessions from a __del__ method (like in AsyncCursor.__die), so it can't
+        # clear stale sessions there.
In case many sessions were returned via + # __del__, check for stale sessions here too. + self._clear_stale(session_timeout_minutes) + + # The most recently used sessions are on the left. + while True: + try: + s = self.popleft() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + return s + + return _ServerSession(self.generation) + + def return_server_session(self, server_session: _ServerSession) -> None: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.generation == self.generation and not server_session.dirty: + self.appendleft(server_session) + + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: + # Clear stale sessions. The least recently used are on the right. + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) + # The remaining sessions also haven't timed out. + break diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py new file mode 100644 index 0000000000..e7e2f58031 --- /dev/null +++ b/pymongo/asynchronous/collection.py @@ -0,0 +1,3646 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Collection level utilities for Mongo.""" +from __future__ import annotations + +import warnings +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Coroutine, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot, common, helpers_shared, message +from pymongo.asynchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.asynchronous.bulk import _AsyncBulk +from pymongo.asynchronous.change_stream import AsyncCollectionChangeStream +from pymongo.asynchronous.command_cursor import ( + AsyncCommandCursor, + AsyncRawBatchCommandCursor, +) +from pymongo.asynchronous.cursor import ( + AsyncCursor, + AsyncRawBatchCursor, +) +from pymongo.collation import validate_collation_or_none +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, + _Op, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean + +_IS_SYNC = False + +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] + + +class ReturnDocument: + """An enum used with + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_replace` and + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_update`. + """ + + BEFORE = False + """Return the original document before it was updated/replaced, or + ``None`` if no document matches the query. 
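+
+    For example (``db.test`` is an illustrative collection)::
+
+        original = await db.test.find_one_and_update(
+            {"x": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.BEFORE
+        )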
+ """ + AFTER = True + """Return the updated/replaced or inserted document.""" + + +if TYPE_CHECKING: + import bson + from pymongo.asynchronous.aggregation import _AggregationCommand + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.database import AsyncDatabase + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.collation import Collation + from pymongo.read_concern import ReadConcern + + +class AsyncCollection(common.BaseObject, Generic[_DocumentType]): + """An asynchronous Mongo collection.""" + + def __init__( + self, + database: AsyncDatabase[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> None: + """Get / create an asynchronous Mongo collection. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used + as options passed to the create command. See + :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for valid + options. + + If `create` is ``True``, `collation` is specified, or any additional + keyword arguments are present, a ``create`` command will be + sent, using ``session`` if specified. Otherwise, a ``create`` command + will not be sent and the collection will be created implicitly on first + use. The optional ``session`` argument is *only* used for the ``create`` + command, it is not associated with the collection afterward. + + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: **Not supported by AsyncCollection**. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) database.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) database.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) database.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) database.read_concern is used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. If a collation is provided, + it will be passed to the create collection command. + :param session: **Not supported by AsyncCollection**. + :param kwargs: **Not supported by AsyncCollection**. + + .. versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + Removed the uuid_subtype attribute. 
+ :class:`~pymongo.asynchronous.collection.AsyncCollection` no longer returns an + instance of :class:`~pymongo.asynchronous.collection.AsyncCollection` for attribute + names with leading underscores. You must use dict-style lookups + instead:: + + collection['__my_collection__'] + + Not: + + collection.__my_collection__ + + .. seealso:: The MongoDB documentation on `collections `_. + """ + super().__init__( + codec_options or database.codec_options, + read_preference or database.read_preference, + write_concern or database.write_concern, + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + from pymongo.asynchronous.database import AsyncDatabase + + if not isinstance(database, AsyncDatabase): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") + + if not name or ".." in name: + raise InvalidName("collection names cannot be empty") + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) + if name[0] == "." or name[-1] == ".": + raise InvalidName("collection names must not start or end with '.': %r" % name) + if "\x00" in name: + raise InvalidName("collection names must not contain the null character") + + self._database: AsyncDatabase[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + + if create or kwargs: + if _IS_SYNC: + warnings.warn( + "The `create` and `kwargs` arguments to AsyncCollection are deprecated and will be removed in PyMongo 5.0", + DeprecationWarning, + stacklevel=2, + ) + self._create(kwargs, session) # type: ignore[unused-coroutine] + else: + raise ValueError( + "AsyncCollection does not support the `create` or `kwargs` arguments." + ) + + def __getattr__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> AsyncCollection[_DocumentType]: + return AsyncCollection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AsyncCollection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). 
Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`AsyncCollection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`AsyncCollection`.""" + return self._name + + @property + def database(self) -> AsyncDatabase[_DocumentType]: + """The :class:`~pymongo.asynchronous.database.AsyncDatabase` that this + :class:`AsyncCollection` is a part of. + """ + return self._database + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> AsyncCollection[_DocumentType] | AsyncCollection[_DocumentTypeArg]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncCollection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncCollection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncCollection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncCollection` + is used. + """ + return AsyncCollection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[AsyncClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncCollection' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on an 'AsyncDatabase' " + "object it is failing because no such method " + "exists." 
% self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + async def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[AsyncClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> AsyncCollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + async with await db.collection.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with await db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This AsyncCollection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. 
If provided, the
+            change stream will start returning changes that occur directly
+            after the operation specified in the resume token. A resume token
+            is the _id value of a change document.
+        :param max_await_time_ms: The maximum time in milliseconds
+            for the server to wait for changes before responding to a getMore
+            operation.
+        :param batch_size: The maximum number of documents to return
+            per batch.
+        :param collation: The :class:`~pymongo.collation.Collation`
+            to use for the aggregation.
+        :param start_at_operation_time: If provided, the resulting
+            change stream will only return changes that occurred at or after
+            the specified :class:`~bson.timestamp.Timestamp`. Requires
+            MongoDB >= 4.0.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param start_after: The same as `resume_after` except that
+            `start_after` can resume notifications after an invalidate event.
+            This option and `resume_after` are mutually exclusive.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+        :return: A :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` cursor.
+
+        .. versionchanged:: 4.3
+           Added `show_expanded_events` parameter.
+
+        .. versionchanged:: 4.2
+           Added ``full_document_before_change`` parameter.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.9
+           Added the ``start_after`` parameter.
+
+        .. versionchanged:: 3.7
+           Added the ``start_at_operation_time`` parameter.
+
+        .. versionadded:: 3.6
+
+        .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+        .. _change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = AsyncCollectionChangeStream(
+            self,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events,
+        )
+
+        await change_stream._initialize_cursor()
+        return change_stream
+
+    async def _conn_for_writes(
+        self, session: Optional[AsyncClientSession], operation: str
+    ) -> AsyncContextManager[AsyncConnection]:
+        return await self._database.client._conn_for_writes(session, operation)
+
+    async def _command(
+        self,
+        conn: AsyncConnection,
+        command: MutableMapping[str, Any],
+        read_preference: Optional[_ServerMode] = None,
+        codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None,
+        check: bool = True,
+        allowable_errors: Optional[Sequence[Union[str, int]]] = None,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        collation: Optional[_CollationIn] = None,
+        session: Optional[AsyncClientSession] = None,
+        retryable_write: bool = False,
+        user_fields: Optional[Any] = None,
+    ) -> Mapping[str, Any]:
+        """Internal command helper.
+
+        :param conn: An AsyncConnection instance.
+        :param command: The command itself, as a :class:`~bson.son.SON` instance.
+        :param read_preference: (optional) The read preference to use.
+        :param codec_options: (optional) An instance of
+            :class:`~bson.codec_options.CodecOptions`.
+        :param check: raise OperationFailure if there are errors
+        :param allowable_errors: errors to ignore if `check` is True
+        :param read_concern: (optional) An instance of
+            :class:`~pymongo.read_concern.ReadConcern`.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`.
+        :param collation: (optional) An instance of
+            :class:`~pymongo.collation.Collation`.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param retryable_write: True if this command is a retryable
+            write.
+        :param user_fields: Response fields that should be decoded
+            using the TypeDecoders from codec_options, passed to
+            bson._decode_all_selective.
+
+        :return: The result document.
+        """
+        async with self._database.client._tmp_session(session) as s:
+            return await conn.command(
+                self._database.name,
+                command,
+                read_preference or self._read_preference_for(session),
+                codec_options or self.codec_options,
+                check,
+                allowable_errors,
+                read_concern=read_concern,
+                write_concern=write_concern,
+                parse_write_concern_error=True,
+                collation=collation,
+                session=s,
+                client=self._database.client,
+                retryable_write=retryable_write,
+                user_fields=user_fields,
+            )
+
+    async def _create_helper(
+        self,
+        name: str,
+        options: MutableMapping[str, Any],
+        collation: Optional[_CollationIn],
+        session: Optional[AsyncClientSession],
+        encrypted_fields: Optional[Mapping[str, Any]] = None,
+        qev2_required: bool = False,
+    ) -> None:
+        """Sends a create command with the given options."""
+        cmd: dict[str, Any] = {"create": name}
+        if encrypted_fields:
+            cmd["encryptedFields"] = encrypted_fields
+
+        if options:
+            if "size" in options:
+                options["size"] = float(options["size"])
+            cmd.update(options)
+        async with await self._conn_for_writes(session, operation=_Op.CREATE) as conn:
+            if qev2_required and conn.max_wire_version < 21:
+                raise ConfigurationError(
+                    "Driver support of Queryable Encryption is incompatible with server. "
+                    "Upgrade server to use Queryable Encryption. "
+                    f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)"
+                )
+
+            await self._command(
+                conn,
+                cmd,
+                read_preference=ReadPreference.PRIMARY,
+                write_concern=self._write_concern_for(session),
+                collation=collation,
+                session=session,
+            )
+
+    async def _create(
+        self,
+        options: MutableMapping[str, Any],
+        session: Optional[AsyncClientSession],
+    ) -> None:
+        collation = validate_collation_or_none(options.pop("collation", None))
+        encrypted_fields = options.pop("encryptedFields", None)
+        if encrypted_fields:
+            common.validate_is_mapping("encrypted_fields", encrypted_fields)
+            opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}}
+            await self._create_helper(
+                _esc_coll_name(encrypted_fields, self._name),
+                opts,
+                None,
+                session,
+                qev2_required=True,
+            )
+            await self._create_helper(
+                _ecoc_coll_name(encrypted_fields, self._name), opts, None, session
+            )
+            await self._create_helper(
+                self._name, options, collation, session, encrypted_fields=encrypted_fields
+            )
+            await self.create_index([("__safeContent__", ASCENDING)], session)
+        else:
+            await self._create_helper(self._name, options, collation, session)
+
+    @_csot.apply
+    async def bulk_write(
+        self,
+        requests: Sequence[_WriteOp[_DocumentType]],
+        ordered: bool = True,
+        bypass_document_validation: Optional[bool] = None,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        let: Optional[Mapping[str, Any]] = None,
+    ) -> BulkWriteResult:
+        """Send a batch of write operations to the server.
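+
+        With an acknowledged write concern, a failed operation raises
+        :class:`~pymongo.errors.BulkWriteError`, which carries per-operation
+        error details (an illustrative sketch; ``requests`` is defined as in
+        the example below)::
+
+            from pymongo.errors import BulkWriteError
+
+            try:
+                await db.test.bulk_write(requests, ordered=False)
+            except BulkWriteError as bwe:
+                # Each entry notes the index of the failed request and the error.
+                print(bwe.details["writeErrors"])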
+ + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + ... + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), + ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] + >>> result = await db.test.bulk_write(requests) + >>> result.inserted_count + 1 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {2: ObjectId('54f62ee28891e756a6e1abd5')} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + + :param requests: A list of write operations (see examples above). + :param ordered: If ``True`` (the default) requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False`` requests + will be performed on the server in arbitrary order, possibly in + parallel, and all operations will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. 
versionadded:: 3.0 + """ + common.validate_list("requests", requests) + + blk = _AsyncBulk(self, ordered, bypass_document_validation, comment=comment, let=let) + for request in requests: + try: + request._add_to_bulk(blk) + except AttributeError: + raise TypeError(f"{request!r} is not a valid request") from None + + write_concern = self._write_concern_for(session) + bulk_api_result = await blk.execute(write_concern, session, _Op.INSERT) + if bulk_api_result is not None: + return BulkWriteResult(bulk_api_result, True) + return BulkWriteResult({}, False) + + async def _insert_one( + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: Optional[bool], + session: Optional[AsyncClientSession], + comment: Optional[Any] = None, + ) -> Any: + """Internal helper for inserting a single document.""" + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} + if comment is not None: + command["comment"] = comment + + async def _insert_command( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> None: + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + result = await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + + _check_write_command_response(result) + + await self._database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) + + if not isinstance(doc, RawBSONDocument): + return doc.get("_id") + return None + + async def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: Optional[bool] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: + """Insert a single document. + + >>> await db.test.count_documents({'x': 1}) + 0 + >>> result = await db.test.insert_one({'x': 1}) + >>> result.inserted_id + ObjectId('54f112defba522406c9cc208') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} + + :param document: The document to insert. Must be a mutable mapping + type. If the document does not have an _id field one will be + added automatically. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. 
versionadded:: 3.0
+        """
+        common.validate_is_document_type("document", document)
+        if not (isinstance(document, RawBSONDocument) or "_id" in document):
+            document["_id"] = ObjectId()  # type: ignore[index]
+
+        write_concern = self._write_concern_for(session)
+        return InsertOneResult(
+            await self._insert_one(
+                document,
+                ordered=True,
+                write_concern=write_concern,
+                op_id=None,
+                bypass_doc_val=bypass_document_validation,
+                session=session,
+                comment=comment,
+            ),
+            write_concern.acknowledged,
+        )
+
+    @_csot.apply
+    async def insert_many(
+        self,
+        documents: Iterable[Union[_DocumentType, RawBSONDocument]],
+        ordered: bool = True,
+        bypass_document_validation: Optional[bool] = None,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> InsertManyResult:
+        """Insert an iterable of documents.
+
+        >>> await db.test.count_documents({})
+        0
+        >>> result = await db.test.insert_many([{'x': i} for i in range(2)])
+        >>> result.inserted_ids
+        [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
+        >>> await db.test.count_documents({})
+        2
+
+        :param documents: An iterable of documents to insert.
+        :param ordered: If ``True`` (the default) documents will be
+            inserted on the server serially, in the order provided. If an error
+            occurs all remaining inserts are aborted. If ``False``, documents
+            will be inserted on the server in arbitrary order, possibly in
+            parallel, and all document inserts will be attempted.
+        :param bypass_document_validation: (optional) If ``True``, allows the
+            write to opt-out of document level validation. Default is
+            ``False``.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        :return: An instance of :class:`~pymongo.results.InsertManyResult`.
+
+        .. seealso:: `Writes and ids `_
+
+        .. note:: `bypass_document_validation` requires server version
+           **>= 3.2**
+
+        ..
versionadded:: 3.0 + """ + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): + raise TypeError("documents must be a non-empty list") + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: + """A generator that validates documents and handles _ids.""" + for document in documents: + common.validate_is_document_type("document", document) + if not isinstance(document, RawBSONDocument): + if "_id" not in document: + document["_id"] = ObjectId() # type: ignore[index] + inserted_ids.append(document["_id"]) + yield (message._INSERT, document) + + write_concern = self._write_concern_for(session) + blk = _AsyncBulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) + await blk.execute(write_concern, session, _Op.INSERT) + return InsertManyResult(inserted_ids, write_concern.acknowledged) + + async def _update( + self, + conn: AsyncConnection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + validate_boolean("upsert", upsert) + collation = validate_collation_or_none(collation) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + update_doc["collation"] = collation + if array_filters is not None: + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + else: + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Update command. + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + # The command result has to be published for APM unmodified + # so we make a shallow copy here before adding updatedExisting. 
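+        # (A shallow copy is sufficient: only top-level keys such as
+        # "updatedExisting" and "upserted" are modified below.)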
+ result = ( + await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + ).copy() + _check_write_command_response(result) + # Add the updatedExisting field for compatibility. + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True + else: + result["updatedExisting"] = False + # MongoDB >= 2.6.0 returns the upsert _id in an array + # element. Break it out for backward compatibility. + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] + + if not acknowledged: + return None + return result + + async def _update_retryable( + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + operation: str, + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + + async def _update( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: + return await self._update( + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + sort=sort, + comment=comment, + ) + + return await self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _update, + session, + operation, + ) + + async def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Replace a single document matching the filter. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + >>> result = await db.test.replace_one({'x': 1}, {'y': 1}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + + The *upsert* option can be used to insert a new document if a matching + document does not exist. + + >>> result = await db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('54f11e5c8891e756a6e1abd4') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} + + :param filter: A query that matches the document to replace. + :param replacement: The new document. 
+ :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_replace(replacement) + if let is not None: + common.validate_is_mapping("let", let) + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + replacement, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update a single document matching the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. 
+ + >>> result = await db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> await db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Added ``bypass_document_validation`` support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update one or more documents that match the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... 
+ {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 3 + >>> result.modified_count + 3 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + multi=True, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def drop( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: + """Alias for :meth:`~pymongo.asynchronous.database.AsyncDatabase.drop_collection`. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. + + The following two calls are equivalent: + + >>> await db.foo.drop() + >>> await db.drop_collection("foo") + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.7 + :meth:`drop` now respects this :class:`AsyncCollection`'s :attr:`write_concern`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
+ """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + await dbo.drop_collection( + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) + + async def _delete( + self, + conn: AsyncConnection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + common.validate_is_mapping("filter", criteria) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + delete_doc = {"q": criteria, "limit": int(not multi)} + collation = validate_collation_or_none(collation) + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + delete_doc["hint"] = hint + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + + # Delete command. + result = await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + _check_write_command_response(result) + return result + + async def _delete_retryable( + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + + async def _delete( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Mapping[str, Any]: + return await self._delete( + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return await self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, + ) + + async def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete a single document matching the filter. 
+ + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_one({'x': 1}) + >>> result.deleted_count + 1 + >>> await db.test.count_documents({'x': 1}) + 2 + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + await self._delete_retryable( + filter, + False, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete one or more documents matching the filter. + + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_many({'x': 1}) + >>> result.deleted_count + 3 + >>> await db.test.count_documents({'x': 1}) + 0 + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. 
versionadded:: 3.0
+        """
+        write_concern = self._write_concern_for(session)
+        return DeleteResult(
+            await self._delete_retryable(
+                filter,
+                True,
+                write_concern=write_concern,
+                collation=collation,
+                hint=hint,
+                session=session,
+                let=let,
+                comment=comment,
+            ),
+            write_concern.acknowledged,
+        )
+
+    async def find_one(
+        self, filter: Optional[Any] = None, *args: Any, **kwargs: Any
+    ) -> Optional[_DocumentType]:
+        """Get a single document from the database.
+
+        All arguments to :meth:`find` are also valid arguments for
+        :meth:`find_one`, although any `limit` argument will be
+        ignored. Returns a single document, or ``None`` if no matching
+        document is found.
+
+        The :meth:`find_one` method obeys the :attr:`read_preference` of
+        this :class:`AsyncCollection`.
+
+        :param filter: a dictionary specifying
+            the query to be performed OR any other type to be used as
+            the value for a query for ``"_id"``.
+
+        :param args: any additional positional arguments
+            are the same as the arguments to :meth:`find`.
+
+        :param kwargs: any additional keyword arguments
+            are the same as the arguments to :meth:`find`.
+
+        .. code-block:: python
+
+            >>> await collection.find_one(max_time_ms=100)
+
+        """
+        if filter is not None and not isinstance(filter, abc.Mapping):
+            filter = {"_id": filter}
+        cursor = self.find(filter, *args, **kwargs)
+        async for result in cursor.limit(-1):
+            return result
+        return None
+
+    def find(self, *args: Any, **kwargs: Any) -> AsyncCursor[_DocumentType]:
+        """Query the database.
+
+        The `filter` argument is a query document that all results
+        must match. For example:
+
+        >>> db.test.find({"hello": "world"})
+
+        only matches documents that have a key "hello" with value
+        "world". Matches can have other keys *in addition* to
+        "hello". The `projection` argument is used to specify a subset
+        of fields that should be included in the result documents. By
+        limiting results to a certain subset of fields you can cut
+        down on network traffic and decoding time.
+
+        Raises :class:`TypeError` if any of the arguments are of
+        improper type. Returns an instance of
+        :class:`~pymongo.asynchronous.cursor.AsyncCursor` corresponding to this query.
+
+        Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+        If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+        To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed,
+        or use the cursor in a with statement::
+
+            async with collection.find() as cursor:
+                async for doc in cursor:
+                    print(doc)
+
+        The :meth:`find` method obeys the :attr:`read_preference` of
+        this :class:`AsyncCollection`.
+
+        :param filter: A query document that selects which documents
+            to include in the result set. Can be an empty document to include
+            all documents.
+        :param projection: a list of field names that should be
+            returned in the result set or a dict specifying the fields
+            to include or exclude. If `projection` is a list, "_id" will
+            always be returned. Use a dict to exclude fields from
+            the result (e.g. projection={'_id': False}).
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param skip: the number of documents to omit (from
+            the start of the result set) when returning the results
+        :param limit: the maximum number of results to
+            return. A limit of 0 (the default) is equivalent to setting no
+            limit.
+ :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param cursor_type: the type of cursor to return. The valid + options are defined by :class:`~pymongo.cursor.CursorType`: + + - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of + this find call will return a standard cursor over the result set. + - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this + find call will be a tailable cursor - tailable cursors are only + for use with capped collections. They are not closed when the + last data is retrieved but are kept open and the cursor location + marks the final document position. If more data is received + iteration of the cursor will continue from the last document + received. For details, see the `tailable cursor documentation + `_. + - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result + of this find call will be a tailable cursor with the await flag + set. The server will wait for a few seconds after returning the + full result set so that it can capture and return additional data + added during the query. + - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this + find call will be an exhaust cursor. MongoDB will stream batched + results to the client without waiting for the client to request + each batch, reducing latency. See notes on compatibility below. + + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.sort` for details. + :param allow_partial_results: if True, mongos will return + partial results if some shards are down instead of returning an + error. + :param oplog_replay: **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. + :param batch_size: Limits the number of documents returned in + a single batch. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param return_key: If True, return only the index keys in + each document. + :param show_record_id: If True, adds a field ``$recordId`` in + each document with the storage engine's internal record identifier. + :param snapshot: **DEPRECATED** - If True, prevents the + cursor from returning a document more than once because of an + intervening write operation. + :param hint: An index, in the same format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.hint` on the cursor to tell Mongo the + proper index to use for the query. + :param max_time_ms: Specifies a time limit for a query + operation. If the specified time is exceeded, the operation will be + aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass + this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max_time_ms` on the cursor. + :param max_scan: **DEPRECATED** - The maximum number of + documents to scan. Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max_scan` on the cursor. + :param min: A list of field, limit pairs specifying the + inclusive lower bound for all keys of a specific index in order. 
+            Pass this as an alternative to calling
+            :meth:`~pymongo.asynchronous.cursor.AsyncCursor.min` on the cursor. ``hint`` must
+            also be passed to ensure the query utilizes the correct index.
+        :param max: A list of field, limit pairs specifying the
+            exclusive upper bound for all keys of a specific index in order.
+            Pass this as an alternative to calling
+            :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max` on the cursor. ``hint`` must
+            also be passed to ensure the query utilizes the correct index.
+        :param comment: A string to attach to the query to help
+            interpret and trace the operation in the server logs and in profile
+            data. Pass this as an alternative to calling
+            :meth:`~pymongo.asynchronous.cursor.AsyncCursor.comment` on the cursor.
+        :param allow_disk_use: if True, MongoDB may use temporary
+            disk files to store data exceeding the system memory limit while
+            processing a blocking sort operation. The option has no effect if
+            MongoDB can satisfy the specified sort using an index, or if the
+            blocking sort requires less memory than the 100 MiB limit. This
+            option is only supported on MongoDB 4.4 and above.
+
+        .. note:: There are a number of caveats to using
+           :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
+
+           - The `limit` option cannot be used with an exhaust cursor.
+
+           - Exhaust cursors are not supported by mongos and cannot be
+             used with a sharded cluster.
+
+           - An :class:`~pymongo.asynchronous.cursor.AsyncCursor` instance created with the
+             :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
+             exclusive :class:`~socket.socket` connection to MongoDB. If the
+             :class:`~pymongo.asynchronous.cursor.AsyncCursor` is discarded without being
+             completely iterated, the underlying :class:`~socket.socket`
+             connection will be closed and discarded without being returned to
+             the connection pool.
+
+        .. versionchanged:: 4.0
+           Removed the ``modifiers`` option.
+           Empty projections (e.g. {} or []) are passed to the server as-is,
+           rather than the previous behavior which substituted in a
+           projection of ``{"_id": 1}``. This means that an empty projection
+           will now return the entire document, not just the ``"_id"`` field.
+
+        .. versionchanged:: 3.11
+           Added the ``allow_disk_use`` option.
+           Deprecated the ``oplog_replay`` option. Support for this option is
+           deprecated in MongoDB 4.4. The query engine now automatically
+           optimizes queries against the oplog without requiring this
+           option to be set.
+
+        .. versionchanged:: 3.7
+           Deprecated the ``snapshot`` option, which is deprecated in MongoDB
+           3.6 and removed in MongoDB 4.0.
+           Deprecated the ``max_scan`` option. Support for this option is
+           deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit
+           server-side execution time.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. versionchanged:: 3.5
+           Added the options ``return_key``, ``show_record_id``, ``snapshot``,
+           ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and
+           ``comment``.
+           Deprecated the ``modifiers`` option.
+
+        .. versionchanged:: 3.4
+           Added support for the ``collation`` option.
+
+        .. versionchanged:: 3.0
+           Changed the parameter names ``spec``, ``fields``, ``timeout``, and
+           ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``,
+           and ``allow_partial_results`` respectively.
+           Added the ``cursor_type``, ``oplog_replay``, and ``modifiers``
+           options.
+           Removed the ``network_timeout``, ``read_preference``, ``tag_sets``,
+           ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``,
+           ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and
+           ``slave_okay`` parameters.
+           Removed ``compile_re`` option: PyMongo now always
+           represents BSON regular expressions as :class:`~bson.regex.Regex`
+           objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
+           convert from a BSON regular expression to a Python regular
+           expression object.
+           Soft deprecated the ``manipulate`` option.
+
+        .. seealso:: The MongoDB documentation on `find `_.
+        """
+        return AsyncCursor(self, *args, **kwargs)
+
+    def find_raw_batches(self, *args: Any, **kwargs: Any) -> AsyncRawBatchCursor[_DocumentType]:
+        """Query the database and retrieve batches of raw BSON.
+
+        Similar to the :meth:`find` method but returns a
+        :class:`~pymongo.asynchronous.cursor.AsyncRawBatchCursor`.
+
+        This example demonstrates how to work with raw batches, but in practice
+        raw batches should be passed to an external library that can decode
+        BSON into another data type, rather than used with PyMongo's
+        :mod:`bson` module.
+
+        >>> import bson
+        >>> cursor = db.test.find_raw_batches()
+        >>> async for batch in cursor:
+        ...     print(bson.decode_all(batch))
+
+        .. note:: find_raw_batches does not support auto encryption.
+
+        .. versionchanged:: 3.12
+           Instead of ignoring the user-specified read concern, this method
+           now sends it to the server when connected to MongoDB 3.6+.
+
+           Added session support.
+
+        .. versionadded:: 3.6
+        """
+        # OP_MSG is required to support encryption.
+        if self._database.client._encrypter:
+            raise InvalidOperation("find_raw_batches does not support auto encryption")
+        return AsyncRawBatchCursor(self, *args, **kwargs)
+
+    async def _count_cmd(
+        self,
+        session: Optional[AsyncClientSession],
+        conn: AsyncConnection,
+        read_preference: Optional[_ServerMode],
+        cmd: dict[str, Any],
+        collation: Optional[Collation],
+    ) -> int:
+        """Internal count command helper."""
+        res = await self._command(
+            conn,
+            cmd,
+            read_preference=read_preference,
+            codec_options=self._write_response_codec_options,
+            read_concern=self.read_concern,
+            collation=collation,
+            session=session,
+        )
+        return int(res["n"])
+
+    async def _aggregate_one_result(
+        self,
+        conn: AsyncConnection,
+        read_preference: Optional[_ServerMode],
+        cmd: dict[str, Any],
+        collation: Optional[_CollationIn],
+        session: Optional[AsyncClientSession],
+    ) -> Optional[Mapping[str, Any]]:
+        """Internal helper to run an aggregate that returns a single result."""
+        result = await self._command(
+            conn,
+            cmd,
+            read_preference,
+            allowable_errors=[26],  # Ignore NamespaceNotFound.
+            codec_options=self._write_response_codec_options,
+            read_concern=self.read_concern,
+            collation=collation,
+            session=session,
+        )
+        # cursor will not be present for NamespaceNotFound errors.
+        if "cursor" not in result:
+            return None
+        batch = result["cursor"]["firstBatch"]
+        return batch[0] if batch else None
+
+    async def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int:
+        """Get an estimate of the number of documents in this collection using
+        collection metadata.
+
+        The :meth:`estimated_document_count` method is **not** supported in a
+        transaction.
+
+        All optional parameters should be passed as keyword arguments
+        to this method. Valid options include:
+
+        - `maxTimeMS` (int): The maximum amount of time to allow this
+          operation to run, in milliseconds.
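+
+        For example, a minimal usage sketch (the ``db.test`` collection and the
+        returned count are illustrative)::
+
+            >>> await db.test.estimated_document_count(maxTimeMS=1000)
+            3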
+ + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + `versioned API `_. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + + .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ + """ + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"count": self._name} + cmd.update(kwargs) + return await self._count_cmd(session, conn, read_preference, cmd, collation=None) + + return await self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) + + async def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: + """Count the number of documents in this collection. + + .. note:: For a fast count of the total documents in a collection see + :meth:`estimated_document_count`. + + The :meth:`count_documents` method is supported in a transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `skip` (int): The number of matching documents to skip before + returning results. + - `limit` (int): The maximum number of documents to count. Must be + a positive integer. If not provided, no limit is imposed. + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `hint` (string or list of tuples): The index to use. Specify either + the index name as a string or the index specification as a list of + tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). + + The :meth:`count_documents` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + .. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+-------------------------------------+ + | Operator | Replacement | + +=============+=====================================+ + | $where | `$expr`_ | + +-------------+-------------------------------------+ + | $near | `$geoWithin`_ with `$center`_ | + +-------------+-------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | + +-------------+-------------------------------------+ + + :param filter: A query document that selects which documents + to count in the collection. Can be an empty document to count all + documents. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + + .. versionadded:: 3.7 + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. 
_$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ + """ + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + if "hint" in kwargs and not isinstance(kwargs["hint"], str): + kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) + result = await self._aggregate_one_result( + conn, read_preference, cmd, collation, session + ) + if not result: + return 0 + return result["n"] + + return await self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) + + async def _retryable_non_cursor_read( + self, + func: Callable[ + [Optional[AsyncClientSession], Server, AsyncConnection, Optional[_ServerMode]], + Coroutine[Any, Any, T], + ], + session: Optional[AsyncClientSession], + operation: str, + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self._database.client + async with client._tmp_session(session) as s: + return await client._retryable_read(func, self._read_preference_for(s), s, operation) + + async def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. + + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> await db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 + + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ + """ + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return await self._create_indexes(indexes, session, **kwargs) + + @_csot.apply + async def _create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[AsyncClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. 
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param kwargs: optional arguments to the createIndexes
+            command (like maxTimeMS) can be passed as keyword arguments.
+        """
+        names = []
+        async with await self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn:
+            supports_quorum = conn.max_wire_version >= 9
+
+            def gen_indexes() -> Iterator[Mapping[str, Any]]:
+                for index in indexes:
+                    if not isinstance(index, IndexModel):
+                        raise TypeError(
+                            f"{index!r} is not an instance of pymongo.operations.IndexModel"
+                        )
+                    document = index.document
+                    names.append(document["name"])
+                    yield document
+
+            cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())}
+            cmd.update(kwargs)
+            if "commitQuorum" in kwargs and not supports_quorum:
+                raise ConfigurationError(
+                    "Must be connected to MongoDB 4.4+ to use the "
+                    "commitQuorum option for createIndexes"
+                )
+
+            await self._command(
+                conn,
+                cmd,
+                read_preference=ReadPreference.PRIMARY,
+                codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
+                write_concern=self._write_concern_for(session),
+                session=session,
+            )
+            return names
+
+    async def create_index(
+        self,
+        keys: _IndexKeyHint,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> str:
+        """Creates an index on this collection.
+
+        Takes either a single key or a list containing (key, direction) pairs
+        or keys. If no direction is given, :data:`~pymongo.ASCENDING` will
+        be assumed.
+        The key(s) must be an instance of :class:`str` and the direction(s) must
+        be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
+        :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`,
+        :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`).
+
+        To create a single key ascending index on the key ``'mike'`` we just
+        use a string argument::
+
+          >>> await my_collection.create_index("mike")
+
+        For a compound index on ``'mike'`` descending and ``'eliot'``
+        ascending we need to use a list of tuples::
+
+          >>> await my_collection.create_index([("mike", pymongo.DESCENDING),
+          ...                                   "eliot"])
+
+        All optional index creation parameters should be passed as
+        keyword arguments to this method. For example::
+
+          >>> await my_collection.create_index([("mike", pymongo.DESCENDING)],
+          ...                                   background=True)
+
+        Valid options include, but are not limited to:
+
+        - `name`: custom name to use for this index - if none is
+          given, a name will be generated.
+        - `unique`: if ``True``, creates a uniqueness constraint on the
+          index.
+        - `background`: if ``True``, this index should be created in the
+          background.
+        - `sparse`: if ``True``, omit from the index any documents that lack
+          the indexed field.
+        - `bucketSize`: for use with geoHaystack indexes.
+          Number of documents to group together within a certain proximity
+          to a given longitude and latitude.
+        - `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
+          index.
+        - `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
+          index.
+        - `expireAfterSeconds`: Used to create an expiring (TTL)
+          collection. MongoDB will automatically delete documents from
+          this collection after ``<int>`` seconds. The indexed field must
+          be a UTC datetime or the data will not expire.
+        - `partialFilterExpression`: A document that specifies a filter for
+          a partial index.
+        - `collation` (optional): An instance of
+          :class:`~pymongo.collation.Collation`.
+        - `wildcardProjection`: Allows users to include or exclude specific
+          field paths from a `wildcard index`_ using the {"$**" : 1} key
+          pattern. Requires MongoDB >= 4.2.
+        - `hidden`: if ``True``, this index will be hidden from the query
+          planner and will not be evaluated as part of query plan
+          selection. Requires MongoDB >= 4.4.
+
+        See the MongoDB documentation for a full list of supported options by
+        server version.
+
+        .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
+          option is silently ignored by the server and unique index builds
+          using the option will fail if a duplicate value is detected.
+
+        .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of
+           this collection is automatically applied to this operation.
+
+        :param keys: a single key or a list of (key, direction)
+            pairs specifying the index to create
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: any additional index creation
+            options (see the above list) should be passed as keyword
+            arguments.
+
+        .. versionchanged:: 4.4
+           Allow passing a list containing (key, direction) pairs
+           or keys for the ``keys`` parameter.
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+        .. versionchanged:: 3.11
+           Added the ``hidden`` option.
+        .. versionchanged:: 3.6
+           Added ``session`` parameter. Added support for passing maxTimeMS
+           in kwargs.
+        .. versionchanged:: 3.4
+           Apply this collection's write concern automatically to this operation
+           when connected to MongoDB >= 3.4. Support the `collation` option.
+        .. versionchanged:: 3.2
+           Added partialFilterExpression to support partial indexes.
+        .. versionchanged:: 3.0
+           Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
+           :meth:`create_index` no longer caches index names. Removed support
+           for the drop_dups and bucket_size aliases.
+
+        .. seealso:: The MongoDB documentation on `indexes `_.
+
+        .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/
+        """
+        cmd_options = {}
+        if "maxTimeMS" in kwargs:
+            cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
+        if comment is not None:
+            cmd_options["comment"] = comment
+        index = IndexModel(keys, **kwargs)
+        return (await self._create_indexes([index], session, **cmd_options))[0]
+
+    async def drop_indexes(
+        self,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Drops all indexes on this collection.
+
+        Can be used on non-existent collections or collections with no indexes.
+        Raises OperationFailure on an error.
+
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: optional arguments to the dropIndexes
+            command (like maxTimeMS) can be passed as keyword arguments.
+
+        .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of
+           this collection is automatically applied to this operation.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter. Added support for arbitrary keyword
+           arguments.
+
+        .. versionchanged:: 3.4
+           Apply this collection's write concern automatically to this operation
+           when connected to MongoDB >= 3.4.
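+
+        A minimal usage sketch (``db.test`` is illustrative; this drops every
+        index on the collection except the required ``_id`` index)::
+
+            >>> await db.test.drop_indexes()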
+ """ + if comment is not None: + kwargs["comment"] = comment + await self._drop_index("*", session=session, **kwargs) + + @_csot.apply + async def drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops the specified index on this collection. + + Can be used on non-existent collections or collections with no + indexes. Raises OperationFailure on an error (e.g. trying to + drop an index that does not exist). `index_or_name` + can be either an index name (as returned by `create_index`), + or an index specifier (as passed to `create_index`). An index + specifier should be a list of (key, direction) pairs. Raises + TypeError if index is not an instance of (str, unicode, list). + + .. warning:: + + if a custom name was used on index creation (by + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name. + + :param index_or_name: index (or name of index) to drop + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + await self._drop_index(index_or_name, session, comment, **kwargs) + + @_csot.apply + async def _drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + name = index_or_name + if isinstance(index_or_name, list): + name = helpers_shared._gen_index_name(index_or_name) + + if not isinstance(name, str): + raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}") + + cmd = {"dropIndexes": self._name, "index": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + async def list_indexes( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the index documents for this collection. + + >>> async for index in await db.test.list_indexes(): + ... print(index) + ... + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. 
+ To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_indexes() as cursor: + async for index in cursor: + print(index) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionadded:: 3.0 + """ + return await self._list_indexes(session, comment) + + async def _list_indexes( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) + coll = cast( + AsyncCollection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + cmd = {"listIndexes": self._name, "cursor": {}} + if comment is not None: + cmd["comment"] = comment + + try: + cursor = ( + await self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = AsyncCommandCursor( + coll, + cursor, + conn.address, + session=session, + comment=cmd.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + async with self._database.client._tmp_session(session) as s: + return await self._database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) + + async def index_information( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get information on this collection's indexes. + + Returns a dictionary where the keys are index names (as + returned by create_index()) and the values are dictionaries + containing information about each index. The dictionary is + guaranteed to contain at least a single key, ``"key"`` which + is a list of (key, direction) pairs specifying the index (as + passed to create_index()). It will also contain any other + metadata about the indexes, except for the ``"ns"`` and + ``"name"`` keys, which are cleaned. Example output might look + like this: + + >>> await db.test.create_index("x", unique=True) + 'x_1' + >>> await db.test.index_information() + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
+ """ + cursor = await self._list_indexes(session=session, comment=comment) + info = {} + async for index in cursor: + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 + info[index.pop("name")] = index + return info + + async def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_search_indexes() as cursor: + async for index in cursor: + print(index) + + :param name: If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, + ) + cmd = _CollectionAggregationCommand( + coll, + AsyncCommandCursor, + pipeline, + kwargs, + comment=comment, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return await self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, + ) + + async def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :param model: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. 
versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(**model) + return (await self._create_search_indexes([model], session, comment, **kwargs))[0] + + async def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + return await self._create_search_indexes(models, session, comment, **kwargs) + + async def _create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in models: + if not isinstance(index, SearchIndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" + ) + yield index.document + + cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + + async with await self._conn_for_writes( + session, operation=_Op.CREATE_SEARCH_INDEXES + ) as conn: + resp = await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + return [index["name"] for index in resp["indexesCreated"]] + + async def drop_search_index( + self, + name: str, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Delete a search index by index name. + + :param name: The name of the search index to be deleted. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"dropSearchIndex": self._name, "name": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + async def update_search_index( + self, + name: str, + definition: Mapping[str, Any], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Update a search index by replacing the existing index definition with the provided definition. + + :param name: The name of the search index to be updated. + :param definition: The new search index definition. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. 
+ :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the updateSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + async def options( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get the options set on this collection. + + Returns a dictionary of options and their values - see + :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for more + information on the possible options. Returns an empty + dictionary if the collection has not been created yet. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + cursor = await dbo.list_collections( + session=session, filter={"name": self._name}, comment=comment + ) + + result = None + async for doc in cursor: + result = doc + break + + if not result: + return {} + + options = result.get("options", {}) + assert options is not None + if "create" in options: + del options["create"] + + return options + + @_csot.apply + async def _aggregate( + self, + aggregation_command: Type[_AggregationCommand], + pipeline: _Pipeline, + cursor_class: Type[AsyncCommandCursor], # type: ignore[type-arg] + session: Optional[AsyncClientSession], + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + if comment is not None: + kwargs["comment"] = comment + cmd = aggregation_command( + self, + cursor_class, + pipeline, + kwargs, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return await self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + async def aggregate( + self, + pipeline: _Pipeline, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + """Perform an aggregation using the aggregation framework on this + collection. + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`AsyncCollection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + + .. note:: This method does not support the 'explain' option. Please + use `PyMongoExplain `_ + instead. An example is included in the `aggregation example `_ + documentation. + + .. 
note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of
+           this collection is automatically applied to this operation.
+
+        Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+        If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+        To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed,
+        or use the cursor in a with statement::
+
+            async with await collection.aggregate(pipeline) as cursor:
+                async for doc in cursor:
+                    print(doc)
+
+        :param pipeline: a list of aggregation pipeline stages
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param let: A dict of parameter names and values. Values must be
+            constant or closed expressions that do not reference document
+            fields. Parameters can then be accessed as variables in an
+            aggregate expression context (e.g. ``"$$var"``). This option is
+            only supported on MongoDB >= 5.0.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: extra `aggregate command`_ parameters.
+
+        All optional `aggregate command`_ parameters should be passed as
+        keyword arguments to this method. Valid options include, but are not
+        limited to:
+
+        - `allowDiskUse` (bool): Enables writing to temporary files. When set
+          to True, aggregation stages can write data to the _tmp subdirectory
+          of the --dbpath directory. The default is False.
+        - `maxTimeMS` (int): The maximum amount of time to allow the operation
+          to run in milliseconds.
+        - `batchSize` (int): The maximum number of documents to return per
+          batch. Ignored if the connected mongod or mongos does not support
+          returning aggregate results using a cursor.
+        - `collation` (optional): An instance of
+          :class:`~pymongo.collation.Collation`.
+        - `bypassDocumentValidation` (bool): If ``True``, allows the write to
+          opt-out of document level validation.
+
+        :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result
+            set.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+           Added ``let`` parameter.
+           Support $merge and $out executing on secondaries according to the
+           collection's :attr:`read_preference`.
+        .. versionchanged:: 4.0
+           Removed the ``useCursor`` option.
+        .. versionchanged:: 3.9
+           Apply this collection's read concern to pipelines containing the
+           `$out` stage when connected to MongoDB >= 4.2.
+           Added support for the ``$merge`` pipeline stage.
+           Aggregations that write always use read preference
+           :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
+        .. versionchanged:: 3.6
+           Added the `session` parameter. Added the `maxAwaitTimeMS` option.
+           Deprecated the `useCursor` option.
+        .. versionchanged:: 3.4
+           Apply this collection's write concern automatically to this operation
+           when connected to MongoDB >= 3.4. Support the `collation` option.
+        .. versionchanged:: 3.0
+           The :meth:`aggregate` method always returns an AsyncCommandCursor. The
+           pipeline argument must be a list.
+
+        .. seealso:: `Aggregation `_
+
+        .. _aggregate command:
_aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + async with self._database.client._tmp_session(session) as s: + return await self._aggregate( + _CollectionAggregationCommand, + pipeline, + AsyncCommandCursor, + session=s, + let=let, + comment=comment, + **kwargs, + ) + + async def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncRawBatchCursor[_DocumentType]: + """Perform an aggregation and retrieve batches of raw BSON. + + Similar to the :meth:`aggregate` method but returns a + :class:`~pymongo.asynchronous.cursor.AsyncRawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = await db.test.aggregate_raw_batches([ + ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) + >>> async for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + async with self._database.client._tmp_session(session) as s: + return cast( + AsyncRawBatchCursor[_DocumentType], + await self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + AsyncRawBatchCommandCursor, + session=s, + **kwargs, + ), + ) + + @_csot.apply + async def rename( + self, + new_name: str, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: + """Rename this collection. + + If operating in auth mode, client must be authorized as an + admin to perform this operation. Raises :class:`TypeError` if + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` + if `new_name` is not a valid collection name. + + :param new_name: new name for this collection + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional arguments to the rename command + may be passed as keyword arguments to this helper method + (i.e. ``dropTarget=True``) + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + if not isinstance(new_name, str): + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") + + if not new_name or ".." in new_name: + raise InvalidName("collection names cannot be empty") + if new_name[0] == "." 
or new_name[-1] == ".": + raise InvalidName("collection names must not start or end with '.'") + if "$" in new_name and not new_name.startswith("oplog.$main"): + raise InvalidName("collection names must not contain '$'") + + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + write_concern = self._write_concern_for_cmd(cmd, session) + + async with await self._conn_for_writes(session, operation=_Op.RENAME) as conn: + async with self._database.client._tmp_session(session) as s: + return await conn.command( + "admin", + cmd, + write_concern=write_concern, + parse_write_concern_error=True, + session=s, + client=self._database.client, + ) + + async def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, + **kwargs: Any, + ) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in this collection. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + All optional distinct parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow the distinct + command to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + The :meth:`distinct` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + :param key: name of the field for which we want to get the distinct + values + :param filter: A query document that specifies the documents + from which to retrieve the distinct values. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``). + :param kwargs: See list of options above. + + .. versionchanged:: 4.12 + Added ``hint`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option.
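+ + An illustrative sketch (the ``db.inventory`` collection and its fields are placeholders, not part of the documented API):: + + # Distinct "category" values among in-stock documents only. + values = await db.inventory.distinct("category", {"qty": {"$gt": 0}})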
+ + """ + if not isinstance(key, str): + raise TypeError(f"key must be an instance of str, not {type(key)}") + if filter is not None: + if "query" in kwargs: + raise ConfigurationError("can't pass both filter and query") + kwargs["query"] = filter + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] + return ( + await self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) + )["values"] + + return await self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) + + async def _find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Internal findAndModify helper.""" + common.validate_is_mapping("filter", filter) + if not isinstance(return_document, bool): + raise ValueError( + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + write_concern = self._write_concern_for_cmd(kwargs, session) + + async def _find_and_modify_helper( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert + acknowledged = write_concern.acknowledged + if array_filters is not None: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." + ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." 
+ ) + cmd["hint"] = hint + out = await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) + _check_write_command_response(out) + + return out.get("value") + + return await self._database.client._retryable_write( + write_concern.acknowledged, + _find_and_modify_helper, + session, + operation=_Op.FIND_AND_MODIFY, + ) + + async def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and deletes it, returning the document. + + >>> await db.test.count_documents({'x': 1}) + 2 + >>> await db.test.find_one_and_delete({'x': 1}) + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + >>> await db.test.count_documents({'x': 1}) + 1 + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'x': 1}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_delete( + ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) + {'x': 1, '_id': 2} + + The *projection* option can be used to limit the fields returned. + + >>> await db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + {'x': 1} + + :param filter: A query that matches the document to delete. + :param projection: a list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is deleted. + :param hint: An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. 
Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + async def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and replaces it, returning either the + original or the replaced document. + + The :meth:`find_one_and_replace` method differs from + :meth:`find_one_and_update` by replacing the document matched by + *filter*, rather than modifying the existing document. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_replace({'x': 1}, {'y': 1}) + {'x': 1, '_id': 0} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is replaced. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was replaced, or ``None`` + if no document matches. If + :attr:`ReturnDocument.AFTER`, returns the replaced + or inserted document. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. 
versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_replace(replacement) + kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) + + async def find_one_and_update( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and updates it, returning either the + original or the updated document. + + >>> await db.test.find_one_and_update( + ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) + {'_id': 665, 'done': False, 'count': 25} + + Returns ``None`` if no document matches the filter. + + >>> await db.test.find_one_and_update( + ... {'_exists': False}, {'$inc': {'count': 1}}) + + When the filter matches, by default :meth:`find_one_and_update` + returns the original version of the document before the update was + applied. To return the updated (or inserted in the case of + *upsert*) version of the document instead, use the *return_document* + option. + + >>> from pymongo import ReturnDocument + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... return_document=ReturnDocument.AFTER) + {'_id': 'userid', 'seq': 1} + + You can limit the fields returned with the *projection* option. + + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... return_document=ReturnDocument.AFTER) + {'seq': 2} + + The *upsert* option can be used to create the document if it doesn't + already exist. + + >>> (await db.example.delete_many({})).deleted_count + 1 + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... upsert=True, + ... return_document=ReturnDocument.AFTER) + {'seq': 1} + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'done': True}): + ... print(doc) + ... + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} + >>> await db.test.find_one_and_update( + ... {'done': True}, + ... {'$set': {'final': True}}, + ... sort=[('_id', pymongo.DESCENDING)]) + {'_id': 701, 'done': True, 'result': {'count': 17}} + + :param filter: A query that matches the document to update. + :param update: The update operations to apply. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude.
If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is updated. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was updated. If + :attr:`ReturnDocument.AFTER`, returns the updated + or inserted document. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` options. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py new file mode 100644 index 0000000000..e18b3a330e --- /dev/null +++ b/pymongo/asynchronous/command_cursor.py @@ -0,0 +1,472 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""CommandCursor class to iterate over command results.""" +from __future__ import annotations + +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterator, + Generic, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot +from pymongo.asynchronous.cursor import _ConnectionManager +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.pool import AsyncConnection + +_IS_SYNC = False + + +class AsyncCommandCursor(Generic[_DocumentType]): + """An asynchronous cursor / iterator over command cursors.""" + + _getmore_class = _GetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[AsyncClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self._sock_mgr: Any = None + self._collection: AsyncCollection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout + self._session = session + if self._session is not None: + self._session._attached_to_cursor = True + self._killed = self._id == 0 + self._comment = comment + if self._killed: + self._end_session() + + if "ns" in cursor_info: # noqa: SIM401 + self._ns = cursor_info["ns"] + else: + self._ns = collection.full_name + + self.batch_size(batch_size) + + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + + def __del__(self) -> None: + self._die_no_lock() + + def batch_size(self, batch_size: int) -> AsyncCommandCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + + :param batch_size: The size of each batch of results requested. 
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + + self._batch_size = batch_size == 1 and 2 or batch_size + return self + + def _has_next(self) -> bool: + """Returns `True` if the cursor has documents remaining from the + previous batch. + """ + return len(self._data) > 0 + + @property + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: + """Retrieve the postBatchResumeToken from the response to a + changeStream aggregate or getMore. + """ + return self._postbatchresumetoken + + async def _maybe_pin_connection(self, conn: AsyncConnection) -> None: + client = self._collection.database.client + if not client._should_pin_cursor(self._session): + return + if not self._sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self._id == 0: + await conn_mgr.close() + else: + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + async for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[AsyncClientSession]: + """The cursor's :class:`~pymongo.asynchronous.client_session.AsyncClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]: + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. 
+ cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def _die_lock(self) -> None: + """Closes this cursor.""" + cursor_id, address = self._prepare_to_die() + await self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _end_session(self) -> None: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session._end_implicit_session() + self._session = None + + async def close(self) -> None: + """Explicitly close / kill this cursor.""" + await self._die_lock() + + async def _send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" + client = self._collection.database.client + try: + response = await client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + # Return the session and pinned connection, if necessary. + await self.close() + raise + except ConnectionFailure: + # Don't send killCursors because the cursor is already closed. + self._killed = True + # Return the session and pinned connection, if necessary. + await self.close() + raise + except Exception: + await self.close() + raise + + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] + else: + documents = response.docs + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + + if self._id == 0: + await self.close() + self._data = deque(documents) + + async def _refresh(self) -> int: + """Refreshes the cursor with more data from the server. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
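+ + Note that an empty batch does not always mean the cursor is exhausted: a tailable-await getMore can return no documents while the cursor id remains nonzero, in which case this method returns 0 with the cursor still alive.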
+ """ + if len(self._data) or self._killed: + return len(self._data) + + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + await self._send_message( + self._getmore_class( + dbname, + collname, + self._batch_size, + self._id, + self._collection.codec_options, + read_pref, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + False, + self._comment, + ) + ) + else: # Cursor id is zero nothing else to return + await self._die_lock() + + return len(self._data) + + def __aiter__(self) -> AsyncIterator[_DocumentType]: + return self + + async def next(self) -> _DocumentType: + """Advance the cursor.""" + # Block until a document is returnable. + while self.alive: + doc = await self._try_next(True) + if doc is not None: + return doc + + raise StopAsyncIteration + + async def __anext__(self) -> _DocumentType: + return await self.next() + + async def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: + """Advance the cursor blocking for at most one getMore command.""" + if not len(self._data) and not self._killed and get_more_allowed: + await self._refresh() + if len(self._data): + return self._data.popleft() + else: + return None + + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some available documents from the cursor.""" + if not len(self._data) and not self._killed: + await self._refresh() + if len(self._data): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + async def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there is no additional data) then ``None`` is returned. + + :return: The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 4.5 + """ + return await self._try_next(get_more_allowed=True) + + async def __aenter__(self) -> AsyncCommandCursor[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + @_csot.apply + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + + To use:: + + >>> await cursor.to_list() + + Or, so read at most n items from the cursor:: + + >>> await cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not await self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class AsyncRawBatchCommandCursor(AsyncCommandCursor[_DocumentType]): + _getmore_class = _RawBatchGetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[AsyncClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[dict[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on AsyncRawBatchCommandCursor") diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py new file mode 100644 index 0000000000..f19d3f6cee --- /dev/null +++ b/pymongo/asynchronous/cursor.py @@ -0,0 +1,1374 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + +import copy +import warnings +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams +from bson.code import Code +from bson.son import SON +from pymongo import _csot, helpers_shared +from pymongo.collation import validate_collation_or_none +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, +) +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _async_create_lock +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.read_preferences import _ServerMode + +_IS_SYNC = False + + +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" + + def __init__(self, conn: AsyncConnection, more_to_come: bool): + self.conn: Optional[AsyncConnection] = conn + self.more_to_come = more_to_come + self._lock = _async_create_lock() + + def update_exhaust(self, more_to_come: bool) -> None: + self.more_to_come = more_to_come + + async def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + await self.conn.unpin() + self.conn = None + + +class AsyncCursor(Generic[_DocumentType]): + _query_class = _Query + _getmore_class = _GetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None, + ) -> None: + """Create a new cursor. + + Should not be called directly by application developers - see + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + # Initialize all attributes used in __del__ before possibly raising + # an error to avoid attribute errors during garbage collection. 
+ self._collection: AsyncCollection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[AsyncClientSession] + + if session: + self._session = session + self._session._attached_to_cursor = True + else: + self._session = None + + spec: Mapping[str, Any] = filter or {} + validate_is_mapping("filter", spec) + if not isinstance(skip, int): + raise TypeError(f"skip must be an instance of int, not {type(skip)}") + if not isinstance(limit, int): + raise TypeError(f"limit must be an instance of int, not {type(limit)}") + validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and self._session and self._session._implicit: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): + raise ValueError("not a valid value for cursor_type") + validate_boolean("allow_partial_results", allow_partial_results) + validate_boolean("oplog_replay", oplog_replay) + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. + if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) + + if projection is not None: + projection = helpers_shared._fields_list_to_dict(projection, "projection") + + if let is not None: + validate_is_document_type("let", let) + + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers_shared._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) + + # This is ugly. People want to be able to do cursor[5:5] and + # get an empty result set (old behavior was an + # exception). It's hard to do that right, though, because the + # server uses limit(0) to mean 'no limit'. So we set __empty + # in that case and check for it when iterating. We also unset + # it anytime we change __limit. + self._empty = False + + self._data: deque = deque() # type: ignore[type-arg] + self._address: Optional[_Address] = None + self._retrieved = 0 + + self._codec_options = collection.codec_options + # Read preference is set when the initial find is sent. 
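+ # (None means "not yet resolved"; the effective read preference can depend on the session and transaction state at the time the find is sent.)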
+ self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern + + self._query_flags = cursor_type + self._cursor_type = cursor_type + if no_cursor_timeout: + self._query_flags |= _QUERY_OPTIONS["no_timeout"] + if allow_partial_results: + self._query_flags |= _QUERY_OPTIONS["partial"] + if oplog_replay: + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] + + # The namespace to use for find/getMore commands. + self._dbname = collection.database.name + self._collname = collection.name + + # Checking exhaust cursor support requires network IO + if _IS_SYNC: + self._exhaust_checked = True + self._supports_exhaust() # type: ignore[unused-coroutine] + else: + self._exhaust = cursor_type == CursorType.EXHAUST + self._exhaust_checked = False + + async def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if await self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True + + @property + def collection(self) -> AsyncCollection[_DocumentType]: + """The :class:`~pymongo.asynchronous.collection.AsyncCollection` that this + :class:`AsyncCursor` is iterating. + """ + return self._collection + + @property + def retrieved(self) -> int: + """The number of documents retrieved so far.""" + return self._retrieved + + def __del__(self) -> None: + self._die_no_lock() + + def clone(self) -> AsyncCursor[_DocumentType]: + """Get a clone of this cursor. + + Returns a new AsyncCursor instance with options matching those that have + been set on the current instance. The clone will be completely + unevaluated, even if the current instance has been partially or + completely evaluated. 
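+ + An illustrative sketch (``db.test`` is a placeholder collection, not part of the documented API):: + + cursor = db.test.find({"x": 1}).limit(5) + # A fully unevaluated copy with the same options: + fresh = cursor.clone()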
+ """ + return self._clone(True) + + def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> AsyncCursor: # type: ignore[type-arg] + """Internal clone helper.""" + if not base: + if self._session and not self._session._implicit: + base = self._clone_base(self._session) + else: + base = self._clone_base(None) + + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + "cursor_type", + ) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone + } + if deepcopy: + data = self._deepcopy(data) + base.__dict__.update(data) + return base + + def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncCursor: # type: ignore[type-arg] + """Creates an empty AsyncCursor object for information to be copied into.""" + return self.__class__(self._collection, session=session) + + def _query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: + operators["$explain"] = True + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: + # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot + + if operators: + # Make a shallow copy so we can cleanly rewind or clone. + spec = dict(self._spec) + + # Allow-listed commands must be wrapped in $query. + if "$query" not in spec: + # $query has to come first + spec = {"$query": spec} + + spec.update(operators) + return spec + # Have to wrap with $query if "query" is the first key. + # We can't just use $query anytime "query" is a key as + # that breaks commands like count and find_and_modify. + # Checking spec.keys()[0] covers the case that the spec + # was passed as an instance of SON or OrderedDict. + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} + + return self._spec + + def _check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" + if self._retrieved or self._id is not None: + raise InvalidOperation("cannot set options after executing query") + + async def add_option(self, mask: int) -> AsyncCursor[_DocumentType]: + """Set arbitrary query flags using a bitmask. 
+ + To set the tailable flag: + cursor.add_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + if await self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + self._exhaust = True + + self._query_flags |= mask + return self + + def remove_option(self, mask: int) -> AsyncCursor[_DocumentType]: + """Unset arbitrary query flags using a bitmask. + + To unset the tailable flag: + cursor.remove_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + self._exhaust = False + + self._query_flags &= ~mask + return self + + def allow_disk_use(self, allow_disk_use: bool) -> AsyncCursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") + self._check_okay_to_chain() + + self._allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> AsyncCursor[_DocumentType]: + """Limits the number of results to be returned by this cursor. + + Raises :exc:`TypeError` if `limit` is not an integer. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` + has already been used. The last `limit` applied to this cursor + takes precedence. A limit of ``0`` is equivalent to no limit. + + :param limit: the number of results to return + + .. seealso:: The MongoDB documentation on `limit `_. + """ + if not isinstance(limit, int): + raise TypeError(f"limit must be an integer, not {type(limit)}") + if self._exhaust: + raise InvalidOperation("Can't use limit and exhaust together.") + self._check_okay_to_chain() + + self._empty = False + self._limit = limit + return self + + def batch_size(self, batch_size: int) -> AsyncCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. The last `batch_size` + applied to this cursor takes precedence. + + :param batch_size: The size of each batch of results requested. 
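+ + An illustrative sketch (``db.test`` is a placeholder collection):: + + # Ask the server for at most 100 documents per getMore round trip. + async for doc in db.test.find().batch_size(100): + print(doc)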
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + self._check_okay_to_chain() + + self._batch_size = batch_size + return self + + def skip(self, skip: int) -> AsyncCursor[_DocumentType]: + """Skips the first `skip` results of this cursor. + + Raises :exc:`TypeError` if `skip` is not an integer. Raises + :exc:`ValueError` if `skip` is less than ``0``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` has + already been used. The last `skip` applied to this cursor takes + precedence. + + :param skip: the number of results to skip + """ + if not isinstance(skip, int): + raise TypeError(f"skip must be an integer, not {type(skip)}") + if skip < 0: + raise ValueError("skip must be >= 0") + self._check_okay_to_chain() + + self._skip = skip + return self + + def max_time_ms(self, max_time_ms: Optional[int]) -> AsyncCursor[_DocumentType]: + """Specifies a time limit for a query operation. If the specified + time is exceeded, the operation will be aborted and + :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` + is ``None`` no limit is applied. + + Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` + has already been used. + + :param max_time_ms: the time limit after which the operation is aborted + """ + if not isinstance(max_time_ms, int) and max_time_ms is not None: + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") + self._check_okay_to_chain() + + self._max_time_ms = max_time_ms + return self + + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> AsyncCursor[_DocumentType]: + """Specifies a time limit for a getMore operation on a + :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other + types of cursor max_await_time_ms is ignored. + + Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or + ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. + + .. note:: `max_await_time_ms` requires server version **>= 3.2** + + :param max_await_time_ms: the time limit after which the operation is + aborted + + .. versionadded:: 3.2 + """ + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + self._check_okay_to_chain() + + # Ignore max_await_time_ms if not tailable or await_data is False. + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms + + return self + + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> AsyncCursor[_DocumentType]: + ... + + def __getitem__( + self, index: Union[int, slice] + ) -> Union[_DocumentType, AsyncCursor[_DocumentType]]: + """Get a single document or a slice of documents from this cursor. + + .. warning:: A :class:`~AsyncCursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! 
+ for idx in range(10): + print(cursor[idx]) + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. + + To get a single document use an integral index, e.g.:: + + >>> db.test.find()[50] + + An :class:`IndexError` will be raised if the index is negative + or greater than the amount of documents in this cursor. Any + limit previously applied to this cursor will be ignored. + + To get a slice of documents use a slice index, e.g.:: + + >>> db.test.find()[20:25] + + This will return this cursor with a limit of ``5`` and skip of + ``20`` applied. Using a slice index will override any prior + limits or skips applied to this cursor (including those + applied through previous calls to this method). Raises + :class:`IndexError` when the slice has a step, a negative + start value, or a stop value less than or equal to the start + value. + + :param index: An integer or slice index to be applied to this cursor + """ + if _IS_SYNC: + self._check_okay_to_chain() + self._empty = False + if isinstance(index, slice): + if index.step is not None: + raise IndexError("AsyncCursor instances do not support slice steps") + + skip = 0 + if index.start is not None: + if index.start < 0: + raise IndexError("AsyncCursor instances do not support negative indices") + skip = index.start + + if index.stop is not None: + limit = index.stop - skip + if limit < 0: + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) + if limit == 0: + self._empty = True + else: + limit = 0 + + self._skip = skip + self._limit = limit + return self + + if isinstance(index, int): + if index < 0: + raise IndexError("AsyncCursor instances do not support negative indices") + clone = self.clone() + clone.skip(index + self._skip) + clone.limit(-1) # use a hard limit + clone._query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 + for doc in clone: # type: ignore[attr-defined] + return doc + raise IndexError("no such item for AsyncCursor instance") + raise TypeError("index %r cannot be applied to AsyncCursor instances" % index) + else: + raise IndexError("AsyncCursor does not support indexing") + + def max_scan(self, max_scan: Optional[int]) -> AsyncCursor[_DocumentType]: + """**DEPRECATED** - Limit the number of documents to scan when + performing the query. + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. Only the last :meth:`max_scan` + applied to this cursor has any effect. + + :param max_scan: the maximum number of documents to scan + + .. versionchanged:: 3.7 + Deprecated :meth:`max_scan`. Support for this option is deprecated in + MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side + execution time. + """ + self._check_okay_to_chain() + self._max_scan = max_scan + return self + + def max(self, spec: _Sort) -> AsyncCursor[_DocumentType]: + """Adds ``max`` operator that specifies upper bound for specific index. + + When using ``max``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the exclusive + upper bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``max`` without a :meth:`~hint`. + + .. 
versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._max = dict(spec) + return self + + def min(self, spec: _Sort) -> AsyncCursor[_DocumentType]: + """Adds ``min`` operator that specifies lower bound for specific index. + + When using ``min``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the inclusive + lower bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``min`` without a :meth:`~hint`. + + .. versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._min = dict(spec) + return self + + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> AsyncCursor[_DocumentType]: + """Sorts this cursor's results. + + Pass a field name and a direction, either + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: + + async for doc in collection.find().sort('field', pymongo.ASCENDING): + print(doc) + + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: + + async for doc in collection.find().sort([ + 'field1', + ('field2', pymongo.DESCENDING)]): + print(doc) + + Text search results can be sorted by relevance:: + + cursor = db.test.find( + {'$text': {'$search': 'some words'}}, + {'score': {'$meta': 'textScore'}}) + + # Sort by 'score' field. + cursor.sort([('score', {'$meta': 'textScore'})]) + + async for doc in cursor: + print(doc) + + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. Only the last :meth:`sort` applied to this + cursor has any effect. + + :param key_or_list: a single key or a list of (key, direction) + pairs specifying the keys to sort on + :param direction: only used if `key_or_list` is a single + key, if not given :data:`~pymongo.ASCENDING` is assumed + """ + self._check_okay_to_chain() + keys = helpers_shared._index_list(key_or_list, direction) + self._ordering = helpers_shared._index_document(keys) + return self + + async def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. note:: This method uses the default verbosity mode of the + `explain command + `_, + ``allPlansExecution``. To use a different verbosity use + :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` to run the explain + command directly. + + .. note:: The timeout of this method can be set using :func:`pymongo.timeout`. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + c = self.clone() + c._explain = True + + # always use a hard limit for explains + if c._limit: + c._limit = -abs(c._limit) + return await anext(c) + + def _set_hint(self, index: Optional[_Hint]) -> None: + if index is None: + self._hint = None + return + + if isinstance(index, str): + self._hint = index + else: + self._hint = helpers_shared._index_document(index) + + def hint(self, index: Optional[_Hint]) -> AsyncCursor[_DocumentType]: + """Adds a 'hint', telling Mongo the proper index to use for the query. 
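Taken together, ``skip``, ``limit``, ``batch_size``, ``max_time_ms``, and ``sort`` compose lazily: nothing is sent to the server until iteration begins. A minimal sketch, assuming a reachable local server and an illustrative ``app.events`` namespace (neither is part of this patch). Note that on the async class, index and slice access raises ``IndexError`` (see ``__getitem__`` above), so a ``skip``/``limit`` chain is the way to express what ``cursor[20:25]`` means on the synchronous cursor:

.. code-block:: python

    import asyncio

    from pymongo import ASCENDING, DESCENDING, AsyncMongoClient


    async def main() -> None:
        client = AsyncMongoClient()
        events = client.app.events  # illustrative namespace

        # Modifiers chain and apply lazily; the query runs on first iteration.
        cursor = (
            events.find({"status": "active"})
            .sort([("created", DESCENDING), ("name", ASCENDING)])
            .skip(20)
            .limit(5)
            .batch_size(5)
            .max_time_ms(2000)
        )
        async for doc in cursor:
            print(doc)

        # explain() clones the cursor, so the original remains usable.
        plan = await events.find({"status": "active"}).explain()
        print(plan["queryPlanner"])

        await client.close()


    asyncio.run(main())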
+ + Judicious use of hints can greatly improve query + performance. When doing a query on multiple fields (at least + one of which is indexed) pass the indexed field as a hint to + the query. Raises :class:`~pymongo.errors.OperationFailure` if the + provided hint requires an index that does not exist on this collection, + and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. + + `index` should be an index as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``) or the name of the index. + If `index` is ``None`` any existing hint for this query is + cleared. The last hint applied to this cursor takes precedence + over all others. + + :param index: index to hint on (as an index specifier) + """ + self._check_okay_to_chain() + self._set_hint(index) + return self + + def comment(self, comment: Any) -> AsyncCursor[_DocumentType]: + """Adds a 'comment' to the cursor. + + http://mongodb.com/docs/manual/reference/operator/comment/ + + :param comment: A string to attach to the query to help interpret and + trace the operation in the server logs and in profile data. + + .. versionadded:: 2.7 + """ + self._check_okay_to_chain() + self._comment = comment + return self + + def where(self, code: Union[str, Code]) -> AsyncCursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + async for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) + + Raises :class:`TypeError` if `code` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. Only the last call to + :meth:`where` applied to a :class:`AsyncCursor` has any effect. + + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + + :param code: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ + """ + self._check_okay_to_chain() + if not isinstance(code, Code): + code = Code(code) + + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self._has_filter: + spec = dict(self._spec) + else: + spec = cast(dict, self._spec) # type: ignore[type-arg] + spec["$where"] = code + self._spec = spec + return self + + def collation(self, collation: Optional[_CollationIn]) -> AsyncCursor[_DocumentType]: + """Adds a :class:`~pymongo.collation.Collation` to this query. + + Raises :exc:`TypeError` if `collation` is not an instance of + :class:`~pymongo.collation.Collation` or a ``dict``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` has + already been used. Only the last collation applied to this cursor has + any effect. + + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
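A short usage sketch for ``where`` and ``collation``, under the assumption of an illustrative ``users`` collection handle:

.. code-block:: python

    from pymongo.collation import Collation


    async def filter_examples(users) -> None:
        # strength=2 compares base characters and accents but ignores
        # case, so "ANDRE" also matches "andre".
        async for doc in users.find({"name": "ANDRE"}).collation(
            Collation(locale="fr", strength=2)
        ):
            print(doc)

        # $where evaluates JavaScript per scanned document; on modern
        # servers an equivalent $expr filter is usually preferable.
        async for doc in users.find().where("this.a < (this.b + this.c)"):
            print(doc)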
+ """ + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) + return self + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, # type: ignore[type-arg] + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: + # Save the read preference for getMore commands. + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + This is mostly useful with `tailable cursors + `_ + since they will stop iterating even though they *may* return more + results in the future. + + With regular cursors, simply use an asynchronous for loop instead of :attr:`alive`:: + + async for doc in collection.find(): + print(doc) + + .. note:: Even if :attr:`alive` is True, :meth:`next` can raise + :exc:`StopIteration`. :attr:`alive` can also be True while iterating + a cursor from a failed server. In this case :attr:`alive` will + return False after :meth:`next` fails to retrieve the next batch + of results from the server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> Optional[int]: + """Returns the id of the cursor + + .. versionadded:: 2.2 + """ + return self._id + + @property + def address(self) -> Optional[tuple[str, Any]]: + """The (host, port) of the server used, or None. + + .. versionchanged:: 3.0 + Renamed from "conn_id". + """ + return self._address + + @property + def session(self) -> Optional[AsyncClientSession]: + """The cursor's :class:`~pymongo.asynchronous.client_session.AsyncClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def __copy__(self) -> AsyncCursor[_DocumentType]: + """Support function for `copy.copy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=False) + + def __deepcopy__(self, memo: Any) -> Any: + """Support function for `copy.deepcopy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=True) + + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] + ... + + @overload + def _deepcopy( + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] + ... + + def _deepcopy( + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: + """Deepcopy helper for the data dictionary or list. + + Regular expressions cannot be deep copied but as they are immutable we + don't have to copy them when cloning. 
+ """ + y: Union[list[Any], dict[str, Any]] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): + y, is_list, iterator = [], True, enumerate(x) + else: + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] + if memo is None: + memo = {} + val_id = id(x) + if val_id in memo: + return memo[val_id] + memo[val_id] = y + + for key, value in iterator: + if isinstance(value, (dict, list)) and not isinstance(value, SON): + value = self._deepcopy(value, memo) # noqa: PLW2901 + elif not isinstance(value, RE_TYPE): + value = copy.deepcopy(value, memo) # noqa: PLW2901 + + if is_list: + y.append(value) # type: ignore[union-attr] + else: + if not isinstance(key, RE_TYPE): + key = copy.deepcopy(key, memo) # noqa: PLW2901 + y[key] = value # type:ignore[index] + return y + + def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def _die_lock(self) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + await self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def close(self) -> None: + """Explicitly close / kill this cursor.""" + await self._die_lock() + + async def distinct(self, key: str) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.asynchronous.collection.AsyncCollection.read_preference` of the + :class:`~pymongo.asynchronous.collection.AsyncCollection` instance on which + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.asynchronous.collection.AsyncCollection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return await self._collection.distinct(key, session=self._session, **options) + + async def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. 
+ + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = await client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + await self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures AsyncCursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + await self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + await self.close() + raise + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + await self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + await self.close() + + async def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
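For reference, the batch-limit arithmetic that the ``_refresh`` body below performs before issuing a getMore can be restated standalone (this helper is illustrative only, not pymongo API):

.. code-block:: python

    def getmore_limit(limit: int, retrieved: int, batch_size: int) -> int:
        # Mirrors _refresh: with a limit, ask only for what remains,
        # capped by batch_size; with no limit, batch_size alone applies.
        if limit:
            remaining = limit - retrieved
            if batch_size:
                return min(remaining, batch_size)
            return remaining
        return batch_size


    # With limit(100), 95 documents retrieved, and batch_size(20), the
    # next getMore requests at most 5 documents.
    assert getmore_limit(100, 95, 20) == 5
    assert getmore_limit(0, 50, 20) == 20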
+ """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + await self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + await self._send_message(g) + + return len(self._data) + + async def rewind(self) -> AsyncCursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + await self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + async def next(self) -> _DocumentType: + """Advance the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + await self._supports_exhaust() + if self._empty: + raise StopAsyncIteration + if len(self._data) or await self._refresh(): + return self._data.popleft() + else: + raise StopAsyncIteration + + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some documents from the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + await self._supports_exhaust() + if self._empty: + return False + if len(self._data) or await self._refresh(): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + async def __anext__(self) -> _DocumentType: + return await self.next() + + def __aiter__(self) -> AsyncCursor[_DocumentType]: + return self + + async def __aenter__(self) -> AsyncCursor[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + @_csot.apply + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + + To use:: + + >>> await cursor.to_list() + + Or, to read at most n items from the cursor:: + + >>> await cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not await self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class AsyncRawBatchCursor(AsyncCursor, Generic[_DocumentType]): # type: ignore[type-arg] + """An asynchronous cursor / iterator over raw batches of BSON data from a query result.""" + + _query_class = _RawBatchQuery + _getmore_class = _RawBatchGetMore + + def __init__( + self, collection: AsyncCollection[_DocumentType], *args: Any, **kwargs: Any + ) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + async def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + clone = self._clone(deepcopy=True, base=AsyncCursor(self.collection)) + return await clone.explain() + + def __getitem__(self, index: Any) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on AsyncRawBatchCursor") diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py new file mode 100644 index 0000000000..8e0afc9dc9 --- /dev/null +++ b/pymongo/asynchronous/database.py @@ -0,0 +1,1469 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
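A sketch tying together ``to_list`` and raw-batch iteration from the cursor API above; the ``events`` collection handle is an assumption:

.. code-block:: python

    from bson import decode_all


    async def batch_examples(events) -> None:
        # Bound to_list() so an unexpectedly large result set cannot
        # exhaust memory.
        first_page = await events.find().limit(100).to_list(100)
        print(len(first_page))

        # Raw batches defer per-document decoding until you ask for it.
        async for batch in events.find_raw_batches({"status": "active"}):
            docs = decode_all(batch)
            print(f"{len(docs)} documents in this batch")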
+ +"""Database level operations.""" +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.dbref import DBRef +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.asynchronous.aggregation import _DatabaseAggregationCommand +from pymongo.asynchronous.change_stream import AsyncDatabaseChangeStream +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.read_concern import ReadConcern + from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncDatabase(common.BaseObject, Generic[_DocumentType]): + def __init__( + self, + client: AsyncMongoClient[_DocumentType], + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: + """Get a database by client and name. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. + + :param client: A :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` instance. + :param name: The database name. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) client.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) client.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) client.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) client.read_concern is used. + + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + :class:`~pymongo.asynchronous.database.AsyncDatabase` no longer returns an instance + of :class:`~pymongo.asynchronous.collection.AsyncCollection` for attribute names + with leading underscores. 
You must use dict-style lookups instead:: + + db['__my_collection__'] + + Not: + + db.__my_collection__ + """ + super().__init__( + codec_options or client.codec_options, + read_preference or client.read_preference, + write_concern or client.write_concern, + read_concern or client.read_concern, + ) + + from pymongo.asynchronous.mongo_client import AsyncMongoClient + + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + + if not isinstance(client, AsyncMongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__): + raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") + + if name != "$external": + _check_name(name) + + self._name = name + self._client: AsyncMongoClient[_DocumentType] = client + self._timeout = client.options.timeout + + @property + def client(self) -> AsyncMongoClient[_DocumentType]: + """The client instance for this :class:`AsyncDatabase`.""" + return self._client + + @property + def name(self) -> str: + """The name of this :class:`AsyncDatabase`.""" + return self._name + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncDatabase[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncDatabase[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> AsyncDatabase[_DocumentType] | AsyncDatabase[_DocumentTypeArg]: + """Get a clone of this database changing the specified settings. + + >>> db1.read_preference + Primary() + >>> from pymongo.read_preferences import Secondary + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) + >>> db1.read_preference + Primary() + >>> db2.read_preference + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncCollection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncCollection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncCollection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncCollection` + is used. + + .. 
versionadded:: 3.8 + """ + return AsyncDatabase( + self._client, + self._name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AsyncDatabase): + return self._client == other.client and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._client, self._name)) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._client!r}, {self._name!r})" + + def __getattr__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + return AsyncCollection(self, name) + + def get_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> AsyncCollection[_DocumentType]: + """Get a :class:`~pymongo.asynchronous.collection.AsyncCollection` with the given name + and options. + + Useful for creating a :class:`~pymongo.asynchronous.collection.AsyncCollection` with + different codec options, read preference, and/or write concern from + this :class:`AsyncDatabase`. + + >>> db.read_preference + Primary() + >>> coll1 = db.test + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = db.get_collection( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the collection - a string. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncDatabase` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncDatabase` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncDatabase` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncDatabase` is + used. 
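A usage sketch for the handle-cloning methods above (``with_options``, ``get_collection``, and dict-style access); names here are illustrative:

.. code-block:: python

    from pymongo import AsyncMongoClient, ReadPreference
    from pymongo.write_concern import WriteConcern


    async def handles_example() -> None:
        client = AsyncMongoClient()
        db = client.get_database("app")

        # A clone of the same database that prefers secondaries.
        analytics = db.with_options(read_preference=ReadPreference.SECONDARY)

        # One collection handle with a stricter write concern; the
        # underscore-prefixed name requires dict-style access.
        audit = db.get_collection("_audit", write_concern=WriteConcern(w="majority"))
        raw = db["__raw__"]
        print(analytics.read_preference, audit.write_concern, raw.name)

        await client.close()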
+ """ + return AsyncCollection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + async def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = await self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncDatabase' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + async def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[AsyncClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> AsyncDatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + async with db.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. 
Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = AsyncDatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + await change_stream._initialize_cursor() + return change_stream + + @_csot.apply + async def create_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[AsyncClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> AsyncCollection[_DocumentType]: + """Create a new :class:`~pymongo.asynchronous.collection.AsyncCollection` in this + database. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. 
:class:`~pymongo.errors.CollectionInvalid` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncDatabase` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncDatabase` is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncDatabase` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncDatabase` is + used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param check_exists: if True (the default), send a listCollections command to + check if the collection already exists before creation. + :param kwargs: additional keyword arguments will + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size`` (int): desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for + timeseries collections + - ``expireAfterSeconds`` (int): the number of seconds after which a + document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. + - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + - ``clusteredIndex`` (dict): Document that specifies the clustered index + configuration. 
It must have the following form::
+
+            {
+                // key pattern must be {_id: 1}
+                key: <key pattern>, // required
+                unique: <bool>, // required, must be `true`
+                name: <string>, // optional, otherwise automatically generated
+                v: <int>, // optional, must be `2` if provided
+            }
+        - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for
+          enabling pre- and post-images.
+
+        .. versionchanged:: 4.2
+           Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters.
+
+        .. versionchanged:: 3.11
+           This method is now supported inside multi-document transactions
+           with MongoDB 4.4+.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. versionchanged:: 3.4
+           Added the collation option.
+
+        .. versionchanged:: 3.0
+           Added the codec_options, read_preference, and write_concern options.
+
+        .. _create collection command:
+            https://mongodb.com/docs/manual/reference/command/create
+        """
+        encrypted_fields = await self._get_encrypted_fields(kwargs, name, False)
+        if encrypted_fields:
+            common.validate_is_mapping("encryptedFields", encrypted_fields)
+            kwargs["encryptedFields"] = encrypted_fields
+
+        clustered_index = kwargs.get("clusteredIndex")
+        if clustered_index:
+            common.validate_is_mapping("clusteredIndex", clustered_index)
+
+        async with self._client._tmp_session(session) as s:
+            if s and not s.in_transaction:
+                s._leave_alive = True
+            # Skip this check in a transaction where listCollections is not
+            # supported.
+            if (
+                check_exists
+                and (not s or not s.in_transaction)
+                and name in await self._list_collection_names(filter={"name": name}, session=s)
+            ):
+                raise CollectionInvalid("collection %s already exists" % name)
+            if s:
+                s._leave_alive = False
+            coll = AsyncCollection(
+                self,
+                name,
+                False,
+                codec_options,
+                read_preference,
+                write_concern,
+                read_concern,
+            )
+            await coll._create(kwargs, s)
+
+            return coll
+
+    async def aggregate(
+        self, pipeline: _Pipeline, session: Optional[AsyncClientSession] = None, **kwargs: Any
+    ) -> AsyncCommandCursor[_DocumentType]:
+        """Perform a database-level aggregation.
+
+        See the `aggregation pipeline`_ documentation for a list of stages
+        that are supported.
+
+        .. code-block:: python
+
+            # Lists all operations currently running on the server.
+            async with await client.admin.aggregate([{"$currentOp": {}}]) as cursor:
+                async for operation in cursor:
+                    print(operation)
+
+        The :meth:`aggregate` method obeys the :attr:`read_preference` of this
+        :class:`AsyncDatabase`, except when ``$out`` or ``$merge`` are used, in
+        which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`
+        is used.
+
+        Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+        If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+        To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed,
+        or use the cursor in a with statement.
+
+        .. note:: This method does not support the 'explain' option. Please
+           use :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` instead.
+
+        .. note:: The :attr:`~pymongo.asynchronous.database.AsyncDatabase.write_concern` of
+           this database is automatically applied to this operation.
+
+        :param pipeline: a list of aggregation pipeline stages
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param kwargs: extra `aggregate command`_ parameters.
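A hedged sketch of ``create_collection`` options and a database-level aggregation, assuming a ``db`` handle and illustrative names (the time series form requires MongoDB 5.0+):

.. code-block:: python

    async def create_examples(db) -> None:
        # Capped collection: size (bytes) is required, max is optional.
        await db.create_collection("log", capped=True, size=1024 * 1024, max=1000)

        # Time series collection with automatic expiry.
        await db.create_collection(
            "metrics",
            timeseries={"timeField": "ts", "metaField": "sensor"},
            expireAfterSeconds=86400,
        )

        # $currentOp must run against the admin database.
        async with await db.client.admin.aggregate([{"$currentOp": {}}]) as cursor:
            async for op in cursor:
                print(op.get("opid"))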
+ + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + + :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result + set. + + .. versionadded:: 3.9 + + .. _aggregation pipeline: + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + async with self.client._tmp_session(session) as s: + cmd = _DatabaseAggregationCommand( + self, + AsyncCommandCursor, + pipeline, + kwargs, + user_fields={"cursor": {"firstBatch": 1}}, + ) + return await self.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + @overload + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... 
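The paired overloads above exist so the response type can track ``codec_options``; the public ``command`` method mirrors the same pattern. A small sketch (``db`` is an assumed handle):

.. code-block:: python

    from bson.codec_options import CodecOptions
    from bson.raw_bson import RawBSONDocument


    async def command_examples(db) -> None:
        # Default codec options: response decoded to dict[str, Any].
        info = await db.command("buildinfo")

        # Typed codec options: response documents left undecoded.
        raw = await db.command(
            "ping", codec_options=CodecOptions(document_class=RawBSONDocument)
        )
        print(info["version"], len(raw.raw))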
+ + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Internal command helper.""" + if isinstance(command, str): + command = {command: value} + + command.update(kwargs) + async with self._client._tmp_session(session) as s: + return await conn.command( + self._name, + command, + read_preference, + codec_options, # type: ignore[arg-type] + check, + allowable_errors, + write_concern=write_concern, + parse_write_concern_error=parse_write_concern_error, + session=s, + client=self._client, + ) + + @overload + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Issue a MongoDB command. + + Send command `command` to the database and return the + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of + :class:`dict` and will be sent as is. + + Any additional keyword arguments will be added to the final + command document before it is sent. + + For example, a command like ``{buildinfo: 1}`` can be sent + using: + + >>> await db.command("buildinfo") + OR + >>> await db.command({"buildinfo": 1}) + + For a command where the value matters, like ``{count: + collection_name}`` we can do: + + >>> await db.command("count", collection_name) + OR + >>> await db.command({"count": collection_name}) + + For commands that take additional arguments we can use + kwargs. So ``{count: collection_name, query: query}`` becomes: + + >>> await db.command("count", collection_name, query=query) + OR + >>> await db.command({"count": collection_name, "query": query}) + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. 
note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should be done with this in mind. + + :param value: value to use for the command verb when + `command` is passed as a string + :param check: check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + :param allowable_errors: if `check` is ``True``, error messages + in this list will be ignored by error-checking + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this AsyncDatabase's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, + and `secondary_acceptable_latency_ms` option. + Removed `compile_re` option: PyMongo now always represents BSON + regular expressions as :class:`~bson.regex.Regex` objects. Use + :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a + BSON regular expression to a Python regular expression object. + Added the ``codec_options`` parameter. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if comment is not None: + kwargs["comment"] = comment + + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + if read_preference is None: + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + async with await self._client._conn_for_reads( + read_preference, session, operation=command_name + ) as ( + connection, + read_preference, + ): + return await self._command( + connection, + command, + value, + check, + allowable_errors, + read_preference, + opts, # type: ignore[arg-type] + session=session, + **kwargs, + ) + + @_csot.apply + async def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions[_CodecDocumentType]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + max_await_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + """Issue a MongoDB command and parse the response as a cursor. 
+ + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + .. note:: :meth:`command` does **not** obey this AsyncDatabase's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. 
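A usage sketch for ``cursor_command``, assuming a ``db`` handle; ``listCollections`` is used here because it always returns a cursor document, and extra keyword arguments (such as the hypothetical ``filter`` below) are merged into the command:

.. code-block:: python

    async def cursor_command_example(db) -> None:
        cursor = await db.cursor_command(
            "listCollections", filter={"type": "collection"}
        )
        async for info in cursor:
            print(info["name"])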
+ """ + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + async with self._client._tmp_session(session) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + async with await self._client._conn_for_reads( + read_preference, tmp_session, command_name + ) as ( + conn, + read_preference, + ): + response = await self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = AsyncCommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + comment=comment, + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + async def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + operation: str, + session: Optional[AsyncClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return await self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return await self._client._retryable_read(_cmd, read_preference, session, operation) + + async def _list_collections( + self, + conn: AsyncConnection, + session: Optional[AsyncClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + AsyncCollection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = {"listCollections": 1, "cursor": {}} + cmd.update(kwargs) + async with self._client._tmp_session(session) as tmp_session: + cursor = ( + await self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] + cmd_cursor = AsyncCommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + comment=cmd.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + async def _list_collections_helper( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. 
versionadded:: 3.6 + """ + if filter is not None: + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + return await self._list_collections( + conn, session, read_preference=read_preference, **kwargs + ) + + return await self._client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) + + async def list_collections( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await database.list_collections() as cursor: + async for collection in cursor: + print(collection) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. versionadded:: 3.6 + """ + return await self._list_collections_helper(session, filter, comment, **kwargs) + + async def _list_collection_names( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + if filter is None: + kwargs["nameOnly"] = True + + else: + # The enumerate collections spec states that "drivers MUST NOT set + # nameOnly if a filter specifies any keys other than name." + common.validate_is_mapping("filter", filter) + kwargs["filter"] = filter + if not filter or (len(filter) == 1 and "name" in filter): + kwargs["nameOnly"] = True + + return [ + result["name"] + async for result in await self._list_collections_helper(session=session, **kwargs) + ] + + async def list_collection_names( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. 
+ :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return await self._list_collection_names(session, filter, comment, **kwargs) + + async def _drop_helper( + self, name: str, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None + ) -> dict[str, Any]: + command = {"drop": name} + if comment is not None: + command["comment"] = comment + + async with await self._client._conn_for_writes(session, operation=_Op.DROP) as connection: + return await self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + async def drop_collection( + self, + name_or_collection: Union[str, AsyncCollection[_DocumentTypeArg]], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: + """Drop a collection. + + :param name_or_collection: the name of a collection to drop or the + collection object itself + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + + + .. note:: The :attr:`~pymongo.asynchronous.database.AsyncDatabase.write_concern` of + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this database's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_collection + if isinstance(name, AsyncCollection): + name = name.name + + if not isinstance(name, str): + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") + encrypted_fields = await self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + await self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + await self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return await self._drop_helper(name, session, comment) + + async def validate_collection( + self, + name_or_collection: Union[str, AsyncCollection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[AsyncClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: + """Validate a collection. + + Returns a dict of validation info. 
Raises CollectionInvalid if + validation fails. + + See also the MongoDB documentation on the `validate command`_. + + :param name_or_collection: An AsyncCollection object or the name of a + collection to validate. + :param scandata: Do extra checks beyond checking the overall + structure of the collection. + :param full: Have the server do a more thorough scan of the + collection. Use with `scandata` for a thorough scan + of the structure of the collection and the individual + documents. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param background: A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ + """ + name = name_or_collection + if isinstance(name, AsyncCollection): + name = name.name + + if not isinstance(name, str): + raise TypeError( + f"name_or_collection must be an instance of str or AsyncCollection, not {type(name)}" + ) + cmd = {"validate": name, "scandata": scandata, "full": full} + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background + + result = await self.command(cmd, session=session) + + valid = True + # Pre 1.9 results + if "result" in result: + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + # Sharded results + elif "raw" in result: + for _, res in result["raw"].items(): + if "result" in res: + info = res["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + elif not res.get("valid", False): + valid = False + break + # Post 1.9 non-sharded results. + elif not result.get("valid", False): + valid = False + + if not valid: + raise CollectionInvalid(f"{name} invalid: {result!r}") + + return result + + async def dereference( + self, + dbref: DBRef, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: + """Dereference a :class:`~bson.dbref.DBRef`, getting the + document it points to. + + Raises :class:`TypeError` if `dbref` is not an instance of + :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if + the reference does not point to a valid document. Raises + :class:`ValueError` if `dbref` has a database specified that + is different from the current database. + + :param dbref: the reference + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional keyword arguments + are the same as the arguments to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find`. + + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. 
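+
+        A minimal sketch, assuming ``db`` is this database and ``oid`` is the
+        ``_id`` of a document in its ``people`` collection (both names are
+        illustrative)::
+
+            from bson.dbref import DBRef
+
+            person = await db.dereference(DBRef("people", oid))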
+ """ + if not isinstance(dbref, DBRef): + raise TypeError("cannot dereference a %s" % type(dbref)) + if dbref.database is not None and dbref.database != self._name: + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self._name!r})" + ) + return await self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py new file mode 100644 index 0000000000..4dfd36aa49 --- /dev/null +++ b/pymongo/asynchronous/encryption.py @@ -0,0 +1,1283 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for explicit client-side field level encryption.""" +from __future__ import annotations + +import asyncio +import contextlib +import enum +import socket +import time as time # noqa: PLC0414 # needed in sync version +import uuid +import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Dict, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +try: + from pymongocrypt.asynchronous.auto_encrypter import AsyncAutoEncrypter # type:ignore[import] + from pymongocrypt.asynchronous.explicit_encrypter import ( # type:ignore[import] + AsyncExplicitEncrypter, + ) + from pymongocrypt.asynchronous.state_machine import ( # type:ignore[import] + AsyncMongoCryptCallback, + ) + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False + AsyncMongoCryptCallback = object + +from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from pymongo import _csot +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import CONNECT_TIMEOUT +from pymongo.daemon import _spawn_daemon +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + NetworkTimeout, + ServerSelectionTimeoutError, +) +from pymongo.helpers_shared import _get_timeout_details +from pymongo.network_layer import async_socket_sendall +from pymongo.operations import UpdateOne +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _async_configured_socket, + _raise_connection_failure, +) +from pymongo.read_concern import ReadConcern +from pymongo.results import 
BulkWriteResult, DeleteResult +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context +from pymongo.typings import _DocumentType, _DocumentTypeArg +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext + + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + + +_IS_SYNC = False + +_HTTPS_PORT = 443 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD +) +# Use RawBSONDocument codec options to avoid needlessly decoding +# documents from the key vault. +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) + + +async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return await _async_configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + +@contextlib.contextmanager +def _wrap_encryption_errors() -> Iterator[None]: + """Context manager to wrap encryption related errors.""" + try: + yield + except BSONError: + # BSON encoding/decoding errors are unrelated to encryption so + # we should propagate them unchanged. + raise + except Exception as exc: + raise EncryptionError(exc) from exc + + +class _EncryptionIO(AsyncMongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[AsyncMongoClient[_DocumentTypeArg]], + key_vault_coll: AsyncCollection[_DocumentTypeArg], + mongocryptd_client: Optional[AsyncMongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): + """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any + # Use a weak ref to break reference cycle. + if client is not None: + self.client_ref = weakref.ref(client) + else: + self.client_ref = None + self.key_vault_coll: Optional[AsyncCollection[RawBSONDocument]] = cast( + AsyncCollection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) + self.mongocryptd_client = mongocryptd_client + self.opts = opts + self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) + + async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: + """Complete a KMS request. + + :param kms_context: A :class:`MongoCryptKmsContext`. + + :return: None + """ + endpoint = kms_context.endpoint + message = kms_context.message + provider = kms_context.kms_provider + ctx = self._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + _IS_SYNC, + ) + # CSOT: set timeout for socket creation. 
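+        # clamp_remaining() caps the timeout at the operation's remaining
+        # CSOT budget (or returns _KMS_CONNECT_TIMEOUT when no deadline is
+        # set); the 0.001 floor keeps the socket timeout strictly positive.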
+ connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + await asyncio.sleep(sleep_sec) + try: + conn = await _connect_kms(address, opts) + try: + await async_socket_sendall(conn, message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + async_receive_data_socket, + ) + + data = await async_receive_data_socket(conn, kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) + finally: + conn.close() + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err + + async def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: + """Get the collection info for a namespace. + + The returned collection info is passed to libmongocrypt which reads + the JSON schema. + + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. + + :return: All documents from the listCollections command response as BSON. + """ + async with await self.client_ref()[database].list_collections( + filter=RawBSONDocument(filter) + ) as cursor: + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) async for doc in cursor] + + def spawn(self) -> None: + """Spawn mongocryptd. + + Note this method is thread safe; at most one mongocryptd will start + successfully. + """ + self._spawned = True + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] + args.extend(self.opts._mongocryptd_spawn_args) + _spawn_daemon(args) + + async def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: + """Mark a command for encryption. + + :param database: The database on which to run this command. + :param cmd: The BSON command to run. + + :return: The marked command response from mongocryptd. + """ + if not self._spawned and not self.opts._mongocryptd_bypass_spawn: + self.spawn() + # AsyncDatabase.command only supports mutable mappings so we need to decode + # the raw BSON command first. 
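+        # With RawBSON codec options only the top level is decoded; the
+        # sub-documents stay as RawBSONDocument, avoiding a full
+        # decode/re-encode round trip.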
+ inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None + try: + res = await self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + except ServerSelectionTimeoutError: + if self.opts._mongocryptd_bypass_spawn: + raise + self.spawn() + res = await self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + return res.raw + + async def fetch_keys(self, filter: bytes) -> AsyncGenerator[bytes | memoryview, None]: + """Yields one or more keys from the key vault. + + :param filter: The filter to pass to find. + + :return: A generator which yields the requested keys from the key vault. + """ + assert self.key_vault_coll is not None + async with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: + async for key in cursor: + yield key.raw + + async def insert_data_key(self, data_key: bytes) -> Binary: + """Insert a data key into the key vault. + + :param data_key: The data key document to insert. + + :return: The _id of the inserted data key document. + """ + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) + + assert self.key_vault_coll is not None + await self.key_vault_coll.insert_one(raw_doc) + return data_key_id + + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: + """Encode a document to BSON. + + A document can be any mapping type (like :class:`dict`). + + :param doc: mapping type representing a document + + :return: The encoded BSON bytes. + """ + return encode(doc) + + async def close(self) -> None: + """Release resources. + + Note it is not safe to call this method from __del__ or any GC hooks. + """ + self.client_ref = None + self.key_vault_coll = None + if self.mongocryptd_client: + await self.mongocryptd_client.close() + self.mongocryptd_client = None + + +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~AsyncClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~AsyncClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. + + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: AsyncMongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. + + :param client: The encrypted AsyncMongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. 
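+
+        .. note:: KMS TLS options are parsed eagerly here so that
+           configuration errors surface before any internal clients are
+           created.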
+ """ + if opts._schema_map is None: + schema_map = None + else: + schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: AsyncMongoClient[_DocumentTypeArg] + ) -> AsyncMongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: AsyncMongoClient[Mapping[str, Any]] = AsyncMongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( # type:ignore[misc] + metadata_client, + key_vault_coll, # type:ignore[arg-type] + mongocryptd_client, + opts, + ) + self._auto_encrypter = AsyncAutoEncrypter( + io_callbacks, + _create_mongocrypt_options( + kms_providers=opts._kms_providers, + schema_map=schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, + ), + ) + self._closed = False + + async def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> dict[str, Any]: + """Encrypt a MongoDB command. + + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. + + :return: The encrypted command to execute. + """ + self._check_closed() + encoded_cmd = _dict_to_bson(cmd, False, codec_options) + with _wrap_encryption_errors(): + encrypted_cmd = await self._auto_encrypter.encrypt(database, encoded_cmd) + # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + + async def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: + """Decrypt a MongoDB command response. + + :param response: A MongoDB command response as BSON. + + :return: The decrypted command response. 
+ """ + self._check_closed() + with _wrap_encryption_errors(): + return cast(bytes, await self._auto_encrypter.decrypt(response)) + + def _check_closed(self) -> None: + if self._closed: + raise InvalidOperation("Cannot use AsyncMongoClient after close") + + async def close(self) -> None: + """Cleanup resources.""" + self._closed = True + await self._auto_encrypter.close() + if self._internal_client: + await self._internal_client.close() + self._internal_client = None + + +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" + + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + RANGE = "Range" + """Range. + + .. versionadded:: 4.9 + """ + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - RangePreview. + + .. note:: Support for RangePreview is deprecated. Use :attr:`Algorithm.RANGE` instead. + + .. versionadded:: 4.4 + """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" + + RANGE = "range" + """Used to encrypt a value for a range query. + + .. versionadded:: 4.9 + """ + + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - Used to encrypt a value for a rangePreview query. + + .. note:: Support for RangePreview is deprecated. Use :attr:`QueryType.RANGE` instead. + + .. versionadded:: 4.4 + """ + + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + + +def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) + + +class AsyncClientEncryption(Generic[_DocumentType]): + """Explicit client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, + ) -> None: + """Explicit client-side field level encryption. + + The AsyncClientEncryption class encapsulates explicit operations on a key + vault collection that cannot be done directly on an AsyncMongoClient. Similar + to configuring auto encryption on an AsyncMongoClient, it is constructed with + an AsyncMongoClient (to a MongoDB cluster containing the key vault + collection), KMS provider configuration, and keyVaultNamespace. 
It + provides an API for explicitly encrypting and decrypting values, and + creating data keys. It does not provide an API to query keys from the + key vault collection, as this can be done directly on the AsyncMongoClient. + + See `explicit client-side encryption `_ for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support `CSFLE on-demand credentials `_. + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: An AsyncMongoClient connected to a MongoDB cluster + containing the `key_vault_namespace` collection. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` to use when encoding a + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + AsyncMongoClient, AsyncDatabase, or AsyncCollection used to access application + data. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.AsyncMongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. 
versionadded:: 3.9
+        """
+        if not _HAVE_PYMONGOCRYPT:
+            raise ConfigurationError(
+                "client-side field level encryption requires the pymongocrypt "
+                "library: install a compatible version with: "
+                "python -m pip install --upgrade 'pymongo[encryption]'"
+            )
+
+        check_min_pymongocrypt()
+
+        if not isinstance(codec_options, CodecOptions):
+            raise TypeError(
+                f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}"
+            )
+
+        if not isinstance(key_vault_client, AsyncMongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
+            if not any(
+                cls.__name__ == "AsyncMongoClient" for cls in type(key_vault_client).__mro__
+            ):
+                raise TypeError(
+                    f"AsyncMongoClient required but given {type(key_vault_client).__name__}"
+                )
+
+        self._kms_providers = kms_providers
+        self._key_vault_namespace = key_vault_namespace
+        self._key_vault_client = key_vault_client
+        self._codec_options = codec_options
+
+        db, coll = key_vault_namespace.split(".", 1)
+        key_vault_coll = key_vault_client[db][coll]
+
+        opts = AutoEncryptionOpts(
+            kms_providers,
+            key_vault_namespace,
+            kms_tls_options=kms_tls_options,
+            key_expiration_ms=key_expiration_ms,
+        )
+        self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC)
+        self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(
+            None, key_vault_coll, None, opts
+        )
+        self._encryption = AsyncExplicitEncrypter(
+            self._io_callbacks,
+            _create_mongocrypt_options(
+                kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms
+            ),
+        )
+        # Use the same key vault collection as the callback.
+        assert self._io_callbacks.key_vault_coll is not None
+        self._key_vault_coll = self._io_callbacks.key_vault_coll
+
+    async def create_encrypted_collection(
+        self,
+        database: AsyncDatabase[_DocumentTypeArg],
+        name: str,
+        encrypted_fields: Mapping[str, Any],
+        kms_provider: Optional[str] = None,
+        master_key: Optional[Mapping[str, Any]] = None,
+        **kwargs: Any,
+    ) -> tuple[AsyncCollection[_DocumentTypeArg], Mapping[str, Any]]:
+        """Create a collection with encryptedFields.
+
+        .. warning::
+            This function does not update the encryptedFieldsMap in the client's
+            AutoEncryptionOpts; the user must therefore create a new client,
+            configured with the returned encryptedFields, after calling this function.
+
+        Normally collection creation is automatic. This method should
+        only be used to specify options on
+        creation. :class:`~pymongo.errors.EncryptionError` will be
+        raised if the collection already exists.
+
+        :param database: the database to create the collection
+        :param name: the name of the collection to create
+        :param encrypted_fields: Document that describes the encrypted fields for
+            Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example:
+
+            .. code-block:: python
+
+              {
+                "escCollection": "enxcol_.encryptedCollection.esc",
+                "ecocCollection": "enxcol_.encryptedCollection.ecoc",
+                "fields": [
+                    {
+                        "path": "firstName",
+                        "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
+                        "bsonType": "string",
+                        "queries": {"queryType": "equality"}
+                    },
+                    {
+                        "path": "ssn",
+                        "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
+                        "bsonType": "string"
+                    }
+                ]
+              }
+
+        :param kms_provider: the KMS provider to be used
+        :param master_key: Identifies a KMS-specific key used to encrypt the
+            new data key. If the kmsProvider is "local" the `master_key` is
+            not applicable and may be omitted.
+ :param kwargs: additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for all valid options. + + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + if not isinstance(database, AsyncDatabase): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") + + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = await self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + await database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + async def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: + """Create and insert a new data key into the key vault collection. + + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + + If the `kms_provider` type is "aws" it is required and has the + following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer. + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". + + If the `kms_provider` type is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` type is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` type is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. 
`keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. + + :param key_alt_names: An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. The following example shows creating + and referring to a data key by alternate name:: + + client_encryption.create_data_key("local", key_alt_names=["name1"]) + # reference the key with the alternate name + client_encryption.encrypt("457-55-5462", key_alt_name="name1", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + :param key_material: Sets the custom key material to be used + by the data key for encryption and decryption. + + :return: The ``_id`` of the created data key document as a + :class:`~bson.binary.Binary` with subtype + :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. + """ + self._check_closed() + with _wrap_encryption_errors(): + return cast( + Binary, + await self._encryption.create_data_key( + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ), + ) + + async def _encrypt_helper( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + is_expression: bool = False, + text_opts: Optional[TextOpts] = None, + ) -> Any: + self._check_closed() + if isinstance(key_id, uuid.UUID): + key_id = Binary.from_uuid(key_id) + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + range_opts_bytes = None + if range_opts: + range_opts_bytes = encode( + range_opts.document, + codec_options=self._codec_options, + ) + text_opts_bytes = None + if text_opts: + text_opts_bytes = encode( + text_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = await self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts_bytes, + is_expression=is_expression, + # For compatibility with pymongocrypt < 1.16: + **{"text_opts": text_opts_bytes} if text_opts_bytes else {}, + ) + return decode(encrypted_doc)["v"] + + async def encrypt( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + text_opts: Optional[TextOpts] = None, + ) -> Binary: + """Encrypt a BSON value with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param value: The BSON value to encrypt. 
+        :param algorithm: The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: The query type to execute. See :class:`QueryType` for valid options.
+        :param contention_factor: The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+        :param text_opts: Index options for `textPreview` queries. See
+            :class:`TextOpts` for some valid options.
+
+        :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6.
+
+        .. versionchanged:: 4.15
+           Added the `text_opts` parameter.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionchanged:: 4.2
+           Added the `query_type` and `contention_factor` parameters.
+        """
+        return cast(
+            Binary,
+            await self._encrypt_helper(
+                value=value,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=False,
+                text_opts=text_opts,
+            ),
+        )
+
+    async def encrypt_expression(
+        self,
+        expression: Mapping[str, Any],
+        algorithm: str,
+        key_id: Optional[Union[Binary, uuid.UUID]] = None,
+        key_alt_name: Optional[str] = None,
+        query_type: Optional[str] = None,
+        contention_factor: Optional[int] = None,
+        range_opts: Optional[RangeOpts] = None,
+    ) -> RawBSONDocument:
+        """Encrypt a BSON expression with a given key and algorithm.
+
+        Note that exactly one of ``key_id`` or ``key_alt_name`` must be
+        provided.
+
+        :param expression: The BSON aggregate or match expression to encrypt.
+        :param algorithm: The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: The query type to execute. See
+            :class:`QueryType` for valid options.
+        :param contention_factor: The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+
+        :return: The encrypted expression, a :class:`~bson.RawBSONDocument`.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionadded:: 4.4
+        """
+        return cast(
+            RawBSONDocument,
+            await self._encrypt_helper(
+                value=expression,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=True,
+            ),
+        )
+
+    async def decrypt(self, value: Binary) -> Any:
+        """Decrypt an encrypted value.
+
+        :param value: The encrypted value, a
+            :class:`~bson.binary.Binary` with subtype 6.
+
+        :return: The decrypted BSON value.
+        """
+        self._check_closed()
+        if not (isinstance(value, Binary) and value.subtype == 6):
+            raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6")
+
+        with _wrap_encryption_errors():
+            doc = encode({"v": value})
+            decrypted_doc = await self._encryption.decrypt(doc)
+            return decode(decrypted_doc, codec_options=self._codec_options)["v"]
+
+    async def get_key(self, id: Binary) -> Optional[RawBSONDocument]:
+        """Get a data key by id.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one({"_id": id})
+
+    def get_keys(self) -> AsyncCursor[RawBSONDocument]:
+        """Get all of the data keys.
+
+        :return: An instance of :class:`~pymongo.asynchronous.cursor.AsyncCursor` over the data key
+            documents.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find({})
+
+    async def delete_key(self, id: Binary) -> DeleteResult:
+        """Delete a key document in the key vault collection that has the given ``key_id``.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The delete result.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.delete_one({"_id": id})
+
+    async def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any:
+        """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``id``.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to add.
+
+        :return: The previous version of the key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        update = {"$addToSet": {"keyAltNames": key_alt_name}}
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one_and_update({"_id": id}, update)
+
+    async def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Get a key document in the key vault collection that has the given ``key_alt_name``.
+
+        :param key_alt_name: The key alternate name of the key to get.
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one({"keyAltNames": key_alt_name})
+
+    async def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``.
+
+        Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to remove.
+
+        :return: The previous version of the key document.
+
+        .. versionadded:: 4.2
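+
+        A minimal sketch (``key_id`` is the ``_id`` of an existing data key;
+        the name ``"name1"`` is illustrative)::
+
+            await client_encryption.remove_key_alt_name(key_id, "name1")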
+        """
+        self._check_closed()
+        pipeline = [
+            {
+                "$set": {
+                    "keyAltNames": {
+                        "$cond": [
+                            {"$eq": ["$keyAltNames", [key_alt_name]]},
+                            "$$REMOVE",
+                            {
+                                "$filter": {
+                                    "input": "$keyAltNames",
+                                    "cond": {"$ne": ["$$this", key_alt_name]},
+                                }
+                            },
+                        ]
+                    }
+                }
+            }
+        ]
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one_and_update({"_id": id}, pipeline)
+
+    async def rewrap_many_data_key(
+        self,
+        filter: Mapping[str, Any],
+        provider: Optional[str] = None,
+        master_key: Optional[Mapping[str, Any]] = None,
+    ) -> RewrapManyDataKeyResult:
+        """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value.
+
+        :param filter: A document used to filter the data keys.
+        :param provider: The new KMS provider to use to encrypt the data keys,
+            or ``None`` to use the current KMS provider(s).
+        :param master_key: The master key fields corresponding to the new KMS
+            provider when ``provider`` is not ``None``.
+
+        :return: A :class:`RewrapManyDataKeyResult`.
+
+        This method allows you to re-encrypt all of your data-keys with a new CMK, or master key.
+        Note that this does *not* require re-encrypting any of the data in your encrypted collections,
+        but rather refreshes the key that protects the keys that encrypt the data:
+
+        .. code-block:: python
+
+            await client_encryption.rewrap_many_data_key(
+                filter={"keyAltNames": "optional filter for which keys you want to update"},
+                master_key={
+                    "provider": "azure",  # replace with your cloud provider
+                    "master_key": {
+                        # put the rest of your master_key options here
+                        "key": ""
+                    },
+                },
+            )
+
+        .. versionadded:: 4.2
+        """
+        if master_key is not None and provider is None:
+            raise ConfigurationError("A provider must be given if a master_key is given")
+        self._check_closed()
+        with _wrap_encryption_errors():
+            raw_result = await self._encryption.rewrap_many_data_key(filter, provider, master_key)
+            if raw_result is None:
+                return RewrapManyDataKeyResult()
+
+        raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS)
+        replacements = []
+        for key in raw_doc["v"]:
+            update_model = {
+                "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]},
+                "$currentDate": {"updateDate": True},
+            }
+            op = UpdateOne({"_id": key["_id"]}, update_model)
+            replacements.append(op)
+        if not replacements:
+            return RewrapManyDataKeyResult()
+        assert self._key_vault_coll is not None
+        result = await self._key_vault_coll.bulk_write(replacements)
+        return RewrapManyDataKeyResult(result)
+
+    async def __aenter__(self) -> AsyncClientEncryption[_DocumentType]:
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        await self.close()
+
+    def _check_closed(self) -> None:
+        if self._encryption is None:
+            raise InvalidOperation("Cannot use closed AsyncClientEncryption")
+
+    async def close(self) -> None:
+        """Release resources.
+
+        Note that using this class in an ``async with`` statement will
+        automatically call :meth:`close`::
+
+            async with AsyncClientEncryption(...) as client_encryption:
+                encrypted = await client_encryption.encrypt(value, ...)
diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py
new file mode 100644
index 0000000000..4a8c918133
--- /dev/null
+++ b/pymongo/asynchronous/helpers.py
@@ -0,0 +1,86 @@
+# Copyright 2024-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Miscellaneous pieces that need to be synchronized."""
+from __future__ import annotations
+
+import asyncio
+import socket
+from typing import (
+    Any,
+    Callable,
+    TypeVar,
+    cast,
+)
+
+from pymongo.errors import (
+    OperationFailure,
+)
+from pymongo.helpers_shared import _REAUTHENTICATION_REQUIRED_CODE
+
+_IS_SYNC = False
+
+# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+def _handle_reauth(func: F) -> F:
+    async def inner(*args: Any, **kwargs: Any) -> Any:
+        no_reauth = kwargs.pop("no_reauth", False)
+        from pymongo.asynchronous.pool import AsyncConnection
+        from pymongo.message import _BulkWriteContext
+
+        try:
+            return await func(*args, **kwargs)
+        except OperationFailure as exc:
+            if no_reauth:
+                raise
+            if exc.code == _REAUTHENTICATION_REQUIRED_CODE:
+                # Look for an argument that either is an AsyncConnection
+                # or has a connection attribute, so we can trigger
+                # a reauth.
+                conn = None
+                for arg in args:
+                    if isinstance(arg, AsyncConnection):
+                        conn = arg
+                        break
+                    if isinstance(arg, _BulkWriteContext):
+                        conn = arg.conn  # type: ignore[assignment]
+                        break
+                if conn:
+                    await conn.authenticate(reauthenticate=True)
+                else:
+                    raise
+                return await func(*args, **kwargs)
+            raise
+
+    return cast(F, inner)
+
+
+async def _getaddrinfo(
+    host: Any, port: Any, **kwargs: Any
+) -> list[
+    tuple[
+        socket.AddressFamily,
+        socket.SocketKind,
+        int,
+        str,
+        tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
+    ]
+]:
+    if not _IS_SYNC:
+        loop = asyncio.get_running_loop()
+        return await loop.getaddrinfo(host, port, **kwargs)  # type: ignore[return-value]
+    else:
+        return socket.getaddrinfo(host, port, **kwargs)
diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py
new file mode 100644
index 0000000000..d9bf808d55
--- /dev/null
+++ b/pymongo/asynchronous/mongo_client.py
@@ -0,0 +1,2978 @@
+# Copyright 2009-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
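An aside before `mongo_client.py`: the `_handle_reauth` decorator in `helpers.py` above retries an operation exactly once after a reauthentication-required server error. A standalone sketch of that retry-once shape, using stand-in (hypothetical) connection and error types rather than PyMongo's own; 391 is assumed here as the value behind `_REAUTHENTICATION_REQUIRED_CODE`:

```python
import asyncio
from typing import Any, Callable

REAUTH_CODE = 391  # assumed value of _REAUTHENTICATION_REQUIRED_CODE


class DemoOperationFailure(Exception):
    def __init__(self, code: int) -> None:
        super().__init__(f"error code {code}")
        self.code = code


class DemoConnection:
    """Stand-in for AsyncConnection; only supports reauthentication."""

    def __init__(self) -> None:
        self.reauthenticated = False

    async def authenticate(self, reauthenticate: bool = False) -> None:
        self.reauthenticated = True


def handle_reauth(func: Callable[..., Any]) -> Callable[..., Any]:
    async def inner(conn: DemoConnection, *args: Any, **kwargs: Any) -> Any:
        try:
            return await func(conn, *args, **kwargs)
        except DemoOperationFailure as exc:
            if exc.code != REAUTH_CODE:
                raise
            # Re-authenticate on the failing connection, then retry once.
            await conn.authenticate(reauthenticate=True)
            return await func(conn, *args, **kwargs)

    return inner


@handle_reauth
async def run_command(conn: DemoConnection) -> str:
    if not conn.reauthenticated:
        raise DemoOperationFailure(REAUTH_CODE)
    return "ok"


print(asyncio.run(run_command(DemoConnection())))  # -> ok
```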
+ +"""Tools for connecting to MongoDB. + +.. seealso:: `Read and Write Settings `_ for examples of connecting + to replica sets or sets of mongos servers. + +To get a :class:`~pymongo.asynchronous.database.AsyncDatabase` instance from a +:class:`AsyncMongoClient` use either dictionary-style or attribute-style +access: + +.. doctest:: + + >>> from pymongo import AsyncMongoClient + >>> c = AsyncMongoClient() + >>> c.test_database + AsyncDatabase(AsyncMongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + AsyncDatabase(AsyncMongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') +""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import warnings +import weakref +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + AsyncGenerator, + Callable, + Collection, + Coroutine, + FrozenSet, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +from bson.timestamp import Timestamp +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.asynchronous import client_session, database, uri_parser +from pymongo.asynchronous.change_stream import AsyncChangeStream, AsyncClusterChangeStream +from pymongo.asynchronous.client_bulk import _AsyncClientBulk +from pymongo.asynchronous.client_session import _EmptyServerSession +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _async_create_lock, + _release_locks, +) +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) +from pymongo.message import _CursorAddress, _GetMore, _Query +from pymongo.monitoring import ConnectionClosedReason, _EventListeners +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) +from pymongo.uri_parser_shared import ( + SRV_SCHEME, + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, + _validate_uri, + split_hosts, +) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.asynchronous.bulk import 
_AsyncBulk
+    from pymongo.asynchronous.client_session import AsyncClientSession, _ServerSession
+    from pymongo.asynchronous.cursor import _ConnectionManager
+    from pymongo.asynchronous.encryption import _Encrypter
+    from pymongo.asynchronous.pool import AsyncConnection
+    from pymongo.asynchronous.server import Server
+    from pymongo.read_concern import ReadConcern
+    from pymongo.response import Response
+    from pymongo.server_selectors import Selection
+
+
+T = TypeVar("T")
+
+_WriteCall = Callable[
+    [Optional["AsyncClientSession"], "AsyncConnection", bool], Coroutine[Any, Any, T]
+]
+_ReadCall = Callable[
+    [Optional["AsyncClientSession"], "Server", "AsyncConnection", _ServerMode],
+    Coroutine[Any, Any, T],
+]
+
+_IS_SYNC = False
+
+_WriteOp = Union[
+    InsertOne,  # type: ignore[type-arg]
+    DeleteOne,
+    DeleteMany,
+    ReplaceOne,  # type: ignore[type-arg]
+    UpdateOne,
+    UpdateMany,
+]
+
+
+class AsyncMongoClient(common.BaseObject, Generic[_DocumentType]):
+    HOST = "localhost"
+    PORT = 27017
+    # Define order to retrieve options from ClientOptions for __repr__.
+    # No host/port; these are retrieved from TopologySettings.
+    _constructor_args = ("document_class", "tz_aware", "connect")
+    _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary()  # type: ignore[type-arg]
+
+    def __init__(
+        self,
+        host: Optional[Union[str, Sequence[str]]] = None,
+        port: Optional[int] = None,
+        document_class: Optional[Type[_DocumentType]] = None,
+        tz_aware: Optional[bool] = None,
+        connect: Optional[bool] = None,
+        type_registry: Optional[TypeRegistry] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Client for a MongoDB instance, a replica set, or a set of mongoses.
+
+        .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of
+          False instead of None.
+          For more details, see the relevant section of the PyMongo 4.x migration guide:
+          :ref:`pymongo4-migration-direct-connection`.
+
+        The client object is thread-safe and has connection-pooling built in.
+        If an operation fails because of a network error,
+        :class:`~pymongo.errors.ConnectionFailure` is raised and the client
+        reconnects in the background. Application code should handle this
+        exception (recognizing that the operation failed) and then continue to
+        execute.
+
+        Best practice is to call :meth:`AsyncMongoClient.close` when the client is no longer needed,
+        or use the client in an async with statement::
+
+            async with AsyncMongoClient(url) as client:
+                # Use client here.
+
+        The `host` parameter can be a full `mongodb URI
+        `_, in addition to
+        a simple hostname. It can also be a list of hostnames but no more
+        than one URI. Any port specified in the host string(s) will override
+        the `port` parameter. For usernames and passwords, reserved characters
+        like ':', '/', '+' and '@' must be
+        percent encoded following RFC 2396::
+
+            from urllib.parse import quote_plus
+
+            uri = "mongodb://%s:%s@%s" % (
+                quote_plus(user), quote_plus(password), host)
+            client = AsyncMongoClient(uri)
+
+        Unix domain sockets are also supported. The socket path must be percent
+        encoded in the URI::
+
+            uri = "mongodb://%s:%s@%s" % (
+                quote_plus(user), quote_plus(password), quote_plus(socket_path))
+            client = AsyncMongoClient(uri)
+
+        But not when passed as a simple hostname::
+
+            client = AsyncMongoClient('/tmp/mongodb-27017.sock')
+
+        Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The
+        URI must include one, and only one, hostname. The hostname will be
+        resolved to one or more DNS `SRV records
+        `_ which will be used
+        as the seed list for connecting to the MongoDB deployment. When using
+        SRV URIs, the `authSource` and `replicaSet` configuration options can
+        be specified using `TXT records
+        `_. See the
+        `Initial DNS Seedlist Discovery spec
+        `_
+        for more details. Note that the use of SRV URIs implicitly enables
+        TLS support. Pass tls=false in the URI to override.
+
+        .. note:: AsyncMongoClient creation will block waiting for answers from
+          DNS when mongodb+srv:// URIs are used.
+
+        .. note:: Starting with version 3.0 the :class:`AsyncMongoClient`
+          constructor no longer blocks while connecting to the server or
+          servers, and it no longer raises
+          :class:`~pymongo.errors.ConnectionFailure` if they are
+          unavailable, nor :class:`~pymongo.errors.ConfigurationError`
+          if the user's credentials are wrong. Instead, the constructor
+          returns immediately and launches the connection process on
+          background threads. You can check if the server is available
+          like this::
+
+            from pymongo.errors import ConnectionFailure
+            client = AsyncMongoClient()
+            try:
+                # The ping command is cheap and does not require auth.
+                await client.admin.command('ping')
+            except ConnectionFailure:
+                print("Server not available")
+
+        .. warning:: When using PyMongo in a multiprocessing context, please
+          read `PyMongo multiprocessing `_ first.
+
+        .. note:: Many of the following options can be passed using a MongoDB
+          URI or keyword parameters. If the same option is passed in a URI and
+          as a keyword parameter the keyword parameter takes precedence.
+
+        :param host: hostname or IP address or Unix domain socket
+            path of a single mongod or mongos instance to connect to, or a
+            mongodb URI, or a list of hostnames (but no more than one mongodb
+            URI). If `host` is an IPv6 literal it must be enclosed in '['
+            and ']' characters
+            following the RFC2732 URL syntax (e.g. '[::1]' for localhost).
+            Multihomed and round robin DNS addresses are **not** supported.
+        :param port: port number on which to connect
+        :param document_class: default class to use for
+            documents returned from queries on this client
+        :param tz_aware: if ``True``,
+            :class:`~datetime.datetime` instances returned as values
+            in a document by this :class:`AsyncMongoClient` will be timezone
+            aware (otherwise they will be naive)
+        :param connect: **Not supported by AsyncMongoClient**.
+        :param type_registry: instance of
+            :class:`~bson.codec_options.TypeRegistry` to enable encoding
+            and decoding of custom types.
+        :param kwargs: **Additional optional parameters available as keyword arguments:**
+
+          - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded
+            within BSON. Valid options include 'datetime_ms' to return as a
+            DatetimeMS, 'datetime' to return as a datetime.datetime and
+            raising a ValueError for out-of-range values, 'datetime_auto' to
+            return DatetimeMS objects when the underlying datetime is
+            out-of-range and 'datetime_clamp' to clamp to the minimum and
+            maximum possible datetimes. Defaults to 'datetime'. See
+            `handling out of range datetimes `_ for details.
+          - `directConnection` (optional): if ``True``, forces this client to
+            connect directly to the specified MongoDB host as a standalone.
+            If ``False``, the client connects to the entire replica set of
+            which the given MongoDB host(s) is a part. If this is ``True``
+            and a mongodb+srv:// URI or a URI containing multiple seeds is
+            provided, an exception will be raised.
+ - `maxPoolSize` (optional): The maximum allowable number of + concurrent connections to each connected server. Requests to a + server will block if there are `maxPoolSize` outstanding + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. + - `minPoolSize` (optional): The minimum required number of concurrent + connections that the pool will maintain to each connected server. + Default is 0. + - `maxIdleTimeMS` (optional): The maximum number of milliseconds that + a connection can remain idle in the pool before being removed and + replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. + - `socketTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait for a response after sending an + ordinary (non-monitoring) database operation before concluding that + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). + - `connectTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait during server monitoring when + connecting a new socket to a server before concluding the server + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). + - `server_selector`: (callable or None) Optional, user-provided + function that augments server selection rules. The function should + accept as an argument a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + - `serverSelectionTimeoutMS`: (integer) Controls how long (in + milliseconds) the driver will wait to find an available, + appropriate server to carry out a database operation; while it is + waiting, multiple server monitoring operations may be carried out, + each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 + seconds). + - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) + a thread will wait for a socket from the pool if the pool has no + free sockets. Defaults to ``None`` (no timeout). + - `heartbeatFrequencyMS`: (optional) The number of milliseconds + between periodic server checks, or None to accept the default + frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". + - `appname`: (string or None) The name of the application that + created this AsyncMongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. + - `driver`: (pair or None) A driver implemented on top of PyMongo can + pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, + version, and platform to the message printed in the server log when + establishing a connection. + - `event_listeners`: a list or tuple of event listeners. See + :mod:`~pymongo.monitoring` for details. + - `retryWrites`: (boolean) Whether supported write operations + executed within this AsyncMongoClient will be retried once after a + network error. 
Defaults to ``True``.
+            The supported write operations are:
+
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, as long as
+              :class:`~pymongo.asynchronous.operations.UpdateMany` or
+              :class:`~pymongo.asynchronous.operations.DeleteMany` are not included.
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.delete_one`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_one`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_many`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.replace_one`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.update_one`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_delete`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_replace`
+            - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_update`
+
+            Unsupported write operations include, but are not limited to,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` using the ``$out``
+            pipeline operator and any operation with an unacknowledged write
+            concern (e.g. {w: 0}). See
+            https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md
+          - `retryReads`: (boolean) Whether supported read operations
+            executed within this AsyncMongoClient will be retried once after a
+            network error. Defaults to ``True``.
+            The supported read operations are:
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.find`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` without ``$out``,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.count`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.estimated_document_count`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.count_documents`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.watch`,
+            :meth:`~pymongo.asynchronous.collection.AsyncCollection.list_indexes`,
+            :meth:`~pymongo.asynchronous.database.AsyncDatabase.watch`,
+            :meth:`~pymongo.asynchronous.database.AsyncDatabase.list_collections`,
+            :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.watch`,
+            and :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.list_databases`.
+
+            Unsupported read operations include, but are not limited to,
+            :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` and any getMore
+            operation on a cursor.
+
+            Enabling retryable reads makes applications more resilient to
+            transient errors such as network failures, database upgrades, and
+            replica set failovers. For an exact definition of which errors
+            trigger a retry, see the `retryable reads specification
+            `_.
+
+          - `compressors`: Comma separated list of compressors for wire
+            protocol compression. The list is used to negotiate a compressor
+            with the server. Currently supported options are "snappy", "zlib"
+            and "zstd". Support for snappy requires the
+            `python-snappy `_ package.
+            zlib support requires the Python standard library zlib module. zstd
+            requires the `zstandard `_
+            package. By default no compression is used. Compression support
+            must also be enabled on the server. MongoDB 3.6+ supports snappy
+            and zlib compression. MongoDB 4.2+ adds support for zstd.
+            See `compress network traffic `_ for details.
+ - `zlibCompressionLevel`: (int) The zlib compression level to use + when zlib is used as the wire protocol compressor. Supported values + are -1 through 9. -1 tells the zlib library to use its default + compression level (usually 6). 0 means no compression. 1 is best + speed. 9 is best compression. Defaults to -1. + - `uuidRepresentation`: The BSON representation to use when encoding + from and decoding to instances of :class:`~uuid.UUID`. Valid + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See `handling UUID data `_ for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + AsyncMongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. + + + | **Write Concern options:** + | (Only set if passed. No default values.) + + - `w`: (integer or string) If this is a replica set, write operations + will block until they have been replicated to the specified number + or tagged set of servers. `w=` always includes the replica set + primary (e.g. w=3 means write to the primary and wait until + replicated to **two** secondaries). Passing w=0 **disables write + acknowledgement** and all other write concern options. + - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write propagation + to complete. If replication does not complete in the given + timeframe, a timeout exception is raised. Passing wTimeoutMS=0 + will cause **write operations to wait indefinitely**. + - `journal`: If ``True`` block until write operations have been + committed to the journal. Cannot be used in combination with + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. + - `fsync`: If ``True`` and the server is running without journaling, + blocks until the server has synced all data files to disk. If the + server is running with journaling, this acts the same as the `j` + option, blocking until write operations have been committed to the + journal. Cannot be used in combination with `j`. + + | **Replica set keyword arguments for connecting with a replica set + - either directly or via a mongos:** + + - `replicaSet`: (string or None) The name of the replica set to + connect to. The driver will verify that all servers it connects to + match this name. Implies that the hosts specified are a seed list + and the driver should attempt to find all members of the set. + Defaults to ``None``. + + | **Read Preference:** + + - `readPreference`: The replica set read preference for this client. + One of ``primary``, ``primaryPreferred``, ``secondary``, + ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. 
+ - `readPreferenceTags`: Specifies a tag set as a comma-separated list + of colon-separated key-value pairs. For example ``dc:ny,rack:1``. + Defaults to ``None``. + - `maxStalenessSeconds`: (integer) The maximum estimated + length of time a replica set secondary can fall behind the primary + in replication before it will no longer be selected for operations. + Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds + is set, it must be a positive integer greater than or equal to + 90 seconds. + + .. seealso:: `Customize Server Selection `_ + + | **Authentication:** + + - `username`: A string. + - `password`: A string. + + Although username and password must be percent-escaped in a MongoDB + URI, they must not be percent-escaped when passed as parameters. In + this example, both the space and slash special characters are passed + as-is:: + + AsyncMongoClient(username="user name", password="pass/word") + + - `authSource`: The database to authenticate on. Defaults to the + database specified in the URI, if provided, or to "admin". + - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. + If no mechanism is specified, PyMongo automatically negotiates the + mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server. + - `authMechanismProperties`: Used to specify authentication mechanism + specific options. To specify the service name for GSSAPI + authentication pass authMechanismProperties='SERVICE_NAME:'. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:'``. + + .. seealso:: `Authentication `_ + + | **TLS/SSL configuration:** + + - `tls`: (boolean) If ``True``, create the connection to the server + using transport layer security. Defaults to ``False``. + - `tlsInsecure`: (boolean) Specify whether TLS constraints should be + relaxed as much as possible. Setting ``tlsInsecure=True`` implies + ``tlsAllowInvalidCertificates=True`` and + ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think + very carefully before setting this to ``True`` as it dramatically + reduces the security of TLS. + - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues + the TLS handshake regardless of the outcome of the certificate + verification process. If this is ``False``, and a value is not + provided for ``tlsCAFile``, PyMongo will attempt to load system + provided CA certificates. If the python version in use does not + support loading system CA certificates then the ``tlsCAFile`` + parameter must point to a file of CA certificates. + ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. + Defaults to ``False``. Think very carefully before setting this + to ``True`` as that could make your application vulnerable to + on-path attackers. + - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS + hostname verification. ``tlsAllowInvalidHostnames=False`` implies + ``tls=True``. Defaults to ``False``. Think very carefully before + setting this to ``True`` as that could make your application + vulnerable to on-path attackers. + - `tlsCAFile`: A file containing a single or a bundle of + "certification authority" certificates, which are used to validate + certificates passed from the other end of the connection. + Implies ``tls=True``. Defaults to ``None``. + - `tlsCertificateKeyFile`: A file containing the client certificate + and private key. Implies ``tls=True``. Defaults to ``None``. + - `tlsCRLFile`: A file containing a PEM or DER formatted + certificate revocation list. 
Implies ``tls=True``. Defaults to
+            ``None``.
+          - `tlsCertificateKeyFilePassword`: The password or passphrase for
+            decrypting the private key in ``tlsCertificateKeyFile``. Only
+            necessary if the private key is encrypted. Defaults to ``None``.
+          - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables
+            certificate revocation status checking via the OCSP responder
+            specified on the server certificate.
+            ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``.
+            Defaults to ``False``.
+          - `ssl`: (boolean) Alias for ``tls``.
+
+          | **Read Concern options:**
+          | (If not set explicitly, this will use the server default)
+
+          - `readConcernLevel`: (string) The read concern level specifies the
+            level of isolation for read operations. For example, a read
+            operation using a read concern level of ``majority`` will only
+            return data that has been written to a majority of nodes. If the
+            level is left unspecified, the server default will be used.
+
+          | **Client side encryption options:**
+          | (If not set explicitly, client side encryption will not be enabled.)
+
+          - `auto_encryption_opts`: A
+            :class:`~pymongo.encryption_options.AutoEncryptionOpts` which
+            configures this client to automatically encrypt collection commands
+            and automatically decrypt results. See
+            `client-side field level encryption `_ for an example.
+            If a :class:`AsyncMongoClient` is configured with
+            ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a
+            separate internal ``AsyncMongoClient`` is created if any of the
+            following are true:
+
+            - A ``key_vault_client`` is not passed to
+              :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+            - ``bypass_auto_encryption=False`` is passed to
+              :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+
+          | **Stable API options:**
+          | (If not set explicitly, Stable API will not be enabled.)
+
+          - `server_api`: A
+            :class:`~pymongo.server_api.ServerApi` which configures this
+            client to use Stable API. See `versioned API `_ for
+            details.
+
+        .. seealso:: The MongoDB documentation on `connections `_.
+
+        .. versionchanged:: 4.5
+           Added the ``serverMonitoringMode`` keyword argument.
+
+        .. versionchanged:: 4.2
+           Added the ``timeoutMS`` keyword argument.
+
+        .. versionchanged:: 4.0
+
+           - Removed the fsync, unlock, is_locked, database_names, and
+             close_cursor methods.
+             See the :ref:`pymongo4-migration-guide`.
+           - Removed the ``waitQueueMultiple`` and ``socketKeepAlive``
+             keyword arguments.
+           - The default for `uuidRepresentation` was changed from
+             ``pythonLegacy`` to ``unspecified``.
+           - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and
+             keyword arguments.
+
+        .. versionchanged:: 3.12
+           Added the ``server_api`` keyword argument.
+           The following keyword arguments were deprecated:
+
+           - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor
+             of ``tlsCertificateKeyFile``.
+
+        .. versionchanged:: 3.11
+           Added the following keyword arguments and URI options:
+
+           - ``tlsDisableOCSPEndpointCheck``
+           - ``directConnection``
+
+        .. versionchanged:: 3.9
+           Added the ``retryReads`` keyword argument and URI option.
+           Added the ``tlsInsecure`` keyword argument and URI option.
+           The following keyword arguments and URI options were deprecated:
+
+           - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``.
+           - ``j`` was deprecated in favor of ``journal``.
+           - ``ssl_cert_reqs`` was deprecated in favor of
+             ``tlsAllowInvalidCertificates``.
+           - ``ssl_match_hostname`` was deprecated in favor of
+             ``tlsAllowInvalidHostnames``.
+ - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``. + - ``ssl_certfile`` was deprecated in favor of + ``tlsCertificateKeyFile``. + - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``. + - ``ssl_pem_passphrase`` was deprecated in favor of + ``tlsCertificateKeyFilePassword``. + + .. versionchanged:: 3.9 + ``retryWrites`` now defaults to ``True``. + + .. versionchanged:: 3.8 + Added the ``server_selector`` keyword argument. + Added the ``type_registry`` keyword argument. + + .. versionchanged:: 3.7 + Added the ``driver`` keyword argument. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + Added the ``retryWrites`` keyword argument and URI option. + + .. versionchanged:: 3.5 + Add ``username`` and ``password`` options. Document the + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` + options. + Deprecated the ``socketKeepAlive`` keyword argument and URI option. + ``socketKeepAlive`` now defaults to ``True``. + + .. versionchanged:: 3.0 + :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` is now the one and only + client class for a standalone server, mongos, or replica set. + It includes the functionality that had been split into + :class:`~pymongo.asynchronous.mongo_client.MongoReplicaSetClient`: it can connect + to a replica set, discover all its members, and monitor the set for + stepdowns, elections, and reconfigs. + + The :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` constructor no + longer blocks while connecting to the server or servers, and it no + longer raises :class:`~pymongo.errors.ConnectionFailure` if they + are unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. + + Therefore the ``alive`` method is removed since it no longer + provides meaningful information; even if the client is disconnected, + it may discover a server in time to fulfill the next operation. + + In PyMongo 2.x, :class:`~pymongo.asynchronous.AsyncMongoClient` accepted a list of + standalone MongoDB servers and used the first it could connect to:: + + AsyncMongoClient(['host1.com:27017', 'host2.com:27017']) + + A list of multiple standalones is no longer supported; if multiple + servers are listed they must be members of the same replica set, or + mongoses in the same sharded cluster. + + The behavior for a list of mongoses is changed from "high + availability" to "load balancing". Before, the client connected to + the lowest-latency mongos in the list, and used it until a network + error prompted it to re-evaluate all mongoses' latencies and + reconnect to one of them. In PyMongo 3, the client monitors its + network latency to all the mongoses continuously, and distributes + operations evenly among those with the lowest latency. See + `load balancing `_ for more information. + + The ``connect`` option is added. + + The ``start_request``, ``in_request``, and ``end_request`` methods + are removed, as well as the ``auto_start_request`` option. + + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. + + The :meth:`AsyncMongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.asynchronous.AsyncMongoClient.close`. + + :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` no longer returns an + instance of :class:`~pymongo.asynchronous.database.AsyncDatabase` for attribute names + with leading underscores. 
You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. + """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} + + self._seeds = set() + is_srv = False + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in self._host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + res = _validate_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + srv_max_hosts=srv_max_hosts, + ) + is_srv = entity.startswith(SRV_SCHEME) + self._seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in self._seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. 
+ from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, self._seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", username) + password = opts.get("password", password) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + + self._default_database_name = dbase + self._lock = _async_create_lock() + self._kill_cursors_queue: list = [] # type: ignore[type-arg] + + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": self._seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } + ) + + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, + ) + + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) + + self._opened = False + self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None + if not is_srv: + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + async def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = await uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. 
+ from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, + ) + if self._options.auto_encryption_opts: + from pymongo.asynchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts + + async def aconnect(self) -> None: + """Explicitly connect to MongoDB asynchronously instead of on the first operation.""" + await self._get_topology() + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. 
+            AsyncMongoClient._clients[self._topology._topology_id] = self
+        # Seed the topology with the old one's pid so we can detect clients
+        # that are opened before a fork and used after.
+        self._topology._pid = old_pid
+
+        async def target() -> bool:
+            client = self_ref()
+            if client is None:
+                return False  # Stop the executor.
+            await AsyncMongoClient._process_periodic_tasks(client)
+            return True
+
+        executor = periodic_executor.AsyncPeriodicExecutor(
+            interval=common.KILL_CURSOR_FREQUENCY,
+            min_interval=common.MIN_HEARTBEAT_INTERVAL,
+            target=target,
+            name="pymongo_kill_cursors_thread",
+        )
+
+        # We strongly reference the executor and it weakly references us via
+        # this closure. When the client is freed, stop the executor soon.
+        self_ref: Any = weakref.ref(self, executor.close)
+        self._kill_cursors_executor = executor
+        self._opened = False
+
+    def append_metadata(self, driver_info: DriverInfo) -> None:
+        """Appends the given metadata to existing driver metadata.
+
+        :param driver_info: a :class:`~pymongo.driver_info.DriverInfo`
+
+        .. versionadded:: 4.14
+        """
+
+        if not isinstance(driver_info, DriverInfo):
+            raise TypeError(
+                f"driver_info must be an instance of DriverInfo, not {type(driver_info)}"
+            )
+        self._options.pool_options._update_metadata(driver_info)
+
+    def _should_pin_cursor(self, session: Optional[AsyncClientSession]) -> Optional[bool]:
+        return self._options.load_balanced and not (session and session.in_transaction)
+
+    def _after_fork(self) -> None:
+        """Resets topology in a child after successfully forking."""
+        self._init_background(self._topology._pid)
+        # Reset the session pool to avoid duplicate sessions in the child process.
+        self._topology._session_pool.reset()
+
+    def _duplicate(self, **kwargs: Any) -> AsyncMongoClient:  # type: ignore[type-arg]
+        args = self._init_kwargs.copy()
+        args.update(kwargs)
+        return AsyncMongoClient(**args)
+
+    async def watch(
+        self,
+        pipeline: Optional[_Pipeline] = None,
+        full_document: Optional[str] = None,
+        resume_after: Optional[Mapping[str, Any]] = None,
+        max_await_time_ms: Optional[int] = None,
+        batch_size: Optional[int] = None,
+        collation: Optional[_CollationIn] = None,
+        start_at_operation_time: Optional[Timestamp] = None,
+        session: Optional[client_session.AsyncClientSession] = None,
+        start_after: Optional[Mapping[str, Any]] = None,
+        comment: Optional[Any] = None,
+        full_document_before_change: Optional[str] = None,
+        show_expanded_events: Optional[bool] = None,
+    ) -> AsyncChangeStream[_DocumentType]:
+        """Watch changes on this cluster.
+
+        Performs an aggregation with an implicit initial ``$changeStream``
+        stage and returns a
+        :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor which
+        iterates over changes on all databases on this cluster.
+
+        Introduced in MongoDB 4.0.
+
+        .. code-block:: python
+
+            async with await client.watch() as stream:
+                async for change in stream:
+                    print(change)
+
+        The :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` iterable
+        blocks until the next change document is returned or an error is
+        raised. If the
+        :meth:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream.next` method
+        encounters a network error when retrieving a batch from the server,
+        it will automatically attempt to recreate the cursor such that no
+        change events are missed. Any error encountered during the resume
+        attempt indicates there may be an outage and will be raised.
+
+        .. code-block:: python
+
+            try:
+                async with await client.watch([{"$match": {"operationType": "insert"}}]) as stream:
+                    async for insert_change in stream:
+                        print(insert_change)
+            except pymongo.errors.PyMongoError:
+                # The AsyncChangeStream encountered an unrecoverable error or the
+                # resume attempt failed to recreate the cursor.
+                logging.error("...")
+
+        For a precise description of the resume process see the
+        `change streams specification`_.
+
+        :param pipeline: A list of aggregation pipeline stages to
+            append to an initial ``$changeStream`` stage. Not all
+            pipeline stages are valid after a ``$changeStream`` stage, see the
+            MongoDB documentation on change streams for the supported stages.
+        :param full_document: The fullDocument to pass as an option
+            to the ``$changeStream`` stage. Allowed values: 'updateLookup',
+            'whenAvailable', 'required'. When set to 'updateLookup', the
+            change notification for partial updates will include both a delta
+            describing the changes to the document, as well as a copy of the
+            entire document that was changed from some time after the change
+            occurred.
+        :param full_document_before_change: Allowed values: 'whenAvailable'
+            and 'required'. Change events may now result in a
+            'fullDocumentBeforeChange' response field.
+        :param resume_after: A resume token. If provided, the
+            change stream will start returning changes that occur directly
+            after the operation specified in the resume token. A resume token
+            is the _id value of a change document.
+        :param max_await_time_ms: The maximum time in milliseconds
+            for the server to wait for changes before responding to a getMore
+            operation.
+        :param batch_size: The maximum number of documents to return
+            per batch.
+        :param collation: The :class:`~pymongo.collation.Collation`
+            to use for the aggregation.
+        :param start_at_operation_time: If provided, the resulting
+            change stream will only return changes that occurred at or after
+            the specified :class:`~bson.timestamp.Timestamp`. Requires
+            MongoDB >= 4.0.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param start_after: The same as `resume_after` except that
+            `start_after` can resume notifications after an invalidate event.
+            This option and `resume_after` are mutually exclusive.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+        :return: A :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor.
+
+        .. versionchanged:: 4.3
+           Added `show_expanded_events` parameter.
+
+        .. versionchanged:: 4.2
+           Added ``full_document_before_change`` parameter.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.9
+           Added the ``start_after`` parameter.
+
+        .. versionadded:: 3.7
+
+        .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+        .. _change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = AsyncClusterChangeStream(
+            self.admin,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events=show_expanded_events,
+        )
+
+        await change_stream._initialize_cursor()
+        return change_stream
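For orientation, a hedged end-to-end sketch of `watch` as defined above, assuming a replica set or sharded cluster reachable at the default URI (change streams are not available on standalones):

```python
import asyncio

from pymongo import AsyncMongoClient


async def main() -> None:
    client = AsyncMongoClient()
    # watch() is a coroutine returning an AsyncClusterChangeStream,
    # hence the `await` inside `async with`.
    async with await client.watch([{"$match": {"operationType": "insert"}}]) as stream:
        async for change in stream:
            print(change["fullDocument"])
            break  # stop after the first insert for this sketch
    await client.close()


asyncio.run(main())
```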
+
+    @property
+    def topology_description(self) -> TopologyDescription:
+        """The description of the connected MongoDB deployment.
+
+        >>> client.topology_description
+        <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ...>, <ServerDescription ...>, <ServerDescription ...>]>
+        >>> client.topology_description.topology_type_name
+        'ReplicaSetWithPrimary'
+
+        Note that the description is periodically updated in the background
+        but the returned object itself is immutable. Access this property again
+        to get a more recent
+        :class:`~pymongo.topology_description.TopologyDescription`.
+
+        :return: An instance of
+            :class:`~pymongo.topology_description.TopologyDescription`.
+
+        .. versionadded:: 4.0
+        """
+        if self._topology is None:
+            servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds}
+            return TopologyDescription(
+                TOPOLOGY_TYPE.Unknown,
+                servers,
+                None,
+                None,
+                None,
+                self._topology_settings,
+            )
+        return self._topology.description
+
+    @property
+    def nodes(self) -> FrozenSet[_Address]:
+        """Set of all currently connected servers.
+
+        .. warning:: When connected to a replica set the value of :attr:`nodes`
+          can change over time as :class:`AsyncMongoClient`'s view of the replica
+          set changes. :attr:`nodes` can also be an empty set when
+          :class:`AsyncMongoClient` is first instantiated and hasn't yet connected
+          to any servers, or a network partition causes it to lose connection
+          to all servers.
+        """
+        if self._topology is None:
+            return frozenset()
+        description = self._topology.description
+        return frozenset(s.address for s in description.known_servers)
+
+    @property
+    def options(self) -> ClientOptions:
+        """The configuration options for this client.
+
+        :return: An instance of :class:`~pymongo.client_options.ClientOptions`.
+
+        .. versionadded:: 4.0
+        """
+        return self._options
+
+    def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]:
+        return (
+            tuple(sorted(self._resolve_srv_info["seeds"])),
+            self._options.replica_set_name,
+            self._resolve_srv_info["fqdn"],
+            self._resolve_srv_info["srv_service_name"],
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, self.__class__):
+            return self.eq_props() == other.eq_props()
+        return NotImplemented
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __hash__(self) -> int:
+        return hash(self.eq_props())
+
+    def _repr_helper(self) -> str:
+        def option_repr(option: str, value: Any) -> str:
+            """Fix options whose __repr__ isn't usable in a constructor."""
+            if option == "document_class":
+                if value is dict:
+                    return "document_class=dict"
+                else:
+                    return f"document_class={value.__module__}.{value.__name__}"
+            if option in common.TIMEOUT_OPTIONS and value is not None:
+                return f"{option}={int(value * 1000)}"
+
+            return f"{option}={value!r}"
+
+        # Host first...
+        if self._topology is None:
+            options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"]
+        else:
+            options = [
+                "host=%r"
+                % [
+                    "%s:%d" % (host, port) if port is not None else host
+                    for host, port in self._topology_settings.seeds
+                ]
+            ]
+        # ... then everything in self._constructor_args...
+        options.extend(
+            option_repr(key, self._options._options[key]) for key in self._constructor_args
+        )
+        # ... then everything else.
+        options.extend(
+            option_repr(key, self._options._options[key])
+            for key in self._options._options
+            if key not in set(self._constructor_args) and key != "username" and key != "password"
+        )
+        return ", ".join(options)
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({self._repr_helper()})"
+
+    def __getattr__(self, name: str) -> database.AsyncDatabase[_DocumentType]:
+        """Get a database by name.
+ + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.AsyncDatabase[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.AsyncDatabase(self, name) + + def __del__(self) -> None: + """Check that this AsyncMongoClient has been closed and issue a warning if not.""" + try: + if self._opened and not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + ) + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. + pass + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> AsyncClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.AsyncClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.AsyncClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.asynchronous.client_session.SessionOptions`. See the + :mod:`~pymongo.asynchronous.client_session` module for details and examples. + + A :class:`~pymongo.asynchronous.client_session.AsyncClientSession` may only be used with + the AsyncMongoClient that started it. :class:`AsyncClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`AsyncClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session( + self, session: Optional[AsyncClientSession] = None + ) -> Optional[AsyncClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. 
+            return None
+
+    def _send_cluster_time(
+        self, command: MutableMapping[str, Any], session: Optional[AsyncClientSession]
+    ) -> None:
+        topology_time = self._topology.max_cluster_time()
+        session_time = session.cluster_time if session else None
+        if topology_time and session_time:
+            if topology_time["clusterTime"] > session_time["clusterTime"]:
+                cluster_time: Optional[ClusterTime] = topology_time
+            else:
+                cluster_time = session_time
+        else:
+            cluster_time = topology_time or session_time
+        if cluster_time:
+            command["$clusterTime"] = cluster_time
+
+    def get_default_database(
+        self,
+        default: Optional[str] = None,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> database.AsyncDatabase[_DocumentType]:
+        """Get the database named in the MongoDB connection URI.
+
+        >>> uri = 'mongodb://host/my_database'
+        >>> client = AsyncMongoClient(uri)
+        >>> db = client.get_default_database()
+        >>> assert db.name == 'my_database'
+        >>> db = client.get_database()
+        >>> assert db.name == 'my_database'
+
+        Useful in scripts where you want to choose which database to use
+        based only on the URI in a configuration file.
+
+        :param default: the database name to use if no database name
+            was provided in the URI.
+        :param codec_options: An instance of
+            :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
+            default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is
+            used.
+        :param read_preference: The read preference to use. If
+            ``None`` (the default) the :attr:`read_preference` of this
+            :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences`
+            for options.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
+            default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is
+            used.
+        :param read_concern: An instance of
+            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
+            default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is
+            used.
+
+        .. versionchanged:: 3.8
+           Undeprecated. Added the ``default``, ``codec_options``,
+           ``read_preference``, ``write_concern`` and ``read_concern``
+           parameters.
+
+        .. versionchanged:: 3.5
+           Deprecated, use :meth:`get_database` instead.
+        """
+        if self._default_database_name is None and default is None:
+            raise ConfigurationError("No default database name defined or provided.")
+
+        name = cast(str, self._default_database_name or default)
+        return database.AsyncDatabase(
+            self, name, codec_options, read_preference, write_concern, read_concern
+        )
+
+    def get_database(
+        self,
+        name: Optional[str] = None,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> database.AsyncDatabase[_DocumentType]:
+        """Get a :class:`~pymongo.asynchronous.database.AsyncDatabase` with the given name and
+        options.
+
+        Useful for creating a :class:`~pymongo.asynchronous.database.AsyncDatabase` with
+        different codec options, read preference, and/or write concern from
+        this :class:`AsyncMongoClient`.
+ + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.AsyncDatabase( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.AsyncDatabase: # type: ignore[type-arg] + """Get a AsyncDatabase instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + async def __aenter__(self) -> AsyncMongoClient[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncMongoClient' object is not iterable") + + next = __next__ + + async def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = await (await self._get_topology()).select_server( + writable_server_selector, _Op.TEST + ) + + return getattr(server.description, attr_name) + + @property + async def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. 
versionadded:: 3.0 + """ + if self._topology is None: + await self._get_topology() + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' + ) + return await self._server_property("address") + + @property + async def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + AsyncMongoClient gained this property in version 3.0. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_primary() # type: ignore[return-value] + + @property + async def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + AsyncMongoClient gained this property in version 3.0. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_secondaries() + + @property + async def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_arbiters() + + @property + async def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return await self._server_property("is_writable") + + @property + async def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return await self._server_property("server_type") == SERVER_TYPE.Mongos + + async def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use AsyncConnection.command directly to avoid implicitly creating + # another session. + async with await self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + await conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + async def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. 
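+
+        A client can also be closed automatically by using it as an async
+        context manager, since :meth:`__aexit__` calls :meth:`close`::
+
+            async with AsyncMongoClient() as client:
+                ...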
+ + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + if self._topology is None: + return + session_ids = self._topology.pop_all_sessions() + if session_ids: + await self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. + self._kill_cursors_executor.close() + await self._process_kill_cursors() + await self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + await self._encrypter.close() + self._closed = True + if not _IS_SYNC: + await asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) + + if not _IS_SYNC: + # Add support for contextlib.aclosing. + aclose = close + + async def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.asynchronous.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use AsyncMongoClient in different event loop. AsyncMongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) + if not self._opened: + if self._resolve_srv_info["is_srv"]: + await self._resolve_srv() + self._init_background() + await self._topology.open() + async with self._lock: + self._kill_cursors_executor.open() + self._opened = True + return self._topology + + @contextlib.asynccontextmanager + async def _checkout( + self, server: Server, session: Optional[AsyncClientSession] + ) -> AsyncGenerator[AsyncConnection, None]: + in_txn = session and session.in_transaction + async with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + async with await server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + async def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[AsyncClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The AsyncClientSession for the next operation, or None. 
May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = await self._get_topology() + if session and not session.in_transaction: + await session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. + server = await topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = await topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + await session._unpin() + raise + + async def _conn_for_writes( + self, session: Optional[AsyncClientSession], operation: str + ) -> AsyncContextManager[AsyncConnection]: + server = await self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.asynccontextmanager + async def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[AsyncClientSession] + ) -> AsyncGenerator[tuple[AsyncConnection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single + async with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + async def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[AsyncClientSession], + operation: str, + ) -> AsyncContextManager[tuple[AsyncConnection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + server = await self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + async def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, # type: ignore[type-arg] + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. 
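+
+        Note that only an initial ``_Query`` is considered retryable here; a
+        ``_GetMore`` is never retried (``retryable=isinstance(operation, _Query)``
+        below).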
+        """
+        if operation.conn_mgr:
+            server = await self._select_server(
+                operation.read_preference,
+                operation.session,  # type: ignore[arg-type]
+                operation.name,
+                address=address,
+            )
+
+            async with operation.conn_mgr._lock:
+                async with _MongoClientErrorHandler(self, server, operation.session) as err_handler:  # type: ignore[arg-type]
+                    err_handler.contribute_socket(operation.conn_mgr.conn)
+                    return await server.run_operation(
+                        operation.conn_mgr.conn,
+                        operation,
+                        operation.read_preference,
+                        self._event_listeners,
+                        unpack_res,
+                        self,
+                    )
+
+        async def _cmd(
+            _session: Optional[AsyncClientSession],
+            server: Server,
+            conn: AsyncConnection,
+            read_preference: _ServerMode,
+        ) -> Response:
+            operation.reset()  # Reset op in case of retry.
+            return await server.run_operation(
+                conn,
+                operation,
+                read_preference,
+                self._event_listeners,
+                unpack_res,
+                self,
+            )
+
+        return await self._retryable_read(
+            _cmd,
+            operation.read_preference,
+            operation.session,  # type: ignore[arg-type]
+            address=address,
+            retryable=isinstance(operation, _Query),
+            operation=operation.name,
+        )
+
+    async def _retry_with_session(
+        self,
+        retryable: bool,
+        func: _WriteCall[T],
+        session: Optional[AsyncClientSession],
+        bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]],
+        operation: str,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Execute an operation with at most one consecutive retry.
+
+        Returns func()'s return value on success. On error retries the same
+        command.
+
+        Re-raises any exception thrown by func().
+        """
+        # Ensure that the options support retry_writes and there is a valid session not in
+        # transaction, otherwise we will not support retry behavior for this operation.
+        retryable = bool(
+            retryable and self.options.retry_writes and session and not session.in_transaction
+        )
+        return await self._retry_internal(
+            func=func,
+            session=session,
+            bulk=bulk,
+            operation=operation,
+            retryable=retryable,
+            operation_id=operation_id,
+        )
+
+    @_csot.apply
+    async def _retry_internal(
+        self,
+        func: _WriteCall[T] | _ReadCall[T],
+        session: Optional[AsyncClientSession],
+        bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]],
+        operation: str,
+        is_read: bool = False,
+        address: Optional[_Address] = None,
+        read_pref: Optional[_ServerMode] = None,
+        retryable: bool = False,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Internal retryable helper for all client transactions.
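+
+        All retry logic is delegated to :class:`_ClientConnectionRetryable`,
+        which this helper merely constructs and runs.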
+
+        :param func: Callback function we want to retry
+        :param session: Client Session on which the transaction should occur
+        :param bulk: Abstraction to handle bulk write operations
+        :param operation: The name of the operation that the server is being selected for
+        :param is_read: If this is an exclusive read transaction, defaults to False
+        :param address: Server Address, defaults to None
+        :param read_pref: The read preference for the operation, defaults to None
+        :param retryable: If the operation should be retried once, defaults to False
+
+        :return: Output of the calling func()
+        """
+        return await _ClientConnectionRetryable(
+            mongo_client=self,
+            func=func,
+            bulk=bulk,
+            operation=operation,
+            is_read=is_read,
+            session=session,
+            read_pref=read_pref,
+            address=address,
+            retryable=retryable,
+            operation_id=operation_id,
+        ).run()
+
+    async def _retryable_read(
+        self,
+        func: _ReadCall[T],
+        read_pref: _ServerMode,
+        session: Optional[AsyncClientSession],
+        operation: str,
+        address: Optional[_Address] = None,
+        retryable: bool = True,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Execute an operation with consecutive retries if possible
+
+        Returns func()'s return value on success. On error retries the same
+        command.
+
+        Re-raises any exception thrown by func().
+
+        :param func: Read call we want to execute
+        :param read_pref: The desired read preference for the operation
+        :param session: Client session we should use to execute operation
+        :param operation: The name of the operation that the server is being selected for
+        :param address: Optional address when sending a message, defaults to None
+        :param retryable: if we should attempt retries
+            (may not always be supported even if supplied), defaults to True
+        """
+
+        # Ensure that the client supports retrying on reads and there is no session in
+        # transaction, otherwise we will not support retry behavior for this call.
+        retryable = bool(
+            retryable and self.options.retry_reads and not (session and session.in_transaction)
+        )
+        async with self._tmp_session(session) as s:
+            return await self._retry_internal(
+                func,
+                s,
+                None,
+                operation,
+                is_read=True,
+                address=address,
+                read_pref=read_pref,
+                retryable=retryable,
+                operation_id=operation_id,
+            )
+
+    async def _retryable_write(
+        self,
+        retryable: bool,
+        func: _WriteCall[T],
+        session: Optional[AsyncClientSession],
+        operation: str,
+        bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]] = None,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Execute an operation with consecutive retries if possible
+
+        Returns func()'s return value on success. On error retries the same
+        command.
+
+        Re-raises any exception thrown by func().
+
+        :param retryable: if we should attempt retries (may not always be supported)
+        :param func: write call we want to execute during a session
+        :param session: Client session we will use to execute write operation
+        :param operation: The name of the operation that the server is being selected for
+        :param bulk: bulk abstraction to execute operations in bulk, defaults to None
+        """
+        async with self._tmp_session(session) as s:
+            return await self._retry_with_session(retryable, func, s, bulk, operation, operation_id)
+
+    def _cleanup_cursor_no_lock(
+        self,
+        cursor_id: int,
+        address: Optional[_CursorAddress],
+        conn_mgr: _ConnectionManager,
+        session: Optional[AsyncClientSession],
+    ) -> None:
+        """Cleanup a cursor from __del__ without locking.
+ + This method handles cleanup for Cursors/CommandCursors including any + pinned connection attached at the time the cursor + was garbage collected. + + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + """ + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + async def _cleanup_cursor_lock( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[AsyncClientSession], + ) -> None: + """Cleanup a cursor from cursor.close() using a lock. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. + + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + :param session: The cursor's session. + """ + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + await conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + await self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + await conn_mgr.close() + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + async def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[AsyncClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") + + try: + if conn_mgr: + async with conn_mgr._lock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + await self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + await self._kill_cursors([cursor_id], address, await self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + async def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[AsyncClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = await topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. 
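+            # With no address, fall back to selecting any writable server.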
+ server = await topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + async with self._checkout(server, session) as conn: + assert address is not None + await self._kill_cursor_impl(cursor_ids, address, session, conn) + + async def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + await conn.command(db, spec, session=session, client=self) + + async def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + _log_client_error() + + # Don't re-open topology if it's closed and there's no pending cursors. + if address_to_cursor_ids: + topology = await self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + await self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + _log_client_error() + + # This method is run periodically by a background thread. + async def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + await self._process_kill_cursors() + await self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + _log_client_error() + + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession] + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return self._topology.return_server_session(server_session) + + @contextlib.asynccontextmanager + async def _tmp_session( + self, session: Optional[client_session.AsyncClientSession] + ) -> AsyncGenerator[Optional[client_session.AsyncClientSession], None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.AsyncClientSession): + raise ValueError( + f"'session' argument must be an AsyncClientSession or None, not {type(session)}" + ) + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + await s.end_session() + raise + finally: + # Call end_session when we exit this scope. 
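+                # (An implicit session that was handed off to a cursor is
+                # ended by that cursor's cleanup instead.)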
+                if not s._attached_to_cursor:
+                    await s.end_session()
+        else:
+            yield None
+
+    async def _process_response(
+        self, reply: Mapping[str, Any], session: Optional[AsyncClientSession]
+    ) -> None:
+        await self._topology.receive_cluster_time(reply.get("$clusterTime"))
+        if session is not None:
+            session._process_response(reply)
+
+    async def server_info(
+        self, session: Optional[client_session.AsyncClientSession] = None
+    ) -> dict[str, Any]:
+        """Get information about the MongoDB server we're connected to.
+
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        return cast(  # type: ignore[redundant-cast]
+            dict[str, Any],
+            await self.admin.command(
+                "buildinfo", read_preference=ReadPreference.PRIMARY, session=session
+            ),
+        )
+
+    async def _list_databases(
+        self,
+        session: Optional[client_session.AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> AsyncCommandCursor[dict[str, Any]]:
+        cmd = {"listDatabases": 1}
+        cmd.update(kwargs)
+        if comment is not None:
+            cmd["comment"] = comment
+        admin = self._database_default_options("admin")
+        res = await admin._retryable_read_command(
+            cmd, session=session, operation=_Op.LIST_DATABASES
+        )
+        # listDatabases doesn't return a cursor (yet). Fake one.
+        cursor = {
+            "id": 0,
+            "firstBatch": res["databases"],
+            "ns": "admin.$cmd",
+        }
+        return AsyncCommandCursor(admin["$cmd"], cursor, None, comment=comment)
+
+    async def list_databases(
+        self,
+        session: Optional[client_session.AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> AsyncCommandCursor[dict[str, Any]]:
+        """Get a cursor over the databases of the connected server.
+
+        Cursors are closed automatically when they are exhausted (the last
+        batch of data is retrieved from the database). A cursor that is not
+        exhausted is only closed upon garbage collection, which can leave
+        resources open but unused for a long time. To avoid this, best
+        practice is to call :meth:`AsyncCursor.close` when the cursor is no
+        longer needed, or to use the cursor in an ``async with`` statement::
+
+            async with await client.list_databases() as cursor:
+                async for database in cursor:
+                    print(database)
+
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: Optional parameters of the
+            `listDatabases command
+            <https://mongodb.com/docs/manual/reference/command/listDatabases/>`_
+            can be passed as keyword arguments to this method. The supported
+            options differ by server version.
+
+        :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`.
+
+        .. versionadded:: 3.6
+        """
+        return await self._list_databases(session, comment, **kwargs)
+
+    async def list_database_names(
+        self,
+        session: Optional[client_session.AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> list[str]:
+        """Get a list of the names of all databases on the connected server.
+
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionadded:: 3.6
+        """
+        res = await self._list_databases(session, nameOnly=True, comment=comment)
+        return [doc["name"] async for doc in res]
+
+    @_csot.apply
+    async def drop_database(
+        self,
+        name_or_database: Union[str, database.AsyncDatabase[_DocumentTypeArg]],
+        session: Optional[client_session.AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> None:
+        """Drop a database.
+
+        Raises :class:`TypeError` if `name_or_database` is not an instance of
+        :class:`str` or :class:`~pymongo.asynchronous.database.AsyncDatabase`.
+
+        :param name_or_database: the name of a database to drop, or a
+            :class:`~pymongo.asynchronous.database.AsyncDatabase` instance representing the
+            database to drop
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. note:: The :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.write_concern` of
+           this client is automatically applied to this operation.
+
+        .. versionchanged:: 3.4
+           Apply this client's write concern automatically to this operation
+           when connected to MongoDB >= 3.4.
+
+        """
+        name = name_or_database
+        if isinstance(name, database.AsyncDatabase):
+            name = name.name
+
+        if not isinstance(name, str):
+            raise TypeError(
+                f"name_or_database must be an instance of str or an AsyncDatabase, not {type(name)}"
+            )
+
+        async with await self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn:
+            await self[name]._command(
+                conn,
+                {"dropDatabase": 1, "comment": comment},
+                read_preference=ReadPreference.PRIMARY,
+                write_concern=self._write_concern_for(session),
+                parse_write_concern_error=True,
+                session=session,
+            )
+
+    @_csot.apply
+    async def bulk_write(
+        self,
+        models: Sequence[_WriteOp],
+        session: Optional[AsyncClientSession] = None,
+        ordered: bool = True,
+        verbose_results: bool = False,
+        bypass_document_validation: Optional[bool] = None,
+        comment: Optional[Any] = None,
+        let: Optional[Mapping[str, Any]] = None,
+        write_concern: Optional[WriteConcern] = None,
+    ) -> ClientBulkWriteResult:
+        """Send a batch of write operations, potentially across multiple namespaces, to the server.
+
+        Requests are passed as a list of write operation instances (
+        :class:`~pymongo.operations.InsertOne`,
+        :class:`~pymongo.operations.UpdateOne`,
+        :class:`~pymongo.operations.UpdateMany`,
+        :class:`~pymongo.operations.ReplaceOne`,
+        :class:`~pymongo.operations.DeleteOne`, or
+        :class:`~pymongo.operations.DeleteMany`).
+
+        >>> async for doc in db.test.find({}):
+        ...     print(doc)
+        ...
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')}
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+        ...
+        >>> async for doc in db.coll.find({}):
+        ...     print(doc)
+        ...
+        {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+        ...
+        >>> # DeleteMany, UpdateOne, and UpdateMany are also available.
+        >>> from pymongo import InsertOne, DeleteOne, ReplaceOne
+        >>> models = [InsertOne(namespace="db.test", document={'y': 1}),
+        ...           DeleteOne(namespace="db.test", filter={'x': 1}),
+        ...           InsertOne(namespace="db.coll", document={'y': 2}),
+        ...           ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)]
+        >>> result = await client.bulk_write(models=models)
+        >>> result.inserted_count
+        2
+        >>> result.deleted_count
+        1
+        >>> result.modified_count
+        0
+        >>> result.upserted_count
+        1
+        >>> async for doc in db.test.find({}):
+        ...     print(doc)
+        ...
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+        {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')}
+        {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')}
+        ...
+        >>> async for doc in db.coll.find({}):
+        ...     print(doc)
+        ...
+        {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+        {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')}
+
+        :param models: A list of write operation instances.
+        :param session: (optional) An instance of
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param ordered: If ``True`` (the default), requests will be
+            performed on the server serially, in the order provided. If an error
+            occurs all remaining operations are aborted. If ``False``, requests
+            will still be performed on the server serially, in the order provided,
+            but all operations will be attempted even if any errors occur.
+        :param verbose_results: If ``True``, detailed results for each
+            successful operation will be included in the returned
+            :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``.
+        :param bypass_document_validation: (optional) If ``True``, allows the
+            write to opt-out of document level validation. Default is ``False``.
+        :param comment: (optional) A user-provided comment to attach to this
+            command.
+        :param let: (optional) Map of parameter names and values. Values must be
+            constant or closed expressions that do not reference document
+            fields. Parameters can then be accessed as variables in an
+            aggregate expression context (e.g. "$$var").
+        :param write_concern: (optional) The write concern to use for this bulk write.
+
+        :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`.
+
+        .. seealso:: For more info, see `Client Bulk Write `_.
+
+        .. seealso:: `Writes and ids `_
+
+        .. note:: requires MongoDB server version 8.0+.
+
+        .. versionadded:: 4.9
+        """
+        if self._options.auto_encryption_opts:
+            raise InvalidOperation(
+                "MongoClient.bulk_write does not currently support automatic encryption"
+            )
+
+        if session and session.in_transaction:
+            # Inherit the transaction write concern.
+            if write_concern:
+                raise InvalidOperation("Cannot set write concern after starting a transaction")
+            write_concern = session._transaction.opts.write_concern  # type: ignore[union-attr]
+        else:
+            # Inherit the client's write concern if none is provided.
+            if not write_concern:
+                write_concern = self.write_concern
+
+        if write_concern and not write_concern.acknowledged and verbose_results:
+            raise InvalidOperation(
+                "Cannot request unacknowledged write concern and verbose results"
+            )
+        elif write_concern and not write_concern.acknowledged and ordered:
+            raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes")
+
+        common.validate_list("models", models)
+
+        blk = _AsyncClientBulk(
+            self,
+            write_concern=write_concern,  # type: ignore[arg-type]
+            ordered=ordered,
+            bypass_document_validation=bypass_document_validation,
+            comment=comment,
+            let=let,
+            verbose_results=verbose_results,
+        )
+        for model in models:
+            try:
+                model._add_to_client_bulk(blk)
+            except AttributeError:
+                raise TypeError(f"{model!r} is not a valid request") from None
+
+        return await blk.execute(session, _Op.BULK_WRITE)
+
+
+def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]:
+    """Return the server response from PyMongo exception or None."""
+    if isinstance(exc, (BulkWriteError, ClientBulkWriteException)):
+        # Check the last writeConcernError to determine if this
+        # BulkWriteError is retryable.
+        wces = exc.details["writeConcernErrors"]
+        return wces[-1] if wces else None
+    if isinstance(exc, (NotPrimaryError, OperationFailure)):
+        return cast(Mapping[str, Any], exc.details)
+    return None
+
+
+def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None:
+    doc = _retryable_error_doc(exc)
+    if doc:
+        code = doc.get("code", 0)
+        # retryWrites on MMAPv1 should raise an actionable error.
+        if code == 20 and str(exc).startswith("Transaction numbers"):
+            errmsg = (
+                "This MongoDB deployment does not support "
+                "retryable writes. Please add retryWrites=false "
+                "to your connection string."
+            )
+            raise OperationFailure(errmsg, code, exc.details)  # type: ignore[attr-defined]
+        if max_wire_version >= 9:
+            # In MongoDB 4.4+, the server reports the error labels.
+            for label in doc.get("errorLabels", []):
+                exc._add_error_label(label)
+        else:
+            # Do not consult writeConcernError for pre-4.4 mongos.
+            if isinstance(exc, WriteConcernError) and is_mongos:
+                pass
+            elif code in helpers_shared._RETRYABLE_ERROR_CODES:
+                exc._add_error_label("RetryableWriteError")
+
+    # AsyncConnection errors are always retryable except NotPrimaryError and
+    # WaitQueueTimeoutError, which are handled above.
+    if isinstance(exc, ClientBulkWriteException):
+        exc_to_check = exc.error
+    else:
+        exc_to_check = exc
+    if isinstance(exc_to_check, ConnectionFailure) and not isinstance(
+        exc_to_check, (NotPrimaryError, WaitQueueTimeoutError)
+    ):
+        exc_to_check._add_error_label("RetryableWriteError")
+
+
+class _MongoClientErrorHandler:
+    """Handle errors raised when executing an operation."""
+
+    __slots__ = (
+        "client",
+        "server_address",
+        "session",
+        "max_wire_version",
+        "sock_generation",
+        "completed_handshake",
+        "service_id",
+        "handled",
+    )
+
+    def __init__(
+        self,
+        client: AsyncMongoClient,  # type: ignore[type-arg]
+        server: Server,
+        session: Optional[AsyncClientSession],
+    ):
+        if not isinstance(client, AsyncMongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
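+            # Comparing class names across the MRO, rather than using
+            # isinstance(), lets such wrapper types pass this check.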
+ if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__): + raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") + + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: AsyncConnection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + async def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + await self.session._unpin() + err_ctx = _ErrorContext( + exc_val, # type: ignore[arg-type] + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + assert self.client._topology is not None + await self.client._topology.handle_error(self.server_address, err_ctx) + + async def __aenter__(self) -> _MongoClientErrorHandler: + return self + + async def __aexit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return await self.handle(exc_type, exc_val) + + +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" + + def __init__( + self, + mongo_client: AsyncMongoClient, # type: ignore[type-arg] + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], + operation: str, + is_read: bool = False, + session: Optional[AsyncClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id + self._attempt_number = 0 + + async def run(self) -> T: + """Runs the supplied func() and 
attempts a retry
+
+        :raises: self._last_error: Last exception raised
+
+        :return: Result of the func() call
+        """
+        # Increment the transaction id up front to ensure any retry attempt
+        # will use the proper txnNumber, even if server or socket selection
+        # fails before the command can be sent.
+        if self._is_session_state_retryable() and self._retryable and not self._is_read:
+            self._session._start_retryable_write()  # type: ignore
+            if self._bulk:
+                self._bulk.started_retryable_write = True
+
+        while True:
+            self._check_last_error(check_csot=True)
+            try:
+                return await self._read() if self._is_read else await self._write()
+            except ServerSelectionTimeoutError:
+                # The application may think the write was never attempted
+                # if we raise ServerSelectionTimeoutError on the retry
+                # attempt. Raise the original exception instead.
+                self._check_last_error()
+                # A ServerSelectionTimeoutError error indicates that there may
+                # be a persistent outage. Attempting to retry in this case will
+                # most likely be a waste of time.
+                raise
+            except PyMongoError as exc:
+                # Execute specialized catch on read
+                if self._is_read:
+                    if isinstance(exc, (ConnectionFailure, OperationFailure)):
+                        # ConnectionFailures do not supply a code property
+                        exc_code = getattr(exc, "code", None)
+                        if self._is_not_eligible_for_retry() or (
+                            isinstance(exc, OperationFailure)
+                            and exc_code not in helpers_shared._RETRYABLE_ERROR_CODES
+                        ):
+                            raise
+                        self._retrying = True
+                        self._last_error = exc
+                        self._attempt_number += 1
+                    else:
+                        raise
+
+                # Specialized catch on write operation
+                if not self._is_read:
+                    if not self._retryable:
+                        raise
+                    if isinstance(exc, ClientBulkWriteException) and exc.error:
+                        retryable_write_error_exc = isinstance(
+                            exc.error, PyMongoError
+                        ) and exc.error.has_error_label("RetryableWriteError")
+                    else:
+                        retryable_write_error_exc = exc.has_error_label("RetryableWriteError")
+                    if retryable_write_error_exc:
+                        assert self._session
+                        await self._session._unpin()
+                    if not retryable_write_error_exc or self._is_not_eligible_for_retry():
+                        if exc.has_error_label("NoWritesPerformed") and self._last_error:
+                            raise self._last_error from exc
+                        else:
+                            raise
+                    self._attempt_number += 1
+                    if self._bulk:
+                        self._bulk.retrying = True
+                    else:
+                        self._retrying = True
+                    if not exc.has_error_label("NoWritesPerformed"):
+                        self._last_error = exc
+                    if self._last_error is None:
+                        self._last_error = exc
+
+                if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded:
+                    self._deprioritized_servers.append(self._server)
+
+    def _is_not_eligible_for_retry(self) -> bool:
+        """Checks if the exchange is not eligible for retry"""
+        return not self._retryable or (self._is_retrying() and not self._multiple_retries)
+
+    def _is_retrying(self) -> bool:
+        """Checks if the exchange is currently undergoing a retry"""
+        return self._bulk.retrying if self._bulk else self._retrying
+
+    def _is_session_state_retryable(self) -> bool:
+        """Checks if provided session is eligible for retry
+
+        reads: Make sure there is no ongoing transaction (if provided a session)
+        writes: Make sure there is a session without an active transaction
+        """
+        if self._is_read:
+            return not (self._session and self._session.in_transaction)
+        return bool(self._session and not self._session.in_transaction)
+
+    def _check_last_error(self, check_csot: bool = False) -> None:
+        """Checks if the ongoing client exchange experienced an exception previously.
+        If so, raise the last error.
+
+        :param check_csot: Checks CSOT to ensure we are retrying with time remaining, defaults to False
+        """
+        if self._is_retrying():
+            remaining = _csot.remaining()
+            if not check_csot or (remaining is not None and remaining <= 0):
+                assert self._last_error is not None
+                raise self._last_error
+
+    async def _get_server(self) -> Server:
+        """Retrieves a server object based on provided object context
+
+        :return: Abstraction to connect to server
+        """
+        return await self._client._select_server(
+            self._server_selector,
+            self._session,
+            self._operation,
+            address=self._address,
+            deprioritized_servers=self._deprioritized_servers,
+            operation_id=self._operation_id,
+        )
+
+    async def _write(self) -> T:
+        """Wrapper method for write-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        try:
+            max_wire_version = 0
+            is_mongos = False
+            self._server = await self._get_server()
+            async with self._client._checkout(self._server, self._session) as conn:
+                max_wire_version = conn.max_wire_version
+                sessions_supported = (
+                    self._session
+                    and self._server.description.retryable_writes_supported
+                    and conn.supports_sessions
+                )
+                is_mongos = conn.is_mongos
+                if not sessions_supported:
+                    # A retry is not possible because this server does
+                    # not support sessions, so raise the last error.
+                    self._check_last_error()
+                    self._retryable = False
+                if self._retrying:
+                    _debug_log(
+                        _COMMAND_LOGGER,
+                        message=f"Retrying write attempt number {self._attempt_number}",
+                        clientId=self._client._topology_settings._topology_id,
+                        commandName=self._operation,
+                        operationId=self._operation_id,
+                    )
+                return await self._func(self._session, conn, self._retryable)  # type: ignore
+        except PyMongoError as exc:
+            if not self._retryable:
+                raise
+            # Add the RetryableWriteError label, if applicable.
+            _add_retryable_write_error(exc, max_wire_version, is_mongos)
+            raise
+
+    async def _read(self) -> T:
+        """Wrapper method for read-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        self._server = await self._get_server()
+        assert self._read_pref is not None, "Read Preference required on read calls"
+        async with self._client._conn_from_server(self._read_pref, self._server, self._session) as (
+            conn,
+            read_pref,
+        ):
+            if self._retrying and not self._retryable:
+                self._check_last_error()
+            if self._retrying:
+                _debug_log(
+                    _COMMAND_LOGGER,
+                    message=f"Retrying read attempt number {self._attempt_number}",
+                    clientId=self._client._topology_settings._topology_id,
+                    commandName=self._operation,
+                    operationId=self._operation_id,
+                )
+            return await self._func(self._session, self._server, conn, read_pref)  # type: ignore
+
+
+def _after_fork_child() -> None:
+    """Releases the locks in child process and resets the
+    topologies in all MongoClients.
+    """
+    # Reinitialize locks
+    _release_locks()
+
+    # Perform cleanup in clients (i.e. get rid of topology)
+    for _, client in AsyncMongoClient._clients.items():
+        client._after_fork()
+
+
+def _detect_external_db(entity: str) -> bool:
+    """Detects external database hosts and logs an informational message at the INFO level."""
+    entity = entity.lower()
+    cosmos_db_hosts = [".cosmos.azure.com"]
+    document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"]
+
+    for host in cosmos_db_hosts:
+        if entity.endswith(host):
+            _log_or_warn(
+                _CLIENT_LOGGER,
+                "You appear to be connected to a CosmosDB cluster. 
For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py new file mode 100644 index 0000000000..45c12b219f --- /dev/null +++ b/pymongo/asynchronous/monitor.py @@ -0,0 +1,545 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Class to monitor a MongoDB server on a background thread.""" + +from __future__ import annotations + +import asyncio +import atexit +import logging +import time +import weakref +from typing import TYPE_CHECKING, Any, Optional + +from pymongo import common, periodic_executor +from pymongo._csot import MovingMinimum +from pymongo.asynchronous.srv_resolver import _SrvResolver +from pymongo.errors import NetworkTimeout, _OperationCancelled +from pymongo.hello import Hello +from pymongo.lock import _async_create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors +from pymongo.pool_options import _is_faas +from pymongo.read_preferences import MovingAverage +from pymongo.server_description import ServerDescription + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import ( # type: ignore[attr-defined] + AsyncConnection, + Pool, + _CancellationContext, + ) + from pymongo.asynchronous.settings import TopologySettings + from pymongo.asynchronous.topology import Topology + +_IS_SYNC = False + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. 
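+        # (self_ref below is a weakref.ref to this monitor; target() re-checks
+        # it on every wakeup and returns False to stop the executor once the
+        # monitor has been garbage collected.)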
+ async def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + await monitor._run() # type:ignore[attr-defined] + return True + + executor = periodic_executor.AsyncPeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) + + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + self._executor.open() + + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + async def close(self) -> None: + """Close and stop monitoring. + + open() restarts the monitor after closing. + """ + self.gc_safe_close() + + async def join(self) -> None: + """Wait for the monitor to stop.""" + await self._executor.join() + + def request_check(self) -> None: + """If the monitor is sleeping, wake it soon.""" + self._executor.wake() + + +class Monitor(MonitorBase): + def __init__( + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): + """Class to monitor a MongoDB server on a background thread. + + Pass an initial ServerDescription, a Topology, a Pool, and + TopologySettings. + + The Topology is weakly referenced. The Pool must be exclusive to this + Monitor. + """ + super().__init__( + topology, + "pymongo_server_monitor_task", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + self._server_description = server_description + self._pool = pool + self._settings = topology_settings + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() + + def cancel_check(self) -> None: + """Cancel any concurrent hello check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + async def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. 
+ self._rtt_monitor.open() + if self._executor._stopped: + await self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + + async def join(self) -> None: + await asyncio.gather( + self._executor.join(), self._rtt_monitor.join(), return_exceptions=True + ) # type: ignore[func-returns-value] + + async def close(self) -> None: + self.gc_safe_close() + await self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._reset_connection() + + async def _reset_connection(self) -> None: + # Clear our pooled connection. + await self._pool.reset() + + async def _run(self) -> None: + try: + prev_sd = self._server_description + try: + self._server_description = await self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + await self._topology.on_change( + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + await self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() + except ReferenceError: + # Topology was garbage-collected. + await self.close() + finally: + if self._executor._stopped: + await self._rtt_monitor.close() + + async def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. + + Returns a ServerDescription. + """ + self._conn_id = None + start = time.monotonic() + try: + return await self._check_once() + except ReferenceError: + raise + except Exception as error: + _sanitize(error) + sd = self._server_description + address = sd.address + duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + driverConnectionId=self._conn_id, + ) + await self._reset_connection() + if isinstance(error, _OperationCancelled): + raise + await self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) + + async def _check_once(self) -> ServerDescription: + """A single attempt to call hello. + + Returns a ServerDescription, or raises an exception. 
+ """ + address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + await self._reset_connection() + async with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + ) + + self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id + response, round_trip_time = await self._check_with_socket(conn) + if not response.awaitable: + await self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = await self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_succeeded( + address, round_trip_time, response, response.awaitable + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + ) + return sd + + async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float]: # type: ignore[type-arg] + """Return (Hello, round_trip_time). + + Can raise ConnectionFailure or OperationFailure. + """ + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(await conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = await conn._hello( + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). + response = await conn._hello(None, None) + duration = _monotonic_duration(start) + return response, duration + + +class SrvMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings): + """Class to poll SRV records on a background thread. + + Pass a Topology and a TopologySettings. + + The Topology is weakly referenced. 
+ """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) + self._settings = topology_settings + self._seedlist = self._settings._seeds + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() + + async def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return + seedlist = await self._get_seedlist() + if seedlist: + self._seedlist = seedlist + try: + await self._topology.on_srv_update(self._seedlist) + except ReferenceError: + # Topology was garbage-collected. + await self.close() + + async def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + """Poll SRV records for a seedlist. + + Returns a list of ServerDescriptions. + """ + try: + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) + seedlist, ttl = await resolver.get_hosts_and_min_ttl() + if len(seedlist) == 0: + # As per the spec: this should be treated as a failure. + raise Exception + except Exception as exc: + # As per the spec, upon encountering an error: + # - An error must not be raised + # - SRV records must be rescanned every heartbeatFrequencyMS + # - Topology must be left unchanged + self.request_check() + _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc)) + return None + else: + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_server_rtt_task", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + + self._pool = pool + self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() + self._lock = _async_create_lock() + + async def close(self) -> None: + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._pool.reset() + + async def add_sample(self, sample: float) -> None: + """Add a RTT sample.""" + async with self._lock: + self._moving_average.add_sample(sample) + self._moving_min.add_sample(sample) + + async def get(self) -> tuple[Optional[float], float]: + """Get the calculated average, or None if no samples yet and the min.""" + async with self._lock: + return self._moving_average.get(), self._moving_min.get() + + async def reset(self) -> None: + """Reset the average RTT.""" + async with self._lock: + self._moving_average.reset() + self._moving_min.reset() + + async def _run(self) -> None: + try: + # NOTE: This thread is only run when using the streaming + # heartbeat protocol (MongoDB 4.4+). + # XXX: Skip check if the server is unknown? + rtt = await self._ping() + await self.add_sample(rtt) + except ReferenceError: + # Topology was garbage-collected. 
+ await self.close() + except Exception: + await self._pool.reset() + + async def _ping(self) -> float: + """Run a "hello" command and return the RTT.""" + async with self._pool.checkout() as conn: + if self._executor._stopped: + raise Exception("_RttMonitor closed") + start = time.monotonic() + await conn.hello() + return _monotonic_duration(start) + + +# Close monitors to cancel any in progress streaming checks before joining +# executor threads. For an explanation of how this works see the comment +# about _EXECUTORS in periodic_executor.py. +_MONITORS = set() + + +def _register(monitor: MonitorBase) -> None: + ref = weakref.ref(monitor, _unregister) + _MONITORS.add(ref) + + +def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None: + _MONITORS.remove(monitor_ref) + + +def _shutdown_monitors() -> None: + if _MONITORS is None: + return + + # Copy the set. Closing monitors removes them. + monitors = list(_MONITORS) + + # Close all monitors. + for ref in monitors: + monitor = ref() + if monitor: + monitor.gc_safe_close() + + monitor = None + + +def _shutdown_resources() -> None: + # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. + shutdown = _shutdown_monitors + if shutdown: # type:ignore[truthy-function] + shutdown() + shutdown = _shutdown_executors + if shutdown: # type:ignore[truthy-function] + shutdown() + + +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py new file mode 100644 index 0000000000..5a5dc7fa2c --- /dev/null +++ b/pymongo/asynchronous/network.py @@ -0,0 +1,298 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
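An aside before network.py proper begins: the _register/_unregister/_shutdown_monitors trio that closes monitor.py above is a general weakref-registry pattern. A minimal standalone sketch, with hypothetical names (register, shutdown_all, _LIVE, and the close() method are illustrations, not part of this patch):

import weakref

_LIVE = set()  # weak references to objects that may need closing at exit


def register(obj):
    # The callback runs when obj is garbage-collected and prunes the
    # dead reference, so the registry never accumulates stale entries.
    _LIVE.add(weakref.ref(obj, _LIVE.discard))


def shutdown_all():
    # Iterate over a copy: closing an object can trigger GC callbacks
    # that mutate _LIVE mid-walk.
    for ref in list(_LIVE):
        obj = ref()
        if obj is not None:
            obj.close()

The patch's version closes through gc_safe_close() rather than close(), since the registry can be walked from atexit and weakref callbacks, where taking locks or awaiting is not safe.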
+ +"""Internal network layer helper methods.""" +from __future__ import annotations + +import datetime +import logging +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from bson import _decode_all_selective +from pymongo import _csot, helpers_shared, message +from pymongo.compression_support import _NO_COMPRESSION +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, +) +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import _OpMsg +from pymongo.monitoring import _is_speculative_authenticate +from pymongo.network_layer import ( + async_receive_message, + async_sendall, +) + +if TYPE_CHECKING: + from bson import CodecOptions + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.monitoring import _EventListeners + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +async def command( + conn: AsyncConnection, + dbname: str, + spec: MutableMapping[str, Any], + is_mongos: bool, + read_preference: Optional[_ServerMode], + codec_options: CodecOptions[_DocumentType], + session: Optional[AsyncClientSession], + client: Optional[AsyncMongoClient[Any]], + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + address: Optional[_Address] = None, + listeners: Optional[_EventListeners] = None, + max_bson_size: Optional[int] = None, + read_concern: Optional[ReadConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + compression_ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, + use_op_msg: bool = False, + unacknowledged: bool = False, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + write_concern: Optional[WriteConcern] = None, +) -> _DocumentType: + """Execute a command over the socket, or raise socket.error. + + :param conn: a AsyncConnection instance + :param dbname: name of the database on which to run the command + :param spec: a command document as an ordered dict type, eg SON. + :param is_mongos: are we connected to a mongos? + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param session: optional AsyncClientSession instance. + :param client: optional AsyncMongoClient instance for updating $clusterTime. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param address: the (host, port) of `conn` + :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners` + :param max_bson_size: The maximum encoded bson size for this server + :param read_concern: The read concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` + field in the command response. + :param collation: The collation for this command. + :param compression_ctx: optional compression Context. + :param use_op_msg: True if we should use OP_MSG. + :param unacknowledged: True if this is an unacknowledged command. 
+ :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. + """ + name = next(iter(spec)) + ns = dbname + ".$cmd" + speculative_hello = False + + # Publish the original command document, perhaps with lsid and $clusterTime. + orig = spec + if is_mongos and not use_op_msg: + assert read_preference is not None + spec = message._maybe_add_read_preference(spec, read_preference) + if read_concern and not (session and session.in_transaction): + if read_concern.level: + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) + if collation is not None: + spec["collation"] = collation + + publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() + if publish: + speculative_hello = _is_speculative_authenticate(name, spec) + + if compression_ctx and name.lower() in _NO_COMPRESSION: + compression_ctx = None + + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = await client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) + + if use_op_msg: + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 + request_id, msg, size, max_doc_size = message._op_msg( + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) + # If this is an unacknowledged write then make sure the encoded doc(s) + # are small enough, otherwise rely on the server to return an error. + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: + message._raise_document_too_large(name, size, max_bson_size) + else: + request_id, msg, size = message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) + + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + await async_sendall(conn.conn.get_conn, msg) + if use_op_msg and unacknowledged: + # Unacknowledged, fake a successful command response. 
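+            # With _OpMsg.MORE_TO_COME set on the request the server sends
+            # no reply at all, so a synthetic {"ok": 1} stands in for one.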
+ reply = None + response_doc: _DocumentOut = {"ok": 1} + else: + reply = await async_receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response( + codec_options=codec_options, user_fields=user_fields + ) + + response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time + if client: + await client._process_response(response_doc, session) + if check: + helpers_shared._check_command_response( + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) + except Exception as exc: + duration = datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_failure( + duration, + failure, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbname, + ) + raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_success( + duration, + response_doc, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) + + if client and client._encrypter and reply: + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py new file mode 100644 index 0000000000..f521091e3c --- /dev/null +++ b/pymongo/asynchronous/pool.py @@ -0,0 +1,1479 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import asyncio +import collections +import contextlib +import logging +import os +import sys +import time +import weakref +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import _csot, helpers_shared +from pymongo.asynchronous.client_session import _validate_session_write_concern +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.asynchronous.network import command +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_MESSAGE_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + ORDERED_TYPES, +) +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) +from pymongo.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, +) +from pymongo.network_layer import AsyncNetworkingInterface, async_receive_message, async_sendall +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + SSLErrors, + _CancellationContext, + _configured_protocol_interface, + _raise_connection_failure, +) +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.asynchronous.auth import _AuthContext + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.compression_support import ( + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.message import _OpMsg, _OpReply + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _Address, _CollationIn + from pymongo.write_concern import WriteConcern + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. 
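+    # FD_CLOEXEC marks a descriptor close-on-exec so it is not inherited
+    # by child processes across exec; without fcntl this hardening is skipped.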
+ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_IS_SYNC = False + + +class AsyncConnection: + """Store a connection with some metadata. + + :param conn: a raw connection object + :param pool: a Pool instance + :param address: the server's (host, port) + :param id: the id of this socket in it's pool + :param is_sdam: SDAM connections do not call hello on creation + """ + + def __init__( + self, + conn: AsyncNetworkingInterface, + pool: Pool, + address: tuple[str, int], + id: int, + is_sdam: bool, + ): + self.pool_ref = weakref.ref(pool) + self.conn = conn + self.address = address + self.id = id + self.is_sdam = is_sdam + self.closed = False + self.last_checkin_time = time.monotonic() + self.performed_handshake = False + self.is_writable: bool = False + self.max_wire_version = MAX_WIRE_VERSION + self.max_bson_size = MAX_BSON_SIZE + self.max_message_size = MAX_MESSAGE_SIZE + self.max_write_batch_size = MAX_WRITE_BATCH_SIZE + self.supports_sessions = False + self.hello_ok: bool = False + self.is_mongos = False + self.op_msg_enabled = False + self.listeners = pool.opts._event_listeners + self.enabled_for_cmap = pool.enabled_for_cmap + self.enabled_for_logging = pool.enabled_for_logging + self.compression_settings = pool.opts._compression_settings + self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() + self.oidc_token_gen_id: Optional[int] = None + # Support for mechanism negotiation on the initial handshake. + self.negotiated_mechs: Optional[list[str]] = None + self.auth_ctx: Optional[_AuthContext] = None + + # The pool's generation changes with each reset() so we can close + # sockets created before the last reset. + self.pool_gen = pool.gen + self.generation = self.pool_gen.get_overall() + self.ready = False + self.cancel_context: _CancellationContext = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False + # For load balancer support. + self.service_id: Optional[ObjectId] = None + self.server_connection_id: Optional[int] = None + # When executing a transaction in load balancing mode, this flag is + # set to true to indicate that the session now owns the connection. + self.pinned_txn = False + self.pinned_cursor = False + self.active = False + self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 + self._client_id = pool._client_id + self.creation_time = time.monotonic() + # For gossiping $clusterTime from the connection handshake to the client. + self._cluster_time = None + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. 
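+        # Illustrative numbers: with 500ms of CSOT budget remaining and a
+        # 100ms round trip time, the command goes out with maxTimeMS=400 so
+        # the server abandons the operation before the client-side deadline.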
+ rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + async def unpin(self) -> None: + pool = self.pool_ref() + if pool: + await pool.checkin(self) + else: + await self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + async def hello(self) -> Hello[dict[str, Any]]: + return await self._hello(None, None) + + async def _hello( + self, + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + from pymongo.asynchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = await self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + async def _next_reply(self) -> dict[str, Any]: + reply = await self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers_shared._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + async def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[AsyncClientSession] = None, + client: Optional[AsyncMongoClient[Any]] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. 
+ + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional AsyncClientSession instance. + :param client: optional AsyncMongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return await command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, # type: ignore[arg-type] + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. + except BaseException as error: + await self._raise_connection_failure(error) + + async def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + await async_sendall(self.conn.get_conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + await self._raise_connection_failure(error) + + async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. 
+ """ + try: + return await async_receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + await self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + async def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + await self.send_message(msg, max_doc_size) + + async def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + await self.send_message(msg, 0) + reply = await self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers_shared._check_command_response(result, self.max_wire_version) + return result + + async def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.asynchronous import auth + + await auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + duration = time.monotonic() - self.creation_time + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_ready(self.address, self.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[AsyncMongoClient[Any]], session: Optional[AsyncClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. 
+ """ + if session: + if session._client is not client: + raise InvalidOperation( + "Can only use session with the AsyncMongoClient that started it" + ) + + async def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + await self._close_conn() + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + async def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + await self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[AsyncClientSession], + client: Optional[AsyncMongoClient[Any]], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + async def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + await self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. 
+ if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "AsyncConnection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# https://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + is_sdam: bool = False, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param is_sdam: whether to call hello for each new AsyncConnection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque[AsyncConnection] = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _async_create_lock() + self._max_connecting_cond = _async_create_condition(self.lock) + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.is_sdam = is_sdam + # Don't publish events or logs in Monitor pools. 
+ self.enabled_for_cmap = ( + not self.is_sdam + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + self.enabled_for_logging = not self.is_sdam + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = _async_create_condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = _async_create_condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[AsyncConnection] = set() + self.ncursors = 0 + self.ntxns = 0 + + async def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + async def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + async with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # 
PoolClosedEvent but that reset() SHOULD close sockets *after* + # publishing the PoolClearedEvent. + if close: + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.STALE) + + async def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + async with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] + + async def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + await self._reset( + close=False, service_id=service_id, interrupt_connections=interrupt_connections + ) + + async def reset_without_pause(self) -> None: + await self._reset(close=False, pause=False) + + async def close(self) -> None: + await self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + async def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + close_conns = [] + async with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + close_conns.append(self.conns.pop()) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in close_conns: + await conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + async with self.size_cond: + # There are enough sockets in the pool. 
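+                # Either idle plus in-use connections meet minPoolSize, or
+                # enough checkouts are queued to grow the pool on their own.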
+ if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + async with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = await self.connect() + close_conn = False + async with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) + return + finally: + if incremented: + # Notify after adding the socket to the pool. + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + async with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> AsyncConnection: + """Connect to Mongo and return a new AsyncConnection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + async with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + networking_interface = await _configured_protocol_interface(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
+ except BaseException as error: + async with self.lock: + self.active_contexts.discard(tmp_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn_id, ConnectionClosedReason.ERROR + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + + raise + + conn = AsyncConnection(networking_interface, self, self.address, conn_id, self.is_sdam) # type: ignore[arg-type] + async with self.lock: + self.active_contexts.add(conn.cancel_context) + self.active_contexts.discard(tmp_context) + if tmp_context.cancelled: + conn.cancel_context.cancel() + try: + if not self.is_sdam: + await conn.hello() + self.is_writable = conn.is_writable + if handler: + handler.contribute_socket(conn, completed_handshake=False) + + await conn.authenticate() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + async with self.lock: + self.active_contexts.discard(conn.cancel_context) + await conn.close_conn(ConnectionClosedReason.ERROR) + raise + + if handler: + await handler.client._topology.receive_cluster_time(conn._cluster_time) + + return conn + + @contextlib.asynccontextmanager + async def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncGenerator[AsyncConnection, None]: + """Get a connection from the pool. Use with a "with" statement. + + Returns a :class:`AsyncConnection` object wrapping a connected + :class:`socket.socket`. + + This method should always be used in a with-statement:: + + with pool.get_conn() as connection: + connection.send_message(msg) + data = connection.receive_message(op_code, request_id) + + Can raise ConnectionFailure or OperationFailure. + + :param handler: A _MongoClientErrorHandler. + """ + listeners = self.opts._event_listeners + checkout_started_time = time.monotonic() + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_started(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + conn = await self._get_conn(checkout_started_time, handler=handler) + + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_out(self.address, conn.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) + try: + async with self.lock: + self.active_contexts.add(conn.cancel_context) + yield conn + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + # Exception in caller. 
Ensure the connection gets returned.
+ # Note that when pinned is True, the session owns the
+ # connection and it is responsible for checking the connection
+ # back into the pool.
+ pinned = conn.pinned_txn or conn.pinned_cursor
+ if handler:
+ # Perform SDAM error handling rules while the connection is
+ # still checked out.
+ exc_type, exc_val, _ = sys.exc_info()
+ await handler.handle(exc_type, exc_val)
+ if not pinned and conn.active:
+ await self.checkin(conn)
+ raise
+ if conn.pinned_txn:
+ async with self.lock:
+ self.__pinned_sockets.add(conn)
+ self.ntxns += 1
+ elif conn.pinned_cursor:
+ async with self.lock:
+ self.__pinned_sockets.add(conn)
+ self.ncursors += 1
+ elif conn.active:
+ await self.checkin(conn)
+
+ def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None:
+ if self.state != PoolState.READY:
+ if emit_event:
+ duration = time.monotonic() - checkout_started_time
+ if self.enabled_for_cmap:
+ assert self.opts._event_listeners is not None
+ self.opts._event_listeners.publish_connection_check_out_failed(
+ self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration
+ )
+ if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _CONNECTION_LOGGER,
+ message=_ConnectionStatusMessage.CHECKOUT_FAILED,
+ clientId=self._client_id,
+ serverHost=self.address[0],
+ serverPort=self.address[1],
+ reason="An error occurred while trying to establish a new connection",
+ error=ConnectionCheckOutFailedReason.CONN_ERROR,
+ durationMS=duration,
+ )
+
+ details = _get_timeout_details(self.opts)
+ _raise_connection_failure(
+ self.address, AutoReconnect("connection pool paused"), timeout_details=details
+ )
+
+ async def _get_conn(
+ self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None
+ ) -> AsyncConnection:
+ """Get or create an AsyncConnection. Can raise ConnectionFailure."""
+ # We use the pid here to avoid issues with fork / multiprocessing.
+ # See test.test_client:TestClient.test_fork for an example of
+ # what could go wrong otherwise.
+ if self.pid != os.getpid():
+ await self.reset_without_pause()
+
+ if self.closed:
+ duration = time.monotonic() - checkout_started_time
+ if self.enabled_for_cmap:
+ assert self.opts._event_listeners is not None
+ self.opts._event_listeners.publish_connection_check_out_failed(
+ self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration
+ )
+ if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _CONNECTION_LOGGER,
+ message=_ConnectionStatusMessage.CHECKOUT_FAILED,
+ clientId=self._client_id,
+ serverHost=self.address[0],
+ serverPort=self.address[1],
+ reason="Connection pool was closed",
+ error=ConnectionCheckOutFailedReason.POOL_CLOSED,
+ durationMS=duration,
+ )
+ raise _PoolClosedError(
+ "Attempted to check out a connection from closed connection pool"
+ )
+
+ async with self.lock:
+ self.operation_count += 1
+
+ # Get a free socket or create one.
+ if _csot.get_timeout():
+ deadline = _csot.get_deadline()
+ elif self.opts.wait_queue_timeout:
+ deadline = time.monotonic() + self.opts.wait_queue_timeout
+ else:
+ deadline = None
+
+ async with self.size_cond:
+ self._raise_if_not_ready(checkout_started_time, emit_event=True)
+ while not (self.requests < self.max_pool_size):
+ timeout = deadline - time.monotonic() if deadline else None
+ if not await _async_cond_wait(self.size_cond, timeout):
+ # Timed out, notify the next thread to ensure a
+ # timeout doesn't consume the condition.
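+ # (Without this hand-off, a wakeup delivered to a timed-out waiter
+ # would be lost and another request could block unnecessarily.)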
+ if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + async with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + async with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + timeout = deadline - time.monotonic() if deadline else None + if not await _async_cond_wait(self._max_connecting_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if await self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = await self.connect(handler=handler) + finally: + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + if conn: + # We checked out a socket but authentication failed. + await conn.close_conn(ConnectionClosedReason.ERROR) + async with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if not emitted_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + async def checkin(self, conn: AsyncConnection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. 
+ """ + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + async with self.lock: + self.active_contexts.discard(conn.cancel_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKEDIN, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) + if self.pid != os.getpid(): + await self.reset_without_pause() + else: + if self.closed: + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + else: + close_conn = False + async with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + close_conn = True + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) + + async with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 + self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + async def _perished(self, conn: AsyncConnection) -> bool: + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for + for longer than the max idle time, or if the socket has been closed by + some external network error, or if the socket's generation is outdated. + + Checking sockets lets us avoid seeing *some* + :class:`~pymongo.errors.AutoReconnect` exceptions on server + hiccups, etc. We only check if the socket was closed by an external + error if it has been > 1 second since the socket was checked into the + pool, to keep performance reasonable - we can't avoid AutoReconnects + completely anyway. + """ + idle_time_seconds = conn.idle_time_seconds() + # If socket is idle, open a new one. 
+ if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + await conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + await conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + await conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py new file mode 100644 index 0000000000..f212306174 --- /dev/null +++ b/pymongo/asynchronous/server.py @@ -0,0 +1,383 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
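The checkout path above enforces two independent limits: maxPoolSize (tracked by `requests` under `size_cond`) and maxConnecting (tracked by `_pending` under `_max_connecting_cond`). A minimal sketch of the maxConnecting gate using plain asyncio primitives; `TinyPool` and its constant are illustrative, not PyMongo's API:

```python
# Sketch only: bound concurrent connection attempts the way
# _max_connecting_cond/_pending do above.
import asyncio
import collections


class TinyPool:
    MAX_CONNECTING = 2  # assumed value; PyMongo derives this from pool options

    def __init__(self) -> None:
        self.conns: collections.deque = collections.deque()
        self.pending = 0
        self.cond = asyncio.Condition()

    async def checkout(self) -> object:
        async with self.cond:
            # Wait until a pooled connection exists OR we may dial a new one.
            while not self.conns and self.pending >= self.MAX_CONNECTING:
                await self.cond.wait()
            if self.conns:
                return self.conns.popleft()
            self.pending += 1
        try:
            await asyncio.sleep(0.01)  # stand-in for the TCP/TLS handshake
            return object()
        finally:
            async with self.cond:
                self.pending -= 1
                self.cond.notify()  # wake one waiter, mirroring checkin()


async def main() -> None:
    pool = TinyPool()
    conns = await asyncio.gather(*(pool.checkout() for _ in range(5)))
    print(f"established {len(conns)} connections, at most 2 at a time")


asyncio.run(main())
```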
+ +"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Optional, + Union, +) + +from bson import _decode_all_selective +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.errors import NotPrimaryError, OperationFailure +from pymongo.helpers_shared import _check_command_response +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.response import PinnedResponse, Response + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.asynchronous.monitor import Monitor + from pymongo.asynchronous.pool import AsyncConnection, Pool + from pymongo.monitoring import _EventListeners + from pymongo.read_preferences import _ServerMode + from pymongo.server_description import ServerDescription + from pymongo.typings import _DocumentOut + +_IS_SYNC = False + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, + ) -> None: + """Represent one MongoDB server.""" + self._description = server_description + self._pool = pool + self._monitor = monitor + self._topology_id = topology_id + self._publish = listeners is not None and listeners.enabled_for_server + self._listener = listeners + self._events = None + if self._publish: + self._events = events() # type: ignore[misc] + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + if not self._pool.opts.load_balanced: + self._monitor.open() + + async def reset(self, service_id: Optional[ObjectId] = None) -> None: + """Clear the connection pool.""" + await self.pool.reset(service_id) + + async def close(self) -> None: + """Clear the connection pool and stop the monitor. + + Reconnect with open(). 
+ """ + if self._publish: + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + ) + + await self._monitor.close() + await self._pool.close() + + def request_check(self) -> None: + """Check the server's state soon.""" + self._monitor.request_check() + + async def operation_to_command( + self, operation: Union[_Query, _GetMore], conn: AsyncConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + cmd, db = operation.as_command(conn, apply_timeout) + # Support auto encryption + if operation.client._encrypter and not operation.client._encrypter._bypass_auto_encryption: + cmd = await operation.client._encrypter.encrypt( # type: ignore[misc, assignment] + operation.db, cmd, operation.codec_options + ) + operation.update_command(cmd) + + return cmd, db + + @_handle_reauth + async def run_operation( + self, + conn: AsyncConnection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + client: AsyncMongoClient[Any], + ) -> Response: + """Run a _Query or _GetMore operation and return a Response object. + + This method is used only to run _Query/_GetMore operations from + cursors. + Can raise ConnectionFailure, OperationFailure, etc. + + :param conn: An AsyncConnection instance. + :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. + :param client: An AsyncMongoClient instance. + """ + assert listeners is not None + publish = listeners.enabled_for_commands + start = datetime.now() + + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + cmd, dbn = await self.operation_to_command(operation, conn, use_cmd) + if more_to_come: + request_id = 0 + else: + message = operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) + + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + + if publish: + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None + listeners.publish_command_start( + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + if more_to_come: + reply = await conn.receive_message(None) + else: + await conn.send_message(data, max_doc_size) + reply = await conn.receive_message(request_id) + + # Unpack and check for command errors. 
+ if use_cmd: + user_fields = _CURSOR_DOC_FIELDS + legacy_response = False + else: + user_fields = None + legacy_response = True + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) + if use_cmd: + first = docs[0] + await operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] + except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + listeners.publish_command_failure( + duration, + failure, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + raise + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. + if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs + else: + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + listeners.publish_command_success( + duration, + res, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + + # Decrypt response. + client = operation.client # type: ignore[assignment] + if client and client._encrypter: + if use_cmd: + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) + + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: # type: ignore[arg-type] + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. 
+ more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( + data=reply, + address=self._description.address, + conn=conn, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + more_to_come=more_to_come, + ) + else: + response = Response( + data=reply, + address=self._description.address, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + ) + + return response + + async def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncContextManager[AsyncConnection]: + return self.pool.checkout(handler) + + @property + def description(self) -> ServerDescription: + return self._description + + @description.setter + def description(self, server_description: ServerDescription) -> None: + assert server_description.address == self._description.address + self._description = server_description + + @property + def pool(self) -> Pool: + return self._pool + + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: + """Return request_id, data, max_doc_size. + + :param message: (request_id, data, max_doc_size) or (request_id, data) + """ + if len(message) == 3: + return message # type: ignore[return-value] + else: + # get_more and kill_cursors messages don't include BSON documents. + request_id, data = message # type: ignore[misc] + return request_id, data, 0 + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py new file mode 100644 index 0000000000..9c2331971a --- /dev/null +++ b/pymongo/asynchronous/settings.py @@ -0,0 +1,175 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
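For legacy (non-command) replies, `run_operation` above rebuilds a find/getMore-style command response so downstream cursor code only ever sees one shape (`explain` is special-cased). A toy version of that shaping, with made-up inputs:

```python
# Toy re-creation of the legacy reply shaping above; inputs are invented.
from typing import Any


def shape_legacy_reply(
    docs: list[dict[str, Any]], cursor_id: int, ns: str, operation_name: str
) -> dict[str, Any]:
    # Mirror the find/getMore command response format.
    res: dict[str, Any] = {"cursor": {"id": cursor_id, "ns": ns}, "ok": 1}
    batch = "firstBatch" if operation_name == "find" else "nextBatch"
    res["cursor"][batch] = docs
    return res


print(shape_legacy_reply([{"_id": 1}], 42, "db.coll", "find"))
# {'cursor': {'id': 42, 'ns': 'db.coll', 'firstBatch': [{'_id': 1}]}, 'ok': 1}
```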
+ +"""Represent MongoClient's configuration.""" +from __future__ import annotations + +import threading +import traceback +from typing import Any, Collection, Optional, Type, Union + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.asynchronous import monitor, pool +from pymongo.asynchronous.pool import Pool +from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.errors import ConfigurationError +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector + +_IS_SYNC = False + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, + ): + """Represent MongoClient's configuration. + + Take a list of (host, port) pairs and optional replica set name. + """ + if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: + raise ConfigurationError( + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) + + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] + self._replica_set_name = replica_set_name + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition + self._local_threshold_ms = local_threshold_ms + self._server_selection_timeout = server_selection_timeout + self._server_selector = server_selector + self._fqdn = fqdn + self._heartbeat_frequency = heartbeat_frequency + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. 
+ self._stack = "".join(traceback.format_stack()[:-2]) + + @property + def seeds(self) -> Collection[tuple[str, int]]: + """List of server addresses.""" + return self._seeds + + @property + def replica_set_name(self) -> Optional[str]: + return self._replica_set_name + + @property + def pool_class(self) -> Type[Pool]: + return self._pool_class + + @property + def pool_options(self) -> PoolOptions: + return self._pool_options + + @property + def monitor_class(self) -> Type[monitor.Monitor]: + return self._monitor_class + + @property + def condition_class(self) -> Type[threading.Condition]: + return self._condition_class + + @property + def local_threshold_ms(self) -> int: + return self._local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + return self._server_selection_timeout + + @property + def server_selector(self) -> Optional[_ServerSelector]: + return self._server_selector + + @property + def heartbeat_frequency(self) -> int: + return self._heartbeat_frequency + + @property + def fqdn(self) -> Optional[str]: + return self._fqdn + + @property + def direct(self) -> Optional[bool]: + """Connect directly to a single server, or use a set of servers? + + True if there is one seed and no replica_set_name. + """ + return self._direct + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: + return TOPOLOGY_TYPE.Single + elif self.replica_set_name is not None: + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + return TOPOLOGY_TYPE.Unknown + + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: + """Initial dict of (address, ServerDescription) for all seeds.""" + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py new file mode 100644 index 0000000000..9c4d9a9d57 --- /dev/null +++ b/pymongo/asynchronous/srv_resolver.py @@ -0,0 +1,155 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
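`get_topology_type` above resolves the initial topology type from three constructor flags in priority order: `load_balanced`, then `direct_connection`, then `replica_set_name`. A quick check of that mapping, assuming the async package is importable as laid out in this diff:

```python
# Assumes pymongo.asynchronous is importable as added by this patch.
from pymongo.asynchronous.settings import TopologySettings
from pymongo.topology_description import TOPOLOGY_TYPE

assert TopologySettings(load_balanced=True).get_topology_type() == TOPOLOGY_TYPE.LoadBalanced
assert TopologySettings(direct_connection=True).get_topology_type() == TOPOLOGY_TYPE.Single
assert (
    TopologySettings(replica_set_name="rs0").get_topology_type()
    == TOPOLOGY_TYPE.ReplicaSetNoPrimary
)
assert TopologySettings().get_topology_type() == TOPOLOGY_TYPE.Unknown
```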
+ +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = False + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +async def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if _IS_SYNC: + from dns import resolver + + return resolver.resolve(*args, **kwargs) + else: + from dns import asyncresolver + + return await asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) + + async def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = await _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from exc + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] + + async def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = await _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. 
+ raise ConfigurationError(str(exc)) from exc + return results + + async def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = await self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results + ] + + # Validate hosts + for node in nodes: + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) + try: + nlist = srv_host.split(".")[1:][-self.__slen :] + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + async def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = await self._get_srv_response_and_hosts(True) + return nodes + + async def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = await self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py new file mode 100644 index 0000000000..283aabc690 --- /dev/null +++ b/pymongo/asynchronous/topology.py @@ -0,0 +1,1127 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
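The SRV validation above accepts a returned target only if it shares the URI's parent domain: the fqdn's labels minus the leftmost one when the fqdn has three or more parts, otherwise all of them. A standalone sketch of that comparison; the function name and hostnames are made up:

```python
# Illustrative re-implementation of _SrvResolver's parent-domain check.
def shares_parent_domain(fqdn: str, srv_host: str) -> bool:
    parts = fqdn.lower().split(".")
    plist = parts[1:] if len(parts) > 2 else parts
    nlist = srv_host.lower().split(".")[1:][-len(plist) :]
    return nlist == plist


assert shares_parent_domain("cluster0.example.com", "shard1.cluster0.example.com")
assert not shares_parent_domain("cluster0.example.com", "shard1.evil.example.net")
```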
+ +"""Internal class to monitor a topology of one or more servers.""" + +from __future__ import annotations + +import asyncio +import logging +import os +import queue +import random +import sys +import time +import warnings +import weakref +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.asynchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.asynchronous.monitor import MonitorBase, SrvMonitor +from pymongo.asynchronous.pool import Pool +from pymongo.asynchronous.server import Server +from pymongo.errors import ( + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteError, +) +from pymongo.hello import Hello +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) +from pymongo.logger import ( + _SDAM_LOGGER, + _SERVER_SELECTION_LOGGER, + _debug_log, + _SDAMStatusMessage, + _ServerSelectionStatusMessage, +) +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.asynchronous.settings import TopologySettings + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = False + +_pymongo_dir = str(Path(__file__).parent) + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] + q = queue_ref() + if not q: + return False # Cancel PeriodicExecutor. + + while True: + try: + event = q.get_nowait() + except queue.Empty: + break + else: + fn, args = event + fn(*args) + + return True # Continue PeriodicExecutor. + + +class Topology: + """Monitor a topology of one or more servers.""" + + def __init__(self, topology_settings: TopologySettings): + self._topology_id = topology_settings._topology_id + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology + + # Create events queue if there are publishers. 
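+ # The queue is drained by the AsyncPeriodicExecutor created below.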
+ self._events = None + self.__events_executor: Any = None + + if self._publish_server or self._publish_tp: + self._events = queue.Queue(maxsize=100) + + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, + ) + + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) + self._settings = topology_settings + topology_description = TopologyDescription( + topology_settings.get_topology_type(), + topology_settings.get_server_descriptions(), + topology_settings.replica_set_name, + None, + None, + topology_settings, + ) + + self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(initial_td), + newDescription=repr(self._description), + ) + + for seed in topology_settings.seeds: + if self._publish_server: + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + ) + + # Store the seed list to help diagnose errors in _error_message(). + self._seed_addresses = list(topology_description.server_descriptions()) + self._opened = False + self._closed = False + self._lock = _async_create_lock() + self._condition = _async_create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None + self._session_pool = _ServerSessionPool() + + if self._publish_server or self._publish_tp: + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue[Any]] + + async def target() -> bool: + return process_events_queue(weak) + + executor = periodic_executor.AsyncPeriodicExecutor( + interval=common.EVENTS_QUEUE_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_events_thread", + ) + + # We strongly reference the executor and it weakly references + # the queue via this closure. When the topology is freed, stop + # the executor soon. + weak = weakref.ref(self._events, executor.close) + self.__events_executor = executor + executor.open() + + self._srv_monitor = None + if self._settings.fqdn is not None and not self._settings.load_balanced: + self._srv_monitor = SrvMonitor(self, self._settings) + + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + No effect if called multiple times. + + .. warning:: Topology is shared among multiple threads and is protected + by mutual exclusion. Using Topology from a process other than the one + that initialized it will emit a warning and may result in deadlock. 
To
+ prevent this from happening, AsyncMongoClient must be created after any
+ forking.
+
+ """
+ pid = os.getpid()
+ if self._pid is None:
+ self._pid = pid
+ elif pid != self._pid:
+ self._pid = pid
+ if sys.version_info[:2] >= (3, 12):
+ kwargs = {"skip_file_prefixes": (_pymongo_dir,)}
+ else:
+ kwargs = {"stacklevel": 6}
+ # Ignore B028 warning for missing stacklevel.
+ warnings.warn( # type: ignore[call-overload] # noqa: B028
+ "AsyncMongoClient opened before fork. May not be entirely fork-safe, "
+ "proceed with caution. See PyMongo's documentation for details: "
+ "https://dochub.mongodb.org/core/pymongo-fork-deadlock",
+ **kwargs,
+ )
+ async with self._lock:
+ # Close servers and clear the pools.
+ for server in self._servers.values():
+ await server.close()
+ # Reset the session pool to avoid duplicate sessions in
+ # the child process.
+ self._session_pool.reset()
+
+ async with self._lock:
+ await self._ensure_opened()
+
+ def get_server_selection_timeout(self) -> float:
+ # CSOT: use remaining timeout when set.
+ timeout = _csot.remaining()
+ if timeout is None:
+ return self._settings.server_selection_timeout
+ return timeout
+
+ async def select_servers(
+ self,
+ selector: Callable[[Selection], Selection],
+ operation: str,
+ server_selection_timeout: Optional[float] = None,
+ address: Optional[_Address] = None,
+ operation_id: Optional[int] = None,
+ ) -> list[Server]:
+ """Return a list of Servers matching selector, or time out.
+
+ :param selector: function that takes a list of Servers and returns
+ a subset of them.
+ :param operation: The name of the operation that the server is being selected for.
+ :param server_selection_timeout: maximum seconds to wait.
+ If not provided, the default value common.SERVER_SELECTION_TIMEOUT
+ is used.
+ :param address: optional server address to select.
+ :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided.
+
+ Calls self.open() if needed.
+
+ Raises :exc:`ServerSelectionTimeoutError` after
+ `server_selection_timeout` if no matching servers are found.
+ """
+ if server_selection_timeout is None:
+ server_timeout = self.get_server_selection_timeout()
+ else:
+ server_timeout = server_selection_timeout
+
+ # Clean up any completed monitor tasks safely.
+ if not _IS_SYNC and self._monitor_tasks:
+ await self.cleanup_monitors()
+
+ async with self._lock:
+ server_descriptions = await self._select_servers_loop(
+ selector, server_timeout, operation, operation_id, address
+ )
+
+ return [
+ cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions
+ ]
+
+ async def _select_servers_loop(
+ self,
+ selector: Callable[[Selection], Selection],
+ timeout: float,
+ operation: str,
+ operation_id: Optional[int],
+ address: Optional[_Address],
+ ) -> list[ServerDescription]:
+ """select_servers() guts. Hold the lock when calling this."""
+ now = time.monotonic()
+ end_time = now + timeout
+ logged_waiting = False
+
+ if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _SERVER_SELECTION_LOGGER,
+ message=_ServerSelectionStatusMessage.STARTED,
+ selector=selector,
+ operation=operation,
+ operationId=operation_id,
+ topologyDescription=self.description,
+ clientId=self.description._topology_settings._topology_id,
+ )
+
+ server_descriptions = self._description.apply_selector(
+ selector, address, custom_selector=self._settings.server_selector
+ )
+
+ while not server_descriptions:
+ # No suitable servers.
+ if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) + raise ServerSelectionTimeoutError( + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) + + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(1000 * (end_time - time.monotonic())), + ) + logged_waiting = True + + await self._ensure_opened() + self._request_check_all() + + # Release the lock and wait for the topology description to + # change, or for a timeout. We won't miss any changes that + # came after our most recent apply_selector call, since we've + # held the lock until now. + await _async_cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) + self._description.check_compatible() + now = time.monotonic() + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + self._description.check_compatible() + return server_descriptions + + async def _select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + servers = await self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + async def select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Like select_servers, but choose a random server if several match.""" + server = await self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) + return server + + async def select_server_by_address( + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Return a Server for "address", reconnecting if necessary. 
+
+ If the server's type is not known, request an immediate check of all
+ servers. Time out after "server_selection_timeout" if the server
+ cannot be reached.
+
+ :param address: A (host, port) pair.
+ :param operation: The name of the operation that the server is being selected for.
+ :param server_selection_timeout: maximum seconds to wait.
+ If not provided, the default value
+ common.SERVER_SELECTION_TIMEOUT is used.
+ :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided.
+
+ Calls self.open() if needed.
+
+ Raises :exc:`ServerSelectionTimeoutError` after
+ `server_selection_timeout` if no matching servers are found.
+ """
+ return await self.select_server(
+ any_server_selector,
+ operation,
+ server_selection_timeout,
+ address,
+ operation_id=operation_id,
+ )
+
+ async def _process_change(
+ self,
+ server_description: ServerDescription,
+ reset_pool: bool = False,
+ interrupt_connections: bool = False,
+ ) -> None:
+ """Process a new ServerDescription on an opened topology.
+
+ Hold the lock when calling this.
+ """
+ td_old = self._description
+ sd_old = td_old._server_descriptions[server_description.address]
+ if _is_stale_server_description(sd_old, server_description):
+ # This is a stale hello response. Ignore it.
+ return
+
+ new_td = updated_topology_description(self._description, server_description)
+ # CMAP: Ensure the pool is "ready" when the server is selectable.
+ if server_description.is_readable or (
+ server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single
+ ):
+ server = self._servers.get(server_description.address)
+ if server:
+ await server.pool.ready()
+
+ suppress_event = sd_old == server_description
+ if self._publish_server and not suppress_event:
+ assert self._events is not None
+ self._events.put(
+ (
+ self._listeners.publish_server_description_changed,
+ (sd_old, server_description, server_description.address, self._topology_id),
+ )
+ )
+
+ self._description = new_td
+ await self._update_servers()
+
+ if self._publish_tp and not suppress_event:
+ assert self._events is not None
+ self._events.put(
+ (
+ self._listeners.publish_topology_description_changed,
+ (td_old, self._description, self._topology_id),
+ )
+ )
+ if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event:
+ _debug_log(
+ _SDAM_LOGGER,
+ message=_SDAMStatusMessage.TOPOLOGY_CHANGE,
+ topologyId=self._topology_id,
+ previousDescription=repr(td_old),
+ newDescription=repr(self._description),
+ )
+
+ # Shut down SRV polling for unsupported cluster types.
+ # This is only applicable if the old topology was Unknown, and the
+ # new one is something other than Unknown or Sharded.
+ if self._srv_monitor and (
+ td_old.topology_type == TOPOLOGY_TYPE.Unknown
+ and self._description.topology_type not in SRV_POLLING_TOPOLOGIES
+ ):
+ await self._srv_monitor.close()
+ if not _IS_SYNC:
+ self._monitor_tasks.append(self._srv_monitor)
+
+ # Wake anything waiting in select_servers().
+ self._condition.notify_all()
+
+ async def on_change(
+ self,
+ server_description: ServerDescription,
+ reset_pool: bool = False,
+ interrupt_connections: bool = False,
+ ) -> None:
+ """Process a new ServerDescription after a hello call completes."""
+ # We do no I/O holding the lock.
+ async with self._lock:
+ # Monitors may continue working on hello calls for some time
+ # after a call to Topology.close, so this method may be called at
+ # any time. Ensure the topology is open before processing the
+ # change.
+ # Any monitored server was definitely in the topology description + # once. Check if it's still in the description or if some state- + # change removed it. E.g., we got a host list from the primary + # that didn't include this server. + if self._opened and self._description.has_server(server_description.address): + await self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + await server.pool.reset(interrupt_connections=interrupt_connections) + + async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new seedlist on an opened topology. + Hold the lock when calling this. + """ + td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) + + await self._update_servers() + + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new list of nodes obtained from scanning SRV records.""" + # We do no I/O holding the lock. + async with self._lock: + if self._opened: + await self._process_srv_update(seedlist) + + def get_server_by_address(self, address: _Address) -> Optional[Server]: + """Get a Server or None. + + Returns the current version of the server immediately, even if it's + Unknown or absent from the topology. Only use this in unittests. + In driver code, use select_server_by_address, since then you're + assured a recent view of the server's type and wire protocol version. + """ + return self._servers.get(address) + + def has_server(self, address: _Address) -> bool: + return address in self._servers + + async def get_primary(self) -> Optional[_Address]: + """Return primary's address or None.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. + async with self._lock: + topology_type = self._description.topology_type + if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: + return None + + return writable_server_selector(self._new_selection())[0].address + + async def _get_replica_set_members( + self, selector: Callable[[Selection], Selection] + ) -> set[_Address]: + """Return set of replica set member addresses.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. 
+ async with self._lock: + topology_type = self._description.topology_type + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): + return set() + + return {sd.address for sd in iter(selector(self._new_selection()))} + + async def get_secondaries(self) -> set[_Address]: + """Return set of secondary addresses.""" + return await self._get_replica_set_members(secondary_server_selector) + + async def get_arbiters(self) -> set[_Address]: + """Return set of arbiter addresses.""" + return await self._get_replica_set_members(arbiter_server_selector) + + def max_cluster_time(self) -> Optional[ClusterTime]: + """Return a document, the highest seen $clusterTime.""" + return self._max_cluster_time + + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + # Driver Sessions Spec: "Whenever a driver receives a cluster time from + # a server it MUST compare it to the current highest seen cluster time + # for the deployment. If the new cluster time is higher than the + # highest seen cluster time it MUST become the new highest seen cluster + # time. Two cluster times are compared using only the BsonTimestamp + # value of the clusterTime embedded field." + if cluster_time: + # ">" uses bson.timestamp.Timestamp's comparison operator. + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): + self._max_cluster_time = cluster_time + + async def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + async with self._lock: + self._receive_cluster_time_no_lock(cluster_time) + + async def request_check_all(self, wait_time: int = 5) -> None: + """Wake all monitors, wait for at least one to check its server.""" + async with self._lock: + self._request_check_all() + await _async_cond_wait(self._condition, wait_time) + + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + + async def update_pool(self) -> None: + # Remove any stale sockets and add new sockets if pool is too small. + servers = [] + async with self._lock: + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + await server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + await self.handle_error(server.description.address, ctx) + raise + + async def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ + async with self._lock: + old_td = self._description + for server in self._servers.values(): + await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + + # Mark all servers Unknown. + self._description = self._description.reset() + for address, sd in self._description.server_descriptions().items(): + if address in self._servers: + self._servers[address].description = sd + + # Stop SRV polling thread. 
+ if self._srv_monitor: + await self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + self._opened = False + self._closed = True + + # Publish only after releasing the lock. + if self._publish_tp: + assert self._events is not None + self._description = TopologyDescription( + TOPOLOGY_TYPE.Unknown, + {}, + self._description.replica_set_name, + self._description.max_set_version, + self._description.max_election_id, + self._description._topology_settings, + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + ( + old_td, + self._description, + self._topology_id, + ), + ) + ) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(old_td), + newDescription=repr(self._description), + ) + _debug_log( + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id + ) + + if self._publish_server or self._publish_tp: + # Make sure the events executor thread is fully closed before publishing the remaining events + self.__events_executor.close() + await self.__events_executor.join(1) + process_events_queue(weakref.ref(self._events)) # type: ignore[arg-type] + + @property + def description(self) -> TopologyDescription: + return self._description + + def pop_all_sessions(self) -> list[_ServerSession]: + """Pop all session ids from the pool.""" + return self._session_pool.pop_all() + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + return self._session_pool.get_server_session(session_timeout_minutes) + + def return_server_session(self, server_session: _ServerSession) -> None: + self._session_pool.return_server_session(server_session) + + def _new_selection(self) -> Selection: + """A Selection object, initially including all known servers. + + Hold the lock when calling this. + """ + return Selection.from_topology_description(self._description) + + async def _ensure_opened(self) -> None: + """Start monitors, or restart after a fork. + + Hold the lock when calling this. + """ + if self._closed: + raise InvalidOperation("Cannot use AsyncMongoClient after close") + + if not self._opened: + self._opened = True + await self._update_servers() + + # Start or restart the events publishing thread. + if self._publish_tp or self._publish_server: + self.__events_executor.open() + + # Start the SRV polling thread. + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): + self._srv_monitor.open() + + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + await self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + + # Ensure that the monitors are open. + for server in self._servers.values(): + await server.open() + + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: + server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. 
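# ---------------------------------------------------------------------------
# Aside: the topologyVersion staleness test that the next lines assemble (and
# that _is_stale_error_topology_version implements further down), as a
# standalone function with sample inputs. The processId here is just a fresh
# ObjectId for illustration.
from bson.objectid import ObjectId

def is_stale(current_tv, error_tv):
    # An error is stale when both sides carry a topologyVersion from the same
    # server process and the error's counter is not newer than ours.
    if current_tv is None or error_tv is None:
        return False
    if current_tv["processId"] != error_tv["processId"]:
        return False
    return current_tv["counter"] >= error_tv["counter"]

pid = ObjectId()
assert is_stale({"processId": pid, "counter": 3}, {"processId": pid, "counter": 2})
assert not is_stale({"processId": pid, "counter": 1}, {"processId": pid, "counter": 2})
assert not is_stale(None, {"processId": pid, "counter": 2})  # unknown: not stale
# ---------------------------------------------------------------------------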
+ return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + async def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers_shared._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_shared._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + await server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + async def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. 
+ + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. + """ + async with self._lock: + await self._handle_error(address, err_ctx) + + def _request_check_all(self) -> None: + """Wake all monitors. Hold the lock when calling this.""" + for server in self._servers.values(): + server.request_check() + + async def _update_servers(self) -> None: + """Sync our Servers from TopologyDescription.server_descriptions. + + Hold the lock while calling this. + """ + for address, sd in self._description.server_descriptions().items(): + if address not in self._servers: + monitor = self._settings.monitor_class( + server_description=sd, + topology=self, + pool=self._create_pool_for_monitor(address), + topology_settings=self._settings, + ) + + weak = None + if self._publish_server and self._events is not None: + weak = weakref.ref(self._events) + server = Server( + server_description=sd, + pool=self._create_pool_for_server(address), + monitor=monitor, + topology_id=self._topology_id, + listeners=self._listeners, + events=weak, + ) + + self._servers[address] = server + await server.open() + else: + # Cache old is_writable value. + was_writable = self._servers[address].description.is_writable + # Update server description. + self._servers[address].description = sd + # Update is_writable value of the pool, if it changed. + if was_writable != sd.is_writable: + await self._servers[address].pool.update_is_writable(sd.is_writable) + + for address, server in list(self._servers.items()): + if not self._description.has_server(address): + await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + self._servers.pop(address) + + def _create_pool_for_server(self, address: _Address) -> Pool: + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) + + def _create_pool_for_monitor(self, address: _Address) -> Pool: + options = self._settings.pool_options + + # According to the Server Discovery And Monitoring Spec, monitors use + # connect_timeout for both connect_timeout and socket_timeout. The + # pool only has one socket so maxPoolSize and so on aren't needed. + monitor_pool_options = PoolOptions( + connect_timeout=options.connect_timeout, + socket_timeout=options.connect_timeout, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, + appname=options.appname, + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) + + return self._settings.pool_class( + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id + ) + + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: + """Format an error message if server selection fails. + + Hold the lock when calling this. + """ + is_replica_set = self._description.topology_type in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) + + if is_replica_set: + server_plural = "replica set members" + elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: + server_plural = "mongoses" + else: + server_plural = "servers" + + if self._description.known_servers: + # We've connected, but no servers match the selector. 
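# ---------------------------------------------------------------------------
# Aside: the "not primary" pool-clearing decision _handle_error applies above,
# reduced to a pure function. The code sets below are illustrative subsets of
# helpers_shared._NOT_PRIMARY_CODES and _SHUTDOWN_CODES, not the full lists.
NOT_PRIMARY_CODES = {10107, 13435, 11600, 91}  # includes the shutdown codes
SHUTDOWN_CODES = {11600, 91}

def should_clear_pool(err_code, max_wire_version):
    # Per SDAM: on a "not primary" error, clear the pool only if the server is
    # shutting down or predates maxWireVersion 8 (i.e. server < 4.2);
    # otherwise keep the pool, mark the server Unknown, and request a check.
    if err_code not in NOT_PRIMARY_CODES:
        return False
    return err_code in SHUTDOWN_CODES or max_wire_version <= 7

assert should_clear_pool(11600, 9)       # shutdown error: always clear
assert should_clear_pool(10107, 7)       # pre-4.2 server: clear
assert not should_clear_pool(10107, 9)   # modern server: keep the pool
# ---------------------------------------------------------------------------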
+ if selector is writable_server_selector: + if is_replica_set: + return "No primary available for writes" + else: + return "No %s available for writes" % server_plural + else: + return f'No {server_plural} match selector "{selector}"' + else: + addresses = list(self._description.server_descriptions()) + servers = list(self._description.server_descriptions().values()) + if not servers: + if is_replica_set: + # We removed all servers because of the wrong setName? + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) + else: + return "No %s available" % server_plural + + # 1 or more servers, all Unknown. Are they unknown for one reason? + error = servers[0].error + same = all(server.error == error for server in servers[1:]) + if same: + if error is None: + # We're still discovering. + return "No %s found yet" % server_plural + + if is_replica_set and not set(addresses).intersection(self._seed_addresses): + # We replaced our seeds with new hosts but can't reach any. + return ( + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) + + return str(error) + else: + return ",".join(str(server.error) for server in servers if server.error) + + async def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + await asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for AsyncMongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> 
list[Server]: + """Filter out deprioritized servers from a list of server candidates.""" + if not deprioritized_servers: + return candidates + + filtered = [server for server in candidates if server not in deprioritized_servers] + + # If not possible to pick a prioritized server, return the original list + return filtered or candidates diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py new file mode 100644 index 0000000000..055b04d75a --- /dev/null +++ b/pymongo/asynchronous/uri_parser.py @@ -0,0 +1,193 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + +from typing import Any, Optional +from urllib.parse import unquote_plus + +from pymongo.asynchronous.srv_resolver import _SrvResolver +from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.uri_parser_shared import ( + _ALLOWED_TXT_OPTS, + DEFAULT_PORT, + SCHEME, + SCHEME_LEN, + SRV_SCHEME_LEN, + _check_options, + _make_options_case_sensitive, + _validate_uri, + split_hosts, + split_options, +) + +_IS_SYNC = False + + +async def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + """Parse and validate a MongoDB URI. + + Returns a dict of the form:: + + { + 'nodelist': , + 'username': or None, + 'password': or None, + 'database': or None, + 'collection': or None, + 'options': , + 'fqdn': or None + } + + If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done + to build nodelist and options. + + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified + for a host in the URI. + :param validate: If ``True`` (the default), validate and + normalize all options. Default: ``True``. + :param warn: When validating, if ``True`` then will warn + the user then ignore any invalid options or values. If ``False``, + validation will error when options are unsupported or values are + invalid. Default: ``False``. + :param normalize: If ``True``, convert names of URI options + to their internally-used names. Default: ``True``. + :param connect_timeout: The maximum time in milliseconds to + wait for a response from the DNS server. + :param srv_service_name: A custom SRV service name + + .. versionchanged:: 4.14 + ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``. + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. + + .. 
versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + await _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + result["options"] = _make_options_case_sensitive(result["options"]) + return result + + +async def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = await dns_resolver.get_hosts() + dns_options = await dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/auth.py b/pymongo/auth.py index f8d3513d9a..a36f3f4233 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -1,10 +1,10 @@ -# Copyright 2013-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,233 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Authentication helpers.""" +"""Re-import of synchronous Auth API for compatibility.""" +from __future__ import annotations -import hmac -try: - import hashlib - _MD5 = hashlib.md5 - _DMOD = _MD5 -except ImportError: # for Python < 2.5 - import md5 - _MD5 = md5.new - _DMOD = md5 - -HAVE_KERBEROS = True -try: - import kerberos -except ImportError: - HAVE_KERBEROS = False - -from bson.binary import Binary -from bson.py3compat import b -from bson.son import SON -from pymongo.errors import ConfigurationError, OperationFailure - - -MECHANISMS = frozenset(['GSSAPI', 'MONGODB-CR', 'MONGODB-X509', 'PLAIN']) -"""The authentication mechanisms supported by PyMongo.""" - - -def _build_credentials_tuple(mech, source, user, passwd, extra): - """Build and return a mechanism specific credentials tuple. - """ - if mech == 'GSSAPI': - gsn = extra.get('gssapiservicename', 'mongodb') - # No password, source is always $external. - return (mech, '$external', user, gsn) - elif mech == 'MONGODB-X509': - return (mech, '$external', user) - return (mech, source, user, passwd) - - -def _password_digest(username, password): - """Get a password digest to use for authentication. - """ - if not isinstance(password, basestring): - raise TypeError("password must be an instance " - "of %s" % (basestring.__name__,)) - if len(password) == 0: - raise ValueError("password can't be empty") - if not isinstance(username, basestring): - raise TypeError("username must be an instance " - "of %s" % (basestring.__name__,)) - - md5hash = _MD5() - data = "%s:mongo:%s" % (username, password) - md5hash.update(data.encode('utf-8')) - return unicode(md5hash.hexdigest()) - - -def _auth_key(nonce, username, password): - """Get an auth key to use for authentication. - """ - digest = _password_digest(username, password) - md5hash = _MD5() - data = "%s%s%s" % (nonce, unicode(username), digest) - md5hash.update(data.encode('utf-8')) - return unicode(md5hash.hexdigest()) - - -def _authenticate_gssapi(credentials, sock_info, cmd_func): - """Authenticate using GSSAPI. - """ - try: - dummy, username, gsn = credentials - # Starting here and continuing through the while loop below - establish - # the security context. See RFC 4752, Section 3.1, first paragraph. - result, ctx = kerberos.authGSSClientInit( - gsn + '@' + sock_info.host, gssflags=kerberos.GSS_C_MUTUAL_FLAG) - - if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure('Kerberos context failed to initialize.') - - try: - # pykerberos uses a weird mix of exceptions and return values - # to indicate errors. - # 0 == continue, 1 == complete, -1 == error - # Only authGSSClientStep can return 0. - if kerberos.authGSSClientStep(ctx, '') != 0: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') - - # Start a SASL conversation with mongod/s - # Note: pykerberos deals with base64 encoded byte strings. - # Since mongo accepts base64 strings as the payload we don't - # have to use bson.binary.Binary. 
- payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslStart', 1), - ('mechanism', 'GSSAPI'), - ('payload', payload), - ('autoAuthorize', 1)]) - response, _ = cmd_func(sock_info, '$external', cmd) - - # Limit how many times we loop to catch protocol / library issues - for _ in xrange(10): - result = kerberos.authGSSClientStep(ctx, - str(response['payload'])) - if result == -1: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') - - payload = kerberos.authGSSClientResponse(ctx) or '' - - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - response, _ = cmd_func(sock_info, '$external', cmd) - - if result == kerberos.AUTH_GSS_COMPLETE: - break - else: - raise OperationFailure('Kerberos ' - 'authentication failed to complete.') - - # Once the security context is established actually authenticate. - # See RFC 4752, Section 3.1, last two paragraphs. - if kerberos.authGSSClientUnwrap(ctx, - str(response['payload'])) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Unwrap step.') - - if kerberos.authGSSClientWrap(ctx, - kerberos.authGSSClientResponse(ctx), - username) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Wrap step.') - - payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - response, _ = cmd_func(sock_info, '$external', cmd) - - finally: - kerberos.authGSSClientClean(ctx) - - except kerberos.KrbError, exc: - raise OperationFailure(str(exc)) - - -def _authenticate_plain(credentials, sock_info, cmd_func): - """Authenticate using SASL PLAIN (RFC 4616) - """ - source, username, password = credentials - payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8') - cmd = SON([('saslStart', 1), - ('mechanism', 'PLAIN'), - ('payload', Binary(payload)), - ('autoAuthorize', 1)]) - cmd_func(sock_info, source, cmd) - - -def _authenticate_cram_md5(credentials, sock_info, cmd_func): - """Authenticate using CRAM-MD5 (RFC 2195) - """ - source, username, password = credentials - # The password used as the mac key is the - # same as what we use for MONGODB-CR - passwd = _password_digest(username, password) - cmd = SON([('saslStart', 1), - ('mechanism', 'CRAM-MD5'), - ('payload', Binary(b(''))), - ('autoAuthorize', 1)]) - response, _ = cmd_func(sock_info, source, cmd) - # MD5 as implicit default digest for digestmod is deprecated - # in python 3.4 - mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=_DMOD) - mac.update(response['payload']) - challenge = username.encode('utf-8') + b(' ') + b(mac.hexdigest()) - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', Binary(challenge))]) - cmd_func(sock_info, source, cmd) - - -def _authenticate_x509(credentials, sock_info, cmd_func): - """Authenticate using MONGODB-X509. - """ - dummy, username = credentials - query = SON([('authenticate', 1), - ('mechanism', 'MONGODB-X509'), - ('user', username)]) - cmd_func(sock_info, '$external', query) - - -def _authenticate_mongo_cr(credentials, sock_info, cmd_func): - """Authenticate using MONGODB-CR. 
- """ - source, username, password = credentials - # Get a nonce - response, _ = cmd_func(sock_info, source, {'getnonce': 1}) - nonce = response['nonce'] - key = _auth_key(nonce, username, password) - - # Actually authenticate - query = SON([('authenticate', 1), - ('user', username), - ('nonce', nonce), - ('key', key)]) - cmd_func(sock_info, source, query) - - -_AUTH_MAP = { - 'CRAM-MD5': _authenticate_cram_md5, - 'GSSAPI': _authenticate_gssapi, - 'MONGODB-CR': _authenticate_mongo_cr, - 'MONGODB-X509': _authenticate_x509, - 'PLAIN': _authenticate_plain, -} - - -def authenticate(credentials, sock_info, cmd_func): - """Authenticate sock_info. - """ - mechanism = credentials[0] - if mechanism == 'GSSAPI': - if not HAVE_KERBEROS: - raise ConfigurationError('The "kerberos" module must be ' - 'installed to use GSSAPI authentication.') - auth_func = _AUTH_MAP.get(mechanism) - auth_func(credentials[1:], sock_info, cmd_func) +from pymongo.auth_shared import * # noqa: F403 +from pymongo.synchronous.auth import * # noqa: F403 +from pymongo.synchronous.auth import __doc__ as original_doc +__doc__ = original_doc diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..61764b8111 --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,23 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous AuthOIDC API for compatibility.""" +from __future__ import annotations + +from pymongo.auth_oidc_shared import * # noqa: F403 +from pymongo.synchronous.auth_oidc import * # noqa: F403 +from pymongo.synchronous.auth_oidc import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["OIDCCallback", "OIDCCallbackContext", "OIDCCallbackResult", "OIDCIdPInfo"] # noqa: F405 diff --git a/pymongo/auth_oidc_shared.py b/pymongo/auth_oidc_shared.py new file mode 100644 index 0000000000..d33397f52d --- /dev/null +++ b/pymongo/auth_oidc_shared.py @@ -0,0 +1,132 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ + +"""Constants, types, and classes shared across OIDC auth implementations.""" +from __future__ import annotations + +import abc +import os +from dataclasses import dataclass, field +from typing import Optional +from urllib.parse import quote + +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response + + +@dataclass +class OIDCIdPInfo: + issuer: str + clientId: Optional[str] = field(default=None) + requestScopes: Optional[list[str]] = field(default=None) + + +@dataclass +class OIDCCallbackContext: + timeout_seconds: float + username: str + version: int + refresh_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + + +@dataclass +class OIDCCallbackResult: + access_token: str + expires_in_seconds: Optional[float] = field(default=None) + refresh_token: Optional[str] = field(default=None) + + +class OIDCCallback(abc.ABC): + """A base class for defining OIDC callbacks.""" + + @abc.abstractmethod + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + """Convert the given BSON value into our own type.""" + + +@dataclass +class _OIDCProperties: + callback: Optional[OIDCCallback] = field(default=None) + human_callback: Optional[OIDCCallback] = field(default=None) + environment: Optional[str] = field(default=None) + allowed_hosts: list[str] = field(default_factory=list) + token_resource: Optional[str] = field(default=None) + username: str = "" + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CALLBACK_VERSION = 1 +MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 +TIME_BETWEEN_CALLS_SECONDS = 0.1 + + +class _OIDCTestCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("OIDC_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAWSCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("AWS_WEB_IDENTITY_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "aws" provider requires "AWS_WEB_IDENTITY_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAzureCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) + return OIDCCallbackResult( + access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] + ) + + +class _OIDCGCPCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_gcp_response(self.token_resource, context.timeout_seconds) + return OIDCCallbackResult(access_token=resp["access_token"]) + + +class _OIDCK8SCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + return OIDCCallbackResult(access_token=_get_k8s_token()) + + +def _get_k8s_token() -> str: + fname = "/var/run/secrets/kubernetes.io/serviceaccount/token" + for key in 
["AZURE_FEDERATED_TOKEN_FILE", "AWS_WEB_IDENTITY_TOKEN_FILE"]: + if key in os.environ: + fname = os.environ[key] + with open(fname) as fid: + return fid.read() diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py new file mode 100644 index 0000000000..5a9a2b6732 --- /dev/null +++ b/pymongo/auth_shared.py @@ -0,0 +1,254 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants and types shared across multiple auth types.""" +from __future__ import annotations + +import os +import typing +from base64 import standard_b64encode +from collections import namedtuple +from typing import Any, Dict, Mapping, Optional + +from bson import Binary +from pymongo.auth_oidc_shared import ( + _OIDCAzureCallback, + _OIDCGCPCallback, + _OIDCK8SCallback, + _OIDCProperties, + _OIDCTestCallback, +) +from pymongo.errors import ConfigurationError + +MECHANISMS = frozenset( + [ + "GSSAPI", + "MONGODB-OIDC", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) +"""The authentication mechanisms supported by PyMongo.""" + + +class _Cache: + __slots__ = ("data",) + + _hash_val = hash("_Cache") + + def __init__(self) -> None: + self.data = None + + def __eq__(self, other: object) -> bool: + # Two instances must always compare equal. 
+ if isinstance(other, _Cache): + return True + return NotImplemented + + def __ne__(self, other: object) -> bool: + if isinstance(other, _Cache): + return False + return NotImplemented + + def __hash__(self) -> int: + return self._hash_val + + +MongoCredential = namedtuple( + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) +"""A hashable namedtuple of values used for authentication.""" + + +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm", "service_host"] +) +"""Mechanism properties for GSSAPI authentication.""" + + +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) +"""Mechanism properties for MONGODB-AWS authentication.""" + + +def _validate_canonicalize_host_name(value: str | bool) -> str | bool: + valid_names = [False, True, "none", "forward", "forwardAndReverse"] + if value in ["true", "false", True, False]: + return value in ["true", True] + + if value not in valid_names: + raise ValueError(f"CANONICALIZE_HOST_NAME '{value}' not in valid options: {valid_names}") + return value + + +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: Optional[str], + passwd: Optional[str], + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: + raise ConfigurationError(f"{mech} requires a username") + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + service_host = properties.get("SERVICE_HOST", None) + canonicalize = properties.get("CANONICALIZE_HOST_NAME", "false") + canonicalize = _validate_canonicalize_host_name(canonicalize) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + service_host=service_host, + ) + # Source is always $external. + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": + if passwd is not None: + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for MONGODB-X509") + # Source is always $external, user can be None. + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": + if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": + raise ConfigurationError( + "authentication source must be $external or None for MONGODB-AWS" + ) + + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") + aws_props = _AWSProperties(aws_session_token=aws_session_token) + # user can be None for temporary link-local EC2 credentials. 
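# ---------------------------------------------------------------------------
# Aside: what _validate_canonicalize_host_name above accepts -- booleans and
# the strings "true"/"false" collapse to a bool, the tri-state names pass
# through unchanged, and anything else raises. Re-created locally so the
# checks run standalone.
def validate_canonicalize(value):
    valid = [False, True, "none", "forward", "forwardAndReverse"]
    if value in ["true", "false", True, False]:
        return value in ["true", True]
    if value not in valid:
        raise ValueError(f"{value!r} not in valid options: {valid}")
    return value

assert validate_canonicalize("true") is True
assert validate_canonicalize(False) is False
assert validate_canonicalize("forward") == "forward"  # tri-state passthrough
try:
    validate_canonicalize("reverse")
except ValueError:
    pass  # unrecognized names are rejected
# ---------------------------------------------------------------------------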
+ return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "MONGODB-OIDC": + properties = extra.get("authmechanismproperties", {}) + callback = properties.get("OIDC_CALLBACK") + human_callback = properties.get("OIDC_HUMAN_CALLBACK") + environ = properties.get("ENVIRONMENT") + token_resource = properties.get("TOKEN_RESOURCE", "") + default_allowed = [ + "*.mongodb.net", + "*.mongodb-dev.net", + "*.mongodb-qa.net", + "*.mongodbgov.net", + "localhost", + "127.0.0.1", + "::1", + ] + allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) + if properties.get("ALLOWED_HOSTS", None) is not None and human_callback is None: + raise ConfigurationError("ALLOWED_HOSTS is only valid with OIDC_HUMAN_CALLBACK") + msg = ( + "authentication with MONGODB-OIDC requires providing either a callback or a environment" + ) + if passwd is not None: + msg = "password is not supported by MONGODB-OIDC" + raise ConfigurationError(msg) + if callback or human_callback: + if environ is not None: + raise ConfigurationError(msg) + if callback and human_callback: + msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK" + raise ConfigurationError(msg) + elif environ is not None: + if environ == "test": + if user is not None: + msg = "test environment for MONGODB-OIDC does not support username" + raise ConfigurationError(msg) + callback = _OIDCTestCallback() + elif environ == "azure": + passwd = None + if not token_resource: + raise ConfigurationError( + "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCAzureCallback(token_resource) + elif environ == "gcp": + passwd = None + if not token_resource: + raise ConfigurationError( + "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCGCPCallback(token_resource) + elif environ == "k8s": + passwd = None + callback = _OIDCK8SCallback() + else: + raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") + else: + raise ConfigurationError(msg) + + oidc_props = _OIDCProperties( + callback=callback, + human_callback=human_callback, + environment=environ, + allowed_hosts=allowed_hosts, + token_resource=token_resource, + username=user or "", + ) + return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) + + elif mech == "PLAIN": + source_database = source or database or "$external" + return MongoCredential(mech, source_database, user, passwd, None, None) + else: + source_database = source or database or "admin" + if passwd is None: + raise ConfigurationError("A password is required") + return MongoCredential(mech, source_database, user, passwd, None, _Cache()) + + +def _xor(fir: bytes, sec: bytes) -> bytes: + """XOR two byte strings together.""" + return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) + + +def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]: + """Split a scram response into key, value pairs.""" + return dict( + typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1)) + for item in response.split(b",") + ) + + +def _authenticate_scram_start( + credentials: MongoCredential, mechanism: str +) -> tuple[bytes, bytes, typing.MutableMapping[str, Any]]: + username = credentials.username + user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") + nonce = standard_b64encode(os.urandom(32)) + first_bare = b"n=" + user + b",r=" + nonce + + cmd = { + "saslStart": 1, + "mechanism": mechanism, + "payload": Binary(b"n,," + first_bare), + 
"autoAuthorize": 1, + "options": {"skipEmptyExchange": True}, + } + return nonce, first_bare, cmd diff --git a/pymongo/bulk.py b/pymongo/bulk.py deleted file mode 100644 index 58515f83f4..0000000000 --- a/pymongo/bulk.py +++ /dev/null @@ -1,585 +0,0 @@ -# Copyright 2014-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""The bulk write operations interface. - -.. versionadded:: 2.7 -""" - -from bson.objectid import ObjectId -from bson.son import SON -from pymongo.errors import (BulkWriteError, - DocumentTooLarge, - InvalidOperation, - OperationFailure) -from pymongo.message import (_INSERT, _UPDATE, _DELETE, - insert, _do_batched_write_command) - - -_DELETE_ALL = 0 -_DELETE_ONE = 1 - -# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err -_BAD_VALUE = 2 -_UNKNOWN_ERROR = 8 -_WRITE_CONCERN_ERROR = 64 - -_COMMANDS = ('insert', 'update', 'delete') - - -class _Run(object): - """Represents a batch of write operations. - """ - def __init__(self, op_type): - """Initialize a new Run object. - """ - self.op_type = op_type - self.index_map = [] - self.ops = [] - - def index(self, idx): - """Get the original index of an operation in this run. - - :Parameters: - - `idx`: The Run index that maps to the original index. - """ - return self.index_map[idx] - - def add(self, original_index, operation): - """Add an operation to this Run instance. - - :Parameters: - - `original_index`: The original index of this operation - within a larger bulk operation. - - `operation`: The operation document. - """ - self.index_map.append(original_index) - self.ops.append(operation) - - -def _make_error(index, code, errmsg, operation): - """Create and return an error document. - """ - return { - u"index": index, - u"code": code, - u"errmsg": errmsg, - u"op": operation - } - - -def _merge_legacy(run, full_result, result, index): - """Merge a result from a legacy opcode into the full results. - """ - # MongoDB 2.6 returns {'ok': 0, 'code': 2, ...} if the j write - # concern option is used with --nojournal or w > 1 is used with - # a standalone mongod instance. Raise immediately here for - # consistency when talking to older servers. Since these are - # configuration errors related to write concern the entire batch - # will fail. - note = result.get("jnote", result.get("wnote")) - if note: - raise OperationFailure(note, _BAD_VALUE, result) - - affected = result.get('n', 0) - - errmsg = result.get("errmsg", result.get("err", "")) - if errmsg: - # wtimeout is not considered a hard failure in - # MongoDB 2.6 so don't treat it like one here. 
- if result.get("wtimeout"): - error_doc = {'errmsg': errmsg, 'code': _WRITE_CONCERN_ERROR} - full_result['writeConcernErrors'].append(error_doc) - else: - code = result.get("code", _UNKNOWN_ERROR) - error = _make_error(run.index(index), code, errmsg, run.ops[index]) - if "errInfo" in result: - error["errInfo"] = result["errInfo"] - full_result["writeErrors"].append(error) - return - - if run.op_type == _INSERT: - full_result['nInserted'] += 1 - elif run.op_type == _UPDATE: - if "upserted" in result: - doc = {u"index": run.index(index), u"_id": result["upserted"]} - full_result["upserted"].append(doc) - full_result['nUpserted'] += affected - else: - full_result['nMatched'] += affected - elif run.op_type == _DELETE: - full_result['nRemoved'] += affected - - -def _merge_command(run, full_result, results): - """Merge a group of results from write commands into the full result. - """ - for offset, result in results: - - affected = result.get("n", 0) - - if run.op_type == _INSERT: - full_result["nInserted"] += affected - - elif run.op_type == _DELETE: - full_result["nRemoved"] += affected - - elif run.op_type == _UPDATE: - upserted = result.get("upserted") - if upserted: - if isinstance(upserted, list): - n_upserted = len(upserted) - for doc in upserted: - doc["index"] = run.index(doc["index"] + offset) - full_result["upserted"].extend(upserted) - else: - n_upserted = 1 - index = run.index(offset) - doc = {u"index": index, u"_id": upserted} - full_result["upserted"].append(doc) - full_result["nUpserted"] += n_upserted - full_result["nMatched"] += (affected - n_upserted) - else: - full_result["nMatched"] += affected - n_modified = result.get("nModified") - # SERVER-13001 - in a mixed sharded cluster a call to - # update could return nModified (>= 2.6) or not (<= 2.4). - # If any call does not return nModified we can't report - # a valid final count so omit the field completely. - if n_modified is not None and "nModified" in full_result: - full_result["nModified"] += n_modified - else: - full_result.pop("nModified", None) - - write_errors = result.get("writeErrors") - if write_errors: - for doc in write_errors: - idx = doc["index"] + offset - doc["index"] = run.index(idx) - # Add the failed operation to the error document. - doc[u"op"] = run.ops[idx] - full_result["writeErrors"].extend(write_errors) - - wc_error = result.get("writeConcernError") - if wc_error: - full_result["writeConcernErrors"].append(wc_error) - - -class _Bulk(object): - """The private guts of the bulk write API. - """ - def __init__(self, collection, ordered): - """Initialize a _Bulk instance. - """ - self.collection = collection - self.ordered = ordered - self.ops = [] - self.name = "%s.%s" % (collection.database.name, collection.name) - self.namespace = collection.database.name + '.$cmd' - self.executed = False - - def add_insert(self, document): - """Add an insert document to the list of ops. - """ - if not isinstance(document, dict): - raise TypeError('document must be an instance of dict') - # Generate ObjectId client side. - if '_id' not in document: - document['_id'] = ObjectId() - self.ops.append((_INSERT, document)) - - def add_update(self, selector, update, multi=False, upsert=False): - """Create an update document and add it to the list of ops. 
- """ - if not isinstance(update, dict): - raise TypeError('update must be an instance of dict') - # Update can not be {} - if not update: - raise ValueError('update only works with $ operators') - first = iter(update).next() - if not first.startswith('$'): - raise ValueError('update only works with $ operators') - cmd = SON([('q', selector), ('u', update), - ('multi', multi), ('upsert', upsert)]) - self.ops.append((_UPDATE, cmd)) - - def add_replace(self, selector, replacement, upsert=False): - """Create a replace document and add it to the list of ops. - """ - if not isinstance(replacement, dict): - raise TypeError('replacement must be an instance of dict') - # Replacement can be {} - if replacement: - first = iter(replacement).next() - if first.startswith('$'): - raise ValueError('replacement can not include $ operators') - cmd = SON([('q', selector), ('u', replacement), - ('multi', False), ('upsert', upsert)]) - self.ops.append((_UPDATE, cmd)) - - def add_delete(self, selector, limit): - """Create a delete document and add it to the list of ops. - """ - cmd = SON([('q', selector), ('limit', limit)]) - self.ops.append((_DELETE, cmd)) - - def gen_ordered(self): - """Generate batches of operations, batched by type of - operation, in the order **provided**. - """ - run = None - for idx, (op_type, operation) in enumerate(self.ops): - if run is None: - run = _Run(op_type) - elif run.op_type != op_type: - yield run - run = _Run(op_type) - run.add(idx, operation) - yield run - - def gen_unordered(self): - """Generate batches of operations, batched by type of - operation, in arbitrary order. - """ - operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] - for idx, (op_type, operation) in enumerate(self.ops): - operations[op_type].add(idx, operation) - - for run in operations: - if run.ops: - yield run - - def execute_command(self, generator, write_concern): - """Execute using write commands. - """ - uuid_subtype = self.collection.uuid_subtype - client = self.collection.database.connection - # nModified is only reported for write commands, not legacy ops. - full_result = { - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nModified": 0, - "nRemoved": 0, - "upserted": [], - } - for run in generator: - cmd = SON([(_COMMANDS[run.op_type], self.collection.name), - ('ordered', self.ordered)]) - if write_concern: - cmd['writeConcern'] = write_concern - - results = _do_batched_write_command(self.namespace, - run.op_type, cmd, run.ops, True, uuid_subtype, client) - - _merge_command(run, full_result, results) - # We're supposed to continue if errors are - # at the write concern level (e.g. wtimeout) - if self.ordered and full_result['writeErrors']: - break - - if full_result["writeErrors"] or full_result["writeConcernErrors"]: - if full_result['writeErrors']: - full_result['writeErrors'].sort( - key=lambda error: error['index']) - raise BulkWriteError(full_result) - return full_result - - def execute_no_results(self, generator): - """Execute all operations, returning no results (w=0). - """ - coll = self.collection - w_value = 0 - # If ordered is True we have to send GLE or use write - # commands so we can abort on the first error. 
- if self.ordered: - w_value = 1 - - for run in generator: - try: - if run.op_type == _INSERT: - coll.insert(run.ops, - continue_on_error=not self.ordered, - w=w_value) - else: - for operation in run.ops: - try: - if run.op_type == _UPDATE: - coll.update(operation['q'], - operation['u'], - upsert=operation['upsert'], - multi=operation['multi'], - w=w_value) - else: - coll.remove(operation['q'], - multi=(not operation['limit']), - w=w_value) - except OperationFailure: - if self.ordered: - return - except OperationFailure: - if self.ordered: - break - - def legacy_insert(self, operation, write_concern): - """Do a legacy insert and return the result. - """ - # We have to do this here since Collection.insert - # throws away results and we need to check for jnote. - client = self.collection.database.connection - uuid_subtype = self.collection.uuid_subtype - return client._send_message( - insert(self.name, [operation], True, True, - write_concern, False, uuid_subtype), True) - - def execute_legacy(self, generator, write_concern): - """Execute using legacy wire protocol ops. - """ - coll = self.collection - full_result = { - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nRemoved": 0, - "upserted": [], - } - stop = False - for run in generator: - for idx, operation in enumerate(run.ops): - try: - # To do per-operation reporting we have to do ops one - # at a time. That means the performance of bulk insert - # will be slower here than calling Collection.insert() - if run.op_type == _INSERT: - result = self.legacy_insert(operation, write_concern) - elif run.op_type == _UPDATE: - result = coll.update(operation['q'], - operation['u'], - upsert=operation['upsert'], - multi=operation['multi'], - **write_concern) - else: - result = coll.remove(operation['q'], - multi=(not operation['limit']), - **write_concern) - _merge_legacy(run, full_result, result, idx) - except DocumentTooLarge, exc: - # MongoDB 2.6 uses error code 2 for "too large". - error = _make_error( - run.index(idx), _BAD_VALUE, str(exc), operation) - full_result['writeErrors'].append(error) - if self.ordered: - stop = True - break - except OperationFailure, exc: - if not exc.details: - # Some error not related to the write operation - # (e.g. kerberos failure). Re-raise immediately. - raise - _merge_legacy(run, full_result, exc.details, idx) - # We're supposed to continue if errors are - # at the write concern level (e.g. wtimeout) - if self.ordered and full_result["writeErrors"]: - stop = True - break - if stop: - break - - if full_result["writeErrors"] or full_result['writeConcernErrors']: - if full_result['writeErrors']: - full_result['writeErrors'].sort( - key=lambda error: error['index']) - raise BulkWriteError(full_result) - return full_result - - def execute(self, write_concern): - """Execute operations. 
- """ - if not self.ops: - raise InvalidOperation('No operations to execute') - if self.executed: - raise InvalidOperation('Bulk operations can ' - 'only be executed once.') - self.executed = True - client = self.collection.database.connection - client._ensure_connected(sync=True) - write_concern = write_concern or self.collection.write_concern - - if self.ordered: - generator = self.gen_ordered() - else: - generator = self.gen_unordered() - - if write_concern.get('w') == 0: - self.execute_no_results(generator) - elif client.max_wire_version > 1: - return self.execute_command(generator, write_concern) - else: - return self.execute_legacy(generator, write_concern) - - -class BulkUpsertOperation(object): - """An interface for adding upsert operations. - """ - - __slots__ = ('__selector', '__bulk') - - def __init__(self, selector, bulk): - self.__selector = selector - self.__bulk = bulk - - def update_one(self, update): - """Update one document matching the selector. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=False, upsert=True) - - def update(self, update): - """Update all documents matching the selector. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=True, upsert=True) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement, upsert=True) - - -class BulkWriteOperation(object): - """An interface for adding update or remove operations. - """ - - __slots__ = ('__selector', '__bulk') - - def __init__(self, selector, bulk): - self.__selector = selector - self.__bulk = bulk - - def update_one(self, update): - """Update one document matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=False) - - def update(self, update): - """Update all documents matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=True) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement) - - def remove_one(self): - """Remove a single document matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ONE) - - def remove(self): - """Remove all documents matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ALL) - - def upsert(self): - """Specify that all chained update operations should be - upserts. - - :Returns: - - A :class:`BulkUpsertOperation` instance, used to add - update operations to this bulk operation. - """ - return BulkUpsertOperation(self.__selector, self.__bulk) - - -class BulkOperationBuilder(object): - """An interface for executing a batch of write operations. - """ - - __slots__ = '__bulk' - - def __init__(self, collection, ordered=True): - """Initialize a new BulkOperationBuilder instance. - - :Parameters: - - `collection`: A :class:`~pymongo.collection.Collection` instance. 
- - `ordered` (optional): If ``True`` all operations will be executed - serially, in the order provided, and the entire execution will - abort on the first error. If ``False`` operations will be executed - in arbitrary order (possibly in parallel on the server), reporting - any errors that occurred after attempting all operations. Defaults - to ``True``. - - .. warning:: - If you are using a version of MongoDB older than 2.6 you will - get much better bulk insert performance using - :meth:`~pymongo.collection.Collection.insert`. - """ - self.__bulk = _Bulk(collection, ordered) - - def find(self, selector): - """Specify selection criteria for bulk operations. - - :Parameters: - - `selector` (dict): the selection criteria for update - and remove operations. - - :Returns: - - A :class:`BulkWriteOperation` instance, used to add - update and remove operations to this bulk operation. - """ - if not isinstance(selector, dict): - raise TypeError('selector must be an instance of dict') - return BulkWriteOperation(selector, self.__bulk) - - def insert(self, document): - """Insert a single document. - - :Parameters: - - `document` (dict): the document to insert - """ - self.__bulk.add_insert(document) - - def execute(self, write_concern=None): - """Execute all provided operations. - - :Parameters: - - write_concern (optional): the write concern for this bulk - execution. - """ - if write_concern and not isinstance(write_concern, dict): - raise TypeError('write_concern must be an instance of dict') - return self.__bulk.execute(write_concern) diff --git a/pymongo/bulk_shared.py b/pymongo/bulk_shared.py new file mode 100644 index 0000000000..9276419d8a --- /dev/null +++ b/pymongo/bulk_shared.py @@ -0,0 +1,131 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across Bulk Write API implementations.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, NoReturn + +from pymongo.errors import BulkWriteError, OperationFailure +from pymongo.helpers_shared import _get_wce_doc +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, +) + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut + + +_DELETE_ALL: int = 0 +_DELETE_ONE: int = 1 + +# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err +_BAD_VALUE: int = 2 +_UNKNOWN_ERROR: int = 8 +_WRITE_CONCERN_ERROR: int = 64 + +_COMMANDS: tuple[str, str, str] = ("insert", "update", "delete") + + +class _Run: + """Represents a batch of write operations.""" + + def __init__(self, op_type: int) -> None: + """Initialize a new Run object.""" + self.op_type: int = op_type + self.index_map: list[int] = [] + self.ops: list[Any] = [] + self.idx_offset: int = 0 + + def index(self, idx: int) -> int: + """Get the original index of an operation in this run. + + :param idx: The Run index that maps to the original index. 
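For context, the removed BulkOperationBuilder above was driven fluently; typical 2.x-era usage looked like this (coll assumed to be a Collection, API shown for historical reference)::

    from pymongo.errors import BulkWriteError

    bulk = coll.initialize_ordered_bulk_op()
    bulk.insert({"x": 1})
    bulk.find({"x": 1}).update({"$inc": {"n": 1}})      # update all matches
    bulk.find({"y": 2}).upsert().replace_one({"y": 3})  # upsert a replacement
    try:
        result = bulk.execute()
    except BulkWriteError as exc:
        print(exc.details["writeErrors"])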
+ """ + return self.index_map[idx] + + def add(self, original_index: int, operation: Any) -> None: + """Add an operation to this Run instance. + + :param original_index: The original index of this operation + within a larger bulk operation. + :param operation: The operation document. + """ + self.index_map.append(original_index) + self.ops.append(operation) + + +def _merge_command( + run: _Run, + full_result: MutableMapping[str, Any], + offset: int, + result: Mapping[str, Any], +) -> None: + """Merge a write command result into the full bulk result.""" + affected = result.get("n", 0) + + if run.op_type == _INSERT: + full_result["nInserted"] += affected + + elif run.op_type == _DELETE: + full_result["nRemoved"] += affected + + elif run.op_type == _UPDATE: + upserted = result.get("upserted") + if upserted: + n_upserted = len(upserted) + for doc in upserted: + doc["index"] = run.index(doc["index"] + offset) + full_result["upserted"].extend(upserted) + full_result["nUpserted"] += n_upserted + full_result["nMatched"] += affected - n_upserted + else: + full_result["nMatched"] += affected + full_result["nModified"] += result["nModified"] + + write_errors = result.get("writeErrors") + if write_errors: + for doc in write_errors: + # Leave the server response intact for APM. + replacement = doc.copy() + idx = doc["index"] + offset + replacement["index"] = run.index(idx) + # Add the failed operation to the error document. + replacement["op"] = run.ops[idx] + full_result["writeErrors"].append(replacement) + + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) + + +def _raise_bulk_write_error(full_result: _DocumentOut) -> NoReturn: + """Raise a BulkWriteError from the full bulk api result.""" + # retryWrites on MMAPv1 should raise an actionable error. + if full_result["writeErrors"]: + full_result["writeErrors"].sort(key=lambda error: error["index"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) + raise BulkWriteError(full_result) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py new file mode 100644 index 0000000000..f9abddec44 --- /dev/null +++ b/pymongo/change_stream.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Re-import of synchronous ChangeStream API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.change_stream import * # noqa: F403 +from pymongo.synchronous.change_stream import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["ChangeStream", "ClusterChangeStream", "CollectionChangeStream", "DatabaseChangeStream"] # noqa: F405 diff --git a/pymongo/client_options.py b/pymongo/client_options.py new file mode 100644 index 0000000000..8b4eea7e65 --- /dev/null +++ b/pymongo/client_options.py @@ -0,0 +1,348 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools to parse mongo client options. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, cast + +from bson.codec_options import _parse_codec_options +from pymongo import common +from pymongo.compression_support import CompressionSettings +from pymongo.errors import ConfigurationError +from pymongo.monitoring import _EventListener, _EventListeners +from pymongo.pool_options import PoolOptions +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) +from pymongo.server_selectors import any_server_selector +from pymongo.ssl_support import get_ssl_context +from pymongo.write_concern import WriteConcern, validate_boolean + +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.auth_shared import MongoCredential + from pymongo.encryption_options import AutoEncryptionOpts + from pymongo.pyopenssl_context import SSLContext + from pymongo.topology_description import _ServerSelector + + +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: + """Parse authentication credentials.""" + mechanism = options.get("authmechanism", "DEFAULT" if username else None) + source = options.get("authsource") + if username or mechanism: + from pymongo.auth_shared import _build_credentials_tuple + + return _build_credentials_tuple(mechanism, source, username, password, options, database) + return None + + +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: + """Parse read preference options.""" + if "read_preference" in options: + return options["read_preference"] + + name = options.get("readpreference", "primary") + mode = read_pref_mode_from_name(name) + tags = options.get("readpreferencetags") + max_staleness = options.get("maxstalenessseconds", -1) + return make_read_preference(mode, tags, max_staleness) + + +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: + """Parse write concern options.""" + concern = options.get("w") + wtimeout = options.get("wtimeoutms") + j = options.get("journal") + fsync = options.get("fsync") + return WriteConcern(concern, wtimeout, j, fsync) 
+ + +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: + """Parse read concern options.""" + concern = options.get("readconcernlevel") + return ReadConcern(concern) + + +def _parse_ssl_options( + options: Mapping[str, Any], is_sync: bool +) -> tuple[Optional[SSLContext], bool]: + """Parse ssl options.""" + use_tls = options.get("tls") + if use_tls is not None: + validate_boolean("tls", use_tls) + + certfile = options.get("tlscertificatekeyfile") + passphrase = options.get("tlscertificatekeyfilepassword") + ca_certs = options.get("tlscafile") + crlfile = options.get("tlscrlfile") + allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) + allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) + disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) + + enabled_tls_opts = [] + for opt in ( + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "tlscafile", + "tlscrlfile", + ): + # Any non-null value of these options implies tls=True. + if opt in options and options[opt]: + enabled_tls_opts.append(opt) + for opt in ( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", + ): + # A value of False for these options implies tls=True. + if opt in options and not options[opt]: + enabled_tls_opts.append(opt) + + if enabled_tls_opts: + if use_tls is None: + # Implicitly enable TLS when one of the tls* options is set. + use_tls = True + elif not use_tls: + # Error since tls is explicitly disabled but a tls option is set. + raise ConfigurationError( + "TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." % ", ".join(enabled_tls_opts) + ) + + if use_tls: + ctx = get_ssl_context( + certfile, + passphrase, + ca_certs, + crlfile, + allow_invalid_certificates, + allow_invalid_hostnames, + disable_ocsp_endpoint_check, + is_sync, + ) + return ctx, allow_invalid_hostnames + return None, allow_invalid_hostnames + + +def _parse_pool_options( + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool, +) -> PoolOptions: + """Parse connection pool options.""" + credentials = _parse_credentials(username, password, database, options) + max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) + min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) + max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) + if max_pool_size is not None and min_pool_size > max_pool_size: + raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") + connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) + socket_timeout = options.get("sockettimeoutms") + wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) + event_listeners = cast(Optional[Sequence[_EventListener]], options.get("event_listeners")) + appname = options.get("appname") + driver = options.get("driver") + server_api = options.get("server_api") + compression_settings = CompressionSettings( + options.get("compressors", []), options.get("zlibcompressionlevel", -1) + ) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options, is_sync) + load_balanced = options.get("loadbalanced") + max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) + return PoolOptions( + max_pool_size, + min_pool_size, + max_idle_time_seconds, + connect_timeout, + socket_timeout, + wait_queue_timeout, + ssl_context, + 
tls_allow_invalid_hostnames, + _EventListeners(event_listeners), + appname, + driver, + compression_settings, + max_connecting=max_connecting, + server_api=server_api, + load_balanced=load_balanced, + credentials=credentials, + is_sync=is_sync, + ) + + +class ClientOptions: + """Read only configuration options for an AsyncMongoClient/MongoClient. + + Should not be instantiated directly by application developers. Access + a client's options via :attr:`pymongo.mongo_client.AsyncMongoClient.options` or :attr:`pymongo.mongo_client.MongoClient.options` + instead. + """ + + def __init__( + self, + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool = True, + ): + self.__options = options + self.__codec_options = _parse_codec_options(options) + self.__direct_connection = options.get("directconnection") + self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS) + # self.__server_selection_timeout is in seconds. Must use full name for + # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. + self.__server_selection_timeout = options.get( + "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT + ) + self.__pool_options = _parse_pool_options(username, password, database, options, is_sync) + self.__read_preference = _parse_read_preference(options) + self.__replica_set_name = options.get("replicaset") + self.__write_concern = _parse_write_concern(options) + self.__read_concern = _parse_read_concern(options) + self.__connect = options.get("connect") + self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY) + self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES) + self.__retry_reads = options.get("retryreads", common.RETRY_READS) + self.__server_selector = options.get("server_selector", any_server_selector) + self.__auto_encryption_opts = options.get("auto_encryption_opts") + self.__load_balanced = options.get("loadbalanced") + self.__timeout = options.get("timeoutms") + self.__server_monitoring_mode = options.get( + "servermonitoringmode", common.SERVER_MONITORING_MODE + ) + + @property + def _options(self) -> Mapping[str, Any]: + """The original options used to create this ClientOptions.""" + return self.__options + + @property + def connect(self) -> Optional[bool]: + """Whether to begin discovering a MongoDB topology automatically.""" + return self.__connect + + @property + def codec_options(self) -> CodecOptions[Any]: + """A :class:`~bson.codec_options.CodecOptions` instance.""" + return self.__codec_options + + @property + def direct_connection(self) -> Optional[bool]: + """Whether to connect to the deployment in 'Single' topology.""" + return self.__direct_connection + + @property + def local_threshold_ms(self) -> int: + """The local threshold for this instance.""" + return self.__local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + """The server selection timeout for this instance in seconds.""" + return self.__server_selection_timeout + + @property + def server_selector(self) -> _ServerSelector: + return self.__server_selector + + @property + def heartbeat_frequency(self) -> int: + """The monitoring frequency in seconds.""" + return self.__heartbeat_frequency + + @property + def pool_options(self) -> PoolOptions: + """A :class:`~pymongo.pool.PoolOptions` instance.""" + return self.__pool_options + + @property + def read_preference(self) -> _ServerMode: + """A read preference instance.""" + return 
self.__read_preference + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self.__replica_set_name + + @property + def write_concern(self) -> WriteConcern: + """A :class:`~pymongo.write_concern.WriteConcern` instance.""" + return self.__write_concern + + @property + def read_concern(self) -> ReadConcern: + """A :class:`~pymongo.read_concern.ReadConcern` instance.""" + return self.__read_concern + + @property + def timeout(self) -> Optional[float]: + """The configured timeoutMS converted to seconds, or None. + + .. versionadded:: 4.2 + """ + return self.__timeout + + @property + def retry_writes(self) -> bool: + """If this instance should retry supported write operations.""" + return self.__retry_writes + + @property + def retry_reads(self) -> bool: + """If this instance should retry supported read operations.""" + return self.__retry_reads + + @property + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: + """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" + return self.__auto_encryption_opts + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self.__load_balanced + + @property + def event_listeners(self) -> list[_EventListeners]: + """The event listeners registered for this client. + + See :mod:`~pymongo.monitoring` for details. + + .. versionadded:: 4.0 + """ + assert self.__pool_options._event_listeners is not None + return self.__pool_options._event_listeners.event_listeners() + + @property + def server_monitoring_mode(self) -> str: + """The configured serverMonitoringMode option. + + .. versionadded:: 4.5 + """ + return self.__server_monitoring_mode diff --git a/pymongo/client_session.py b/pymongo/client_session.py new file mode 100644 index 0000000000..db72b0b2e1 --- /dev/null +++ b/pymongo/client_session.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous ClientSession API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.client_session import * # noqa: F403 +from pymongo.synchronous.client_session import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["ClientSession", "SessionOptions", "TransactionOptions"] # noqa: F405 diff --git a/pymongo/collation.py b/pymongo/collation.py new file mode 100644 index 0000000000..8a1eca7aff --- /dev/null +++ b/pymongo/collation.py @@ -0,0 +1,226 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
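Stepping back to ClientOptions above: applications reach these read-only values through the client rather than constructing the class directly. A short sketch of that access path (standard client usage; the URI values are illustrative)::

    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017/?retryWrites=false&w=majority")
    opts = client.options                    # a ClientOptions instance
    opts.retry_writes                        # False, from the URI
    opts.write_concern                       # WriteConcern(w='majority')
    opts.pool_options.max_pool_size          # 100 unless overridden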
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with `collations`_. + +.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import Any, Mapping, Optional, Union + +from pymongo import common +from pymongo.write_concern import validate_boolean + + +class CollationStrength: + """ + An enum that defines values for `strength` on a + :class:`~pymongo.collation.Collation`. + """ + + PRIMARY = 1 + """Differentiate base (unadorned) characters.""" + + SECONDARY = 2 + """Differentiate character accents.""" + + TERTIARY = 3 + """Differentiate character case.""" + + QUATERNARY = 4 + """Differentiate words with and without punctuation.""" + + IDENTICAL = 5 + """Differentiate unicode code point (characters are exactly identical).""" + + +class CollationAlternate: + """ + An enum that defines values for `alternate` on a + :class:`~pymongo.collation.Collation`. + """ + + NON_IGNORABLE = "non-ignorable" + """Spaces and punctuation are treated as base characters.""" + + SHIFTED = "shifted" + """Spaces and punctuation are *not* considered base characters. + + Spaces and punctuation are distinguished regardless when the + :class:`~pymongo.collation.Collation` strength is at least + :data:`~pymongo.collation.CollationStrength.QUATERNARY`. + + """ + + +class CollationMaxVariable: + """ + An enum that defines values for `max_variable` on a + :class:`~pymongo.collation.Collation`. + """ + + PUNCT = "punct" + """Both punctuation and spaces are ignored.""" + + SPACE = "space" + """Spaces alone are ignored.""" + + +class CollationCaseFirst: + """ + An enum that defines values for `case_first` on a + :class:`~pymongo.collation.Collation`. + """ + + UPPER = "upper" + """Sort uppercase characters first.""" + + LOWER = "lower" + """Sort lowercase characters first.""" + + OFF = "off" + """Default for locale or collation strength.""" + + +class Collation: + """Collation + + :param locale: (string) The locale of the collation. This should be a string + that identifies an `ICU locale ID` exactly. For example, ``en_US`` is + valid, but ``en_us`` and ``en-US`` are not. Consult the MongoDB + documentation for a list of supported locales. + :param caseLevel: (optional) If ``True``, turn on case sensitivity if + `strength` is 1 or 2 (case sensitivity is implied if `strength` is + greater than 2). Defaults to ``False``. + :param caseFirst: (optional) Specify that either uppercase or lowercase + characters take precedence. Must be one of the following values: + + * :data:`~CollationCaseFirst.UPPER` + * :data:`~CollationCaseFirst.LOWER` + * :data:`~CollationCaseFirst.OFF` (the default) + + :param strength: Specify the comparison strength. This is also + known as the ICU comparison level. This must be one of the following + values: + + * :data:`~CollationStrength.PRIMARY` + * :data:`~CollationStrength.SECONDARY` + * :data:`~CollationStrength.TERTIARY` (the default) + * :data:`~CollationStrength.QUATERNARY` + * :data:`~CollationStrength.IDENTICAL` + + Each successive level builds upon the previous. For example, a + `strength` of :data:`~CollationStrength.SECONDARY` differentiates + characters based both on the unadorned base character and its accents. + + :param numericOrdering: If ``True``, order numbers numerically + instead of in collation order (defaults to ``False``). 
+ :param alternate: Specify whether spaces and punctuation are + considered base characters. This must be one of the following values: + + * :data:`~CollationAlternate.NON_IGNORABLE` (the default) + * :data:`~CollationAlternate.SHIFTED` + + :param maxVariable: When `alternate` is + :data:`~CollationAlternate.SHIFTED`, this option specifies what + characters may be ignored. This must be one of the following values: + + * :data:`~CollationMaxVariable.PUNCT` (the default) + * :data:`~CollationMaxVariable.SPACE` + + :param normalization: If ``True``, normalizes text into Unicode + NFD. Defaults to ``False``. + :param backwards: If ``True``, accents on characters are + considered from the back of the word to the front, as it is done in some + French dictionary ordering traditions. Defaults to ``False``. + :param kwargs: Keyword arguments supplying any additional options + to be sent with this Collation object. + + .. versionadded: 3.4 + + """ + + __slots__ = ("__document",) + + def __init__( + self, + locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any, + ) -> None: + locale = common.validate_string("locale", locale) + self.__document: dict[str, Any] = {"locale": locale} + if caseLevel is not None: + self.__document["caseLevel"] = validate_boolean("caseLevel", caseLevel) + if caseFirst is not None: + self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) + if strength is not None: + self.__document["strength"] = common.validate_integer("strength", strength) + if numericOrdering is not None: + self.__document["numericOrdering"] = validate_boolean( + "numericOrdering", numericOrdering + ) + if alternate is not None: + self.__document["alternate"] = common.validate_string("alternate", alternate) + if maxVariable is not None: + self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) + if normalization is not None: + self.__document["normalization"] = validate_boolean("normalization", normalization) + if backwards is not None: + self.__document["backwards"] = validate_boolean("backwards", backwards) + self.__document.update(kwargs) + + @property + def document(self) -> dict[str, Any]: + """The document representation of this collation. + + .. note:: + :class:`Collation` is immutable. Mutating the value of + :attr:`document` does not mutate this :class:`Collation`. 
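Typical construction with the parameters documented above, e.g. a case-insensitive but accent-sensitive comparison::

    from pymongo.collation import Collation, CollationStrength

    collation = Collation(locale="en_US", strength=CollationStrength.SECONDARY)
    collation.document  # {'locale': 'en_US', 'strength': 2}
    # Passed to operations that accept it, e.g. find(..., collation=collation).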
+ """ + return self.__document.copy() + + def __repr__(self) -> str: + document = self.document + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collation): + return self.document == other.document + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +def validate_collation_or_none( + value: Optional[Union[Mapping[str, Any], Collation]] +) -> Optional[dict[str, Any]]: + if value is None: + return None + if isinstance(value, Collation): + return value.document + if isinstance(value, dict): + return value + raise TypeError("collation must be a dict, an instance of collation.Collation, or None") diff --git a/pymongo/collection.py b/pymongo/collection.py index 870d67f55c..16063425a7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1,10 +1,10 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,1670 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Collection level utilities for Mongo.""" +"""Re-import of synchronous Collection API for compatibility.""" +from __future__ import annotations -import warnings +from pymongo.synchronous.collection import * # noqa: F403 +from pymongo.synchronous.collection import __doc__ as original_doc -from bson.code import Code -from bson.objectid import ObjectId -from bson.son import SON -from pymongo import (bulk, - common, - helpers, - message) -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor -from pymongo.errors import InvalidName, OperationFailure -from pymongo.helpers import _check_write_command_response -from pymongo.message import _INSERT, _UPDATE, _DELETE - - -try: - from collections import OrderedDict - ordered_types = (SON, OrderedDict) -except ImportError: - ordered_types = SON - - -def _gen_index_name(keys): - """Generate an index name from the set of fields it is over. - """ - return u"_".join([u"%s_%s" % item for item in keys]) - - -class Collection(common.BaseObject): - """A Mongo collection. - """ - - def __init__(self, database, name, create=False, **kwargs): - """Get / create a Mongo collection. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - collection name. Any additional keyword arguments will be used - as options passed to the create command. See - :meth:`~pymongo.database.Database.create_collection` for valid - options. - - If `create` is ``True`` or additional keyword arguments are - present a create command will be sent. Otherwise, a create - command will not be sent and the collection will be created - implicitly on first use. 
- - :Parameters: - - `database`: the database to get a collection from - - `name`: the name of the collection to get - - `create` (optional): if ``True``, force collection - creation even without options being set - - `**kwargs` (optional): additional keyword arguments will - be passed as options for the create collection command - - .. versionchanged:: 2.2 - Removed deprecated argument: options - - .. versionadded:: 2.1 - uuid_subtype attribute - - .. versionchanged:: 1.5 - deprecating `options` in favor of kwargs - - .. versionadded:: 1.5 - the `create` parameter - - .. mongodoc:: collections - """ - super(Collection, self).__init__( - slave_okay=database.slave_okay, - read_preference=database.read_preference, - tag_sets=database.tag_sets, - secondary_acceptable_latency_ms=( - database.secondary_acceptable_latency_ms), - safe=database.safe, - uuidrepresentation=database.uuid_subtype, - **database.write_concern) - - if not isinstance(name, basestring): - raise TypeError("name must be an instance " - "of %s" % (basestring.__name__,)) - - if not name or ".." in name: - raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or - name.startswith("$cmd")): - raise InvalidName("collection names must not " - "contain '$': %r" % name) - if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " - "or end with '.': %r" % name) - if "\x00" in name: - raise InvalidName("collection names must not contain the " - "null character") - - self.__database = database - self.__name = unicode(name) - self.__full_name = u"%s.%s" % (self.__database.name, self.__name) - if create or kwargs: - self.__create(kwargs) - - def __create(self, options): - """Sends a create command with the given options. - """ - - if options: - if "size" in options: - options["size"] = float(options["size"]) - self.__database.command("create", self.__name, **options) - else: - self.__database.command("create", self.__name) - - def __getattr__(self, name): - """Get a sub-collection of this collection by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - return Collection(self.__database, u"%s.%s" % (self.__name, name)) - - def __getitem__(self, name): - return self.__getattr__(name) - - def __repr__(self): - return "Collection(%r, %r)" % (self.__database, self.__name) - - def __eq__(self, other): - if isinstance(other, Collection): - us = (self.__database, self.__name) - them = (other.__database, other.__name) - return us == them - return NotImplemented - - def __ne__(self, other): - return not self == other - - @property - def full_name(self): - """The full name of this :class:`Collection`. - - The full name is of the form `database_name.collection_name`. - - .. versionchanged:: 1.3 - ``full_name`` is now a property rather than a method. - """ - return self.__full_name - - @property - def name(self): - """The name of this :class:`Collection`. - - .. versionchanged:: 1.3 - ``name`` is now a property rather than a method. - """ - return self.__name - - @property - def database(self): - """The :class:`~pymongo.database.Database` that this - :class:`Collection` is a part of. - - .. versionchanged:: 1.3 - ``database`` is now a property rather than a method. - """ - return self.__database - - def initialize_unordered_bulk_op(self): - """Initialize an unordered batch of write operations. 
- - Operations will be performed on the server in arbitrary order, - possibly in parallel. All operations will be attempted. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`unordered_bulk` for examples. - - .. versionadded:: 2.7 - """ - return bulk.BulkOperationBuilder(self, ordered=False) - - def initialize_ordered_bulk_op(self): - """Initialize an ordered batch of write operations. - - Operations will be performed on the server serially, in the - order provided. If an error occurs all remaining operations - are aborted. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`ordered_bulk` for examples. - - .. versionadded:: 2.7 - """ - return bulk.BulkOperationBuilder(self, ordered=True) - - def save(self, to_save, manipulate=True, - safe=None, check_keys=True, **kwargs): - """Save a document in this collection. - - If `to_save` already has an ``"_id"`` then an :meth:`update` - (upsert) operation is performed and any existing document with - that ``"_id"`` is overwritten. Otherwise an :meth:`insert` - operation is performed. In this case if `manipulate` is ``True`` - an ``"_id"`` will be added to `to_save` and this method returns - the ``"_id"`` of the saved document. If `manipulate` is ``False`` - the ``"_id"`` will be added by the server but this method will - return ``None``. - - Raises :class:`TypeError` if `to_save` is not an instance of - :class:`dict`. - - Write concern options can be passed as keyword arguments, overriding - any global defaults. Valid options include w=, - wtimeout=, j=, or fsync=. See the parameter list below - for a detailed explanation of these options. - - By default an acknowledgment is requested from the server that the - save was successful, raising :class:`~pymongo.errors.OperationFailure` - if an error occurred. **Passing w=0 disables write acknowledgement - and all other write concern options.** - - :Parameters: - - `to_save`: the document to be saved - - `manipulate` (optional): manipulate the document before - saving it? - - `safe` (optional): **DEPRECATED** - Use `w` instead. - - `check_keys` (optional): check if keys start with '$' or - contain '.', raising :class:`~pymongo.errors.InvalidName` - in either case. - - `w` (optional): (integer or string) If this is a replica set, write - operations will block until they have been replicated to the - specified number or tagged set of servers. `w=` always includes - the replica set primary (e.g. w=3 means write to the primary and wait - until replicated to **two** secondaries). **Passing w=0 disables - write acknowledgement and all other write concern options.** - - `wtimeout` (optional): (integer) Used in conjunction with `w`. - Specify a value in milliseconds to control how long to wait for - write propagation to complete. If replication does not complete in - the given timeframe, a timeout exception is raised. - - `j` (optional): If ``True`` block until write operations have been - committed to the journal. Ignored if the server is running without - journaling. - - `fsync` (optional): If ``True`` force the database to fsync all - files before returning. When used with `j` the server awaits the - next group commit before returning. - :Returns: - - The ``'_id'`` value of `to_save` or ``[None]`` if `manipulate` is - ``False`` and `to_save` has no '_id' field. - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. - - .. 
mongodoc:: insert - """ - if not isinstance(to_save, dict): - raise TypeError("cannot save object of type %s" % type(to_save)) - - if "_id" not in to_save: - return self.insert(to_save, manipulate, safe, check_keys, **kwargs) - else: - self.update({"_id": to_save["_id"]}, to_save, True, - manipulate, safe, check_keys=check_keys, **kwargs) - return to_save.get("_id", None) - - def insert(self, doc_or_docs, manipulate=True, - safe=None, check_keys=True, continue_on_error=False, **kwargs): - """Insert a document(s) into this collection. - - If `manipulate` is ``True``, the document(s) are manipulated using - any :class:`~pymongo.son_manipulator.SONManipulator` instances - that have been added to this :class:`~pymongo.database.Database`. - In this case an ``"_id"`` will be added if the document(s) does - not already contain one and the ``"id"`` (or list of ``"_id"`` - values for more than one document) will be returned. - If `manipulate` is ``False`` and the document(s) does not include - an ``"_id"`` one will be added by the server. The server - does not return the ``"_id"`` it created so ``None`` is returned. - - Write concern options can be passed as keyword arguments, overriding - any global defaults. Valid options include w=, - wtimeout=, j=, or fsync=. See the parameter list below - for a detailed explanation of these options. - - By default an acknowledgment is requested from the server that the - insert was successful, raising :class:`~pymongo.errors.OperationFailure` - if an error occurred. **Passing w=0 disables write acknowledgement - and all other write concern options.** - - :Parameters: - - `doc_or_docs`: a document or list of documents to be - inserted - - `manipulate` (optional): If ``True`` manipulate the documents - before inserting. - - `safe` (optional): **DEPRECATED** - Use `w` instead. - - `check_keys` (optional): If ``True`` check if keys start with '$' - or contain '.', raising :class:`~pymongo.errors.InvalidName` in - either case. - - `continue_on_error` (optional): If ``True``, the database will not - stop processing a bulk insert if one fails (e.g. due to duplicate - IDs). This makes bulk insert behave similarly to a series of single - inserts, except lastError will be set if any insert fails, not just - the last one. If multiple errors occur, only the most recent will - be reported by :meth:`~pymongo.database.Database.error`. - - `w` (optional): (integer or string) If this is a replica set, write - operations will block until they have been replicated to the - specified number or tagged set of servers. `w=` always includes - the replica set primary (e.g. w=3 means write to the primary and wait - until replicated to **two** secondaries). **Passing w=0 disables - write acknowledgement and all other write concern options.** - - `wtimeout` (optional): (integer) Used in conjunction with `w`. - Specify a value in milliseconds to control how long to wait for - write propagation to complete. If replication does not complete in - the given timeframe, a timeout exception is raised. - - `j` (optional): If ``True`` block until write operations have been - committed to the journal. Ignored if the server is running without - journaling. - - `fsync` (optional): If ``True`` force the database to fsync all - files before returning. When used with `j` the server awaits the - next group commit before returning. 
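Concretely, the removed insert accepted either one document or an iterable, and the return shape followed the input (db assumed to be a Database; 2.x-era API shown for context)::

    _id = db.test.insert({"x": 1})                   # single doc -> its _id
    ids = db.test.insert([{"x": 2}, {"x": 3}], w=1)  # iterable -> list of _ids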
- :Returns: - - The ``'_id'`` value (or list of '_id' values) of `doc_or_docs` or - ``[None]`` if manipulate is ``False`` and the documents passed - as `doc_or_docs` do not include an '_id' field. - - .. note:: `continue_on_error` requires server version **>= 1.9.1** - - .. versionadded:: 2.1 - Support for continue_on_error. - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. - .. versionchanged:: 1.1 - Bulk insert works with any iterable - - .. mongodoc:: insert - """ - client = self.database.connection - # Batch inserts require us to know the connected primary's - # max_bson_size, max_message_size, and max_write_batch_size. - # We have to be connected to the primary to know that. - client._ensure_connected(True) - - docs = doc_or_docs - return_one = False - if isinstance(docs, dict): - return_one = True - docs = [docs] - - ids = [] - - if manipulate: - def gen(): - db = self.__database - for doc in docs: - if '_id' not in doc: - doc['_id'] = ObjectId() - - # Apply user-configured SON manipulators. - doc = db._fix_incoming(doc, self) - ids.append(doc['_id']) - yield doc - else: - def gen(): - for doc in docs: - ids.append(doc.get('_id')) - yield doc - - safe, options = self._get_write_mode(safe, **kwargs) - - if client.max_wire_version > 1 and safe: - # Insert command - command = SON([('insert', self.name), - ('ordered', not continue_on_error)]) - - if options: - command['writeConcern'] = options - - results = message._do_batched_write_command( - self.database.name + ".$cmd", _INSERT, command, - gen(), check_keys, self.uuid_subtype, client) - _check_write_command_response(results) - else: - # Legacy batched OP_INSERT - message._do_batched_insert(self.__full_name, gen(), check_keys, - safe, options, continue_on_error, - self.uuid_subtype, client) - - if return_one: - return ids[0] - else: - return ids - - def update(self, spec, document, upsert=False, manipulate=False, - safe=None, multi=False, check_keys=True, **kwargs): - """Update a document(s) in this collection. - - Raises :class:`TypeError` if either `spec` or `document` is - not an instance of ``dict`` or `upsert` is not an instance of - ``bool``. - - Write concern options can be passed as keyword arguments, overriding - any global defaults. Valid options include w=, - wtimeout=, j=, or fsync=. See the parameter list below - for a detailed explanation of these options. - - By default an acknowledgment is requested from the server that the - update was successful, raising :class:`~pymongo.errors.OperationFailure` - if an error occurred. **Passing w=0 disables write acknowledgement - and all other write concern options.** - - There are many useful `update modifiers`_ which can be used - when performing updates. For example, here we use the - ``"$set"`` modifier to modify some fields in a matching - document: - - .. 
doctest:: - - >>> db.test.insert({"x": "y", "a": "b"}) - ObjectId('...') - >>> list(db.test.find()) - [{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}] - >>> db.test.update({"x": "y"}, {"$set": {"a": "c"}}) - {...} - >>> list(db.test.find()) - [{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}] - - :Parameters: - - `spec`: a ``dict`` or :class:`~bson.son.SON` instance - specifying elements which must be present for a document - to be updated - - `document`: a ``dict`` or :class:`~bson.son.SON` - instance specifying the document to be used for the update - or (in the case of an upsert) insert - see docs on MongoDB - `update modifiers`_ - - `upsert` (optional): perform an upsert if ``True`` - - `manipulate` (optional): manipulate the document before - updating? If ``True`` all instances of - :mod:`~pymongo.son_manipulator.SONManipulator` added to - this :class:`~pymongo.database.Database` will be applied - to the document before performing the update. - - `check_keys` (optional): check if keys in `document` start - with '$' or contain '.', raising - :class:`~pymongo.errors.InvalidName`. Only applies to - document replacement, not modification through $ - operators. - - `safe` (optional): **DEPRECATED** - Use `w` instead. - - `multi` (optional): update all documents that match - `spec`, rather than just the first matching document. The - default value for `multi` is currently ``False``, but this - might eventually change to ``True``. It is recommended - that you specify this argument explicitly for all update - operations in order to prepare your code for that change. - - `w` (optional): (integer or string) If this is a replica set, write - operations will block until they have been replicated to the - specified number or tagged set of servers. `w=` always includes - the replica set primary (e.g. w=3 means write to the primary and wait - until replicated to **two** secondaries). **Passing w=0 disables - write acknowledgement and all other write concern options.** - - `wtimeout` (optional): (integer) Used in conjunction with `w`. - Specify a value in milliseconds to control how long to wait for - write propagation to complete. If replication does not complete in - the given timeframe, a timeout exception is raised. - - `j` (optional): If ``True`` block until write operations have been - committed to the journal. Ignored if the server is running without - journaling. - - `fsync` (optional): If ``True`` force the database to fsync all - files before returning. When used with `j` the server awaits the - next group commit before returning. - :Returns: - - A document (dict) describing the effect of the update or ``None`` - if write acknowledgement is disabled. - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword - arguments. - .. versionchanged:: 1.4 - Return the response to *lastError* if `safe` is ``True``. - .. versionadded:: 1.1.1 - The `multi` parameter. - - .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating - - .. mongodoc:: update - """ - if not isinstance(spec, dict): - raise TypeError("spec must be an instance of dict") - if not isinstance(document, dict): - raise TypeError("document must be an instance of dict") - if not isinstance(upsert, bool): - raise TypeError("upsert must be an instance of bool") - - client = self.database.connection - # Need to connect to know the wire version, and may want to connect - # before applying SON manipulators. 
- client._ensure_connected(True) - if manipulate: - document = self.__database._fix_incoming(document, self) - - safe, options = self._get_write_mode(safe, **kwargs) - - if document: - # If a top level key begins with '$' this is a modify operation - # and we should skip key validation. It doesn't matter which key - # we check here. Passing a document with a mix of top level keys - # starting with and without a '$' is invalid and the server will - # raise an appropriate exception. - first = (document.iterkeys()).next() - if first.startswith('$'): - check_keys = False - - if client.max_wire_version > 1 and safe: - # Update command - command = SON([('update', self.name)]) - if options: - command['writeConcern'] = options - - docs = [SON([('q', spec), ('u', document), - ('multi', multi), ('upsert', upsert)])] - - results = message._do_batched_write_command( - self.database.name + '.$cmd', _UPDATE, command, - docs, check_keys, self.uuid_subtype, client) - _check_write_command_response(results) - - _, result = results[0] - # Add the updatedExisting field for compatibility - if result.get('n') and 'upserted' not in result: - result['updatedExisting'] = True - else: - result['updatedExisting'] = False - - return result - - else: - # Legacy OP_UPDATE - return client._send_message( - message.update(self.__full_name, upsert, multi, - spec, document, safe, options, - check_keys, self.uuid_subtype), safe) - - def drop(self): - """Alias for :meth:`~pymongo.database.Database.drop_collection`. - - The following two calls are equivalent: - - >>> db.foo.drop() - >>> db.drop_collection("foo") - - .. versionadded:: 1.8 - """ - self.__database.drop_collection(self.__name) - - def remove(self, spec_or_id=None, safe=None, multi=True, **kwargs): - """Remove a document(s) from this collection. - - .. warning:: Calls to :meth:`remove` should be performed with - care, as removed data cannot be restored. - - If `spec_or_id` is ``None``, all documents in this collection - will be removed. This is not equivalent to calling - :meth:`~pymongo.database.Database.drop_collection`, however, - as indexes will not be removed. - - Write concern options can be passed as keyword arguments, overriding - any global defaults. Valid options include w=, - wtimeout=, j=, or fsync=. See the parameter list below - for a detailed explanation of these options. - - By default an acknowledgment is requested from the server that the - remove was successful, raising :class:`~pymongo.errors.OperationFailure` - if an error occurred. **Passing w=0 disables write acknowledgement - and all other write concern options.** - - :Parameters: - - `spec_or_id` (optional): a dictionary specifying the - documents to be removed OR any other type specifying the - value of ``"_id"`` for the document to be removed - - `safe` (optional): **DEPRECATED** - Use `w` instead. - - `multi` (optional): If ``True`` (the default) remove all documents - matching `spec_or_id`, otherwise remove only the first matching - document. - - `w` (optional): (integer or string) If this is a replica set, write - operations will block until they have been replicated to the - specified number or tagged set of servers. `w=` always includes - the replica set primary (e.g. w=3 means write to the primary and wait - until replicated to **two** secondaries). **Passing w=0 disables - write acknowledgement and all other write concern options.** - - `wtimeout` (optional): (integer) Used in conjunction with `w`. 
- Specify a value in milliseconds to control how long to wait for - write propagation to complete. If replication does not complete in - the given timeframe, a timeout exception is raised. - - `j` (optional): If ``True`` block until write operations have been - committed to the journal. Ignored if the server is running without - journaling. - - `fsync` (optional): If ``True`` force the database to fsync all - files before returning. When used with `j` the server awaits the - next group commit before returning. - :Returns: - - A document (dict) describing the effect of the remove or ``None`` - if write acknowledgement is disabled. - - .. versionadded:: 1.8 - Support for passing `getLastError` options as keyword arguments. - .. versionchanged:: 1.7 Accept any type other than a ``dict`` - instance for removal by ``"_id"``, not just - :class:`~bson.objectid.ObjectId` instances. - .. versionchanged:: 1.4 - Return the response to *lastError* if `safe` is ``True``. - .. versionchanged:: 1.2 - The `spec_or_id` parameter is now optional. If it is - not specified *all* documents in the collection will be - removed. - .. versionadded:: 1.1 - The `safe` parameter. - - .. mongodoc:: remove - """ - if spec_or_id is None: - spec_or_id = {} - if not isinstance(spec_or_id, dict): - spec_or_id = {"_id": spec_or_id} - - safe, options = self._get_write_mode(safe, **kwargs) - - client = self.database.connection - - # Need to connect to know the wire version. - client._ensure_connected(True) - if client.max_wire_version > 1 and safe: - # Delete command - command = SON([('delete', self.name)]) - if options: - command['writeConcern'] = options - - docs = [SON([('q', spec_or_id), ('limit', int(not multi))])] - - results = message._do_batched_write_command( - self.database.name + '.$cmd', _DELETE, command, - docs, False, self.uuid_subtype, client) - _check_write_command_response(results) - - _, result = results[0] - return result - - else: - # Legacy OP_DELETE - return client._send_message( - message.delete(self.__full_name, spec_or_id, safe, - options, self.uuid_subtype, int(not multi)), safe) - - def find_one(self, spec_or_id=None, *args, **kwargs): - """Get a single document from the database. - - All arguments to :meth:`find` are also valid arguments for - :meth:`find_one`, although any `limit` argument will be - ignored. Returns a single document, or ``None`` if no matching - document is found. - - :Parameters: - - - `spec_or_id` (optional): a dictionary specifying - the query to be performed OR any other type to be used as - the value for a query for ``"_id"``. - - - `*args` (optional): any additional positional arguments - are the same as the arguments to :meth:`find`. - - - `**kwargs` (optional): any additional keyword arguments - are the same as the arguments to :meth:`find`. - - - `max_time_ms` (optional): a value for max_time_ms may be - specified as part of `**kwargs`, e.g. - - >>> find_one(max_time_ms=100) - - .. versionchanged:: 1.7 - Allow passing any of the arguments that are valid for - :meth:`find`. - - .. versionchanged:: 1.7 Accept any type other than a ``dict`` - instance as an ``"_id"`` query, not just - :class:`~bson.objectid.ObjectId` instances. 
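So, per the find_one notes above, the following are equivalent, and max_time_ms can ride along as a keyword (db assumed to be a Database)::

    from bson.objectid import ObjectId

    oid = ObjectId()
    db.test.find_one(oid)                          # shorthand _id query
    db.test.find_one({"_id": oid})                 # explicit form
    db.test.find_one({"x": 1}, max_time_ms=100)    # bound server-side execution time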
- """ - if spec_or_id is not None and not isinstance(spec_or_id, dict): - spec_or_id = {"_id": spec_or_id} - - max_time_ms = kwargs.pop("max_time_ms", None) - cursor = self.find(spec_or_id, - *args, **kwargs).max_time_ms(max_time_ms) - - for result in cursor.limit(-1): - return result - return None - - def find(self, *args, **kwargs): - """Query the database. - - The `spec` argument is a prototype document that all results - must match. For example: - - >>> db.test.find({"hello": "world"}) - - only matches documents that have a key "hello" with value - "world". Matches can have other keys *in addition* to - "hello". The `fields` argument is used to specify a subset of - fields that should be included in the result documents. By - limiting results to a certain subset of fields you can cut - down on network traffic and decoding time. - - Raises :class:`TypeError` if any of the arguments are of - improper type. Returns an instance of - :class:`~pymongo.cursor.Cursor` corresponding to this query. - - :Parameters: - - `spec` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set - - `fields` (optional): a list of field names that should be - returned in the result set or a dict specifying the fields - to include or exclude. If `fields` is a list "_id" will - always be returned. Use a dict to exclude fields from - the result (e.g. fields={'_id': False}). - - `skip` (optional): the number of documents to omit (from - the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to - return - - `timeout` (optional): if True (the default), any returned - cursor is closed by the server after 10 minutes of - inactivity. If set to False, the returned cursor will never - time out on the server. Care should be taken to ensure that - cursors with timeout turned off are properly closed. - - `snapshot` (optional): if True, snapshot mode will be used - for this query. Snapshot mode assures no duplicates are - returned, or objects missed, which were present at both - the start and end of the query's execution. For details, - see the `snapshot documentation - `_. - - `tailable` (optional): the result of this find call will - be a tailable cursor - tailable cursors aren't closed when - the last data is retrieved but are kept open and the - cursors location marks the final document's position. if - more data is received iteration of the cursor will - continue from the last document received. For details, see - the `tailable cursor documentation - `_. - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for this query. See - :meth:`~pymongo.cursor.Cursor.sort` for details. - - `max_scan` (optional): limit the number of documents - examined when performing the query - - `as_class` (optional): class to use for documents in the - query result (default is - :attr:`~pymongo.mongo_client.MongoClient.document_class`) - - `slave_okay` (optional): if True, allows this query to - be run against a replica secondary. - - `await_data` (optional): if True, the server will block for - some extra time before returning, waiting for more data to - return. Ignored if `tailable` is False. - - `partial` (optional): if True, mongos will return partial - results if some shards are down instead of returning an error. - - `manipulate`: (optional): If True (the default), apply any - outgoing SON manipulators before returning. 
- - `network_timeout` (optional): specify a timeout to use for - this query, which will override the - :class:`~pymongo.mongo_client.MongoClient`-level default - - `read_preference` (optional): The read preference for - this query. - - `tag_sets` (optional): The tag sets for this query. - - `secondary_acceptable_latency_ms` (optional): Any replica-set - member whose ping time is within secondary_acceptable_latency_ms of - the nearest member may accept reads. Default 15 milliseconds. - **Ignored by mongos** and must be configured on the command line. - See the localThreshold_ option for more information. - - `compile_re` (optional): if ``False``, don't attempt to compile - BSON regex objects into Python regexes. Return instances of - :class:`~bson.regex.Regex` instead. - - `exhaust` (optional): If ``True`` create an "exhaust" cursor. - MongoDB will stream batched results to the client without waiting - for the client to request each batch, reducing latency. - - .. note:: There are a number of caveats to using the `exhaust` - parameter: - - 1. The `exhaust` and `limit` options are incompatible and can - not be used together. - - 2. The `exhaust` option is not supported by mongos and can not be - used with a sharded cluster. - - 3. A :class:`~pymongo.cursor.Cursor` instance created with the - `exhaust` option requires an exclusive :class:`~socket.socket` - connection to MongoDB. If the :class:`~pymongo.cursor.Cursor` is - discarded without being completely iterated the underlying - :class:`~socket.socket` connection will be closed and discarded - without being returned to the connection pool. - - 4. A :class:`~pymongo.cursor.Cursor` instance created with the - `exhaust` option in a :doc:`request
` **must** - be completely iterated before executing any other operation. - - 5. The `network_timeout` option is ignored when using the - `exhaust` option. - - .. note:: The `manipulate` and `compile_re` parameters may default to - False in future releases. - - .. note:: The `max_scan` parameter requires server - version **>= 1.5.1** - - .. versionadded:: 2.7 - The ``compile_re`` parameter. - - .. versionadded:: 2.3 - The `tag_sets` and `secondary_acceptable_latency_ms` parameters. - - .. versionadded:: 1.11+ - The `await_data`, `partial`, and `manipulate` parameters. - - .. versionadded:: 1.8 - The `network_timeout` parameter. - - .. versionadded:: 1.7 - The `sort`, `max_scan` and `as_class` parameters. - - .. versionchanged:: 1.7 - The `fields` parameter can now be a dict or any iterable in - addition to a list. - - .. versionadded:: 1.1 - The `tailable` parameter. - - .. mongodoc:: find - .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - if not 'slave_okay' in kwargs: - kwargs['slave_okay'] = self.slave_okay - if not 'read_preference' in kwargs: - kwargs['read_preference'] = self.read_preference - if not 'tag_sets' in kwargs: - kwargs['tag_sets'] = self.tag_sets - if not 'secondary_acceptable_latency_ms' in kwargs: - kwargs['secondary_acceptable_latency_ms'] = ( - self.secondary_acceptable_latency_ms) - return Cursor(self, *args, **kwargs) - - def parallel_scan(self, num_cursors, **kwargs): - """Scan this entire collection in parallel. - - Returns a list of up to ``num_cursors`` cursors that can be iterated - concurrently. As long as the collection is not modified during - scanning, each document appears once in one of the cursors' result - sets. - - For example, to process each document in a collection using some - thread-safe ``process_document()`` function:: - - def process_cursor(cursor): - for document in cursor: - # Some thread-safe processing function: - process_document(document) - - # Get up to 4 cursors. - cursors = collection.parallel_scan(4) - threads = [ - threading.Thread(target=process_cursor, args=(cursor,)) - for cursor in cursors] - - for thread in threads: - thread.start() - - for thread in threads: - thread.join() - - # All documents have now been processed. - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if the `read_preference` attribute of this instance is not set to - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the - (deprecated) `slave_okay` attribute of this instance is set to `True` - the command will be sent to a secondary or slave. - - :Parameters: - - `num_cursors`: the number of cursors to return - - .. note:: Requires server version **>= 2.5.5**. - - """ - use_master = not self.slave_okay and not self.read_preference - compile_re = kwargs.get('compile_re', False) - - command_kwargs = { - 'numCursors': num_cursors, - 'read_preference': self.read_preference, - 'tag_sets': self.tag_sets, - 'secondary_acceptable_latency_ms': ( - self.secondary_acceptable_latency_ms), - 'slave_okay': self.slave_okay, - '_use_master': use_master} - command_kwargs.update(kwargs) - - result, conn_id = self.__database._command( - "parallelCollectionScan", self.__name, **command_kwargs) - - return [CommandCursor(self, - cursor['cursor'], - conn_id, - compile_re) for cursor in result['cursors']] - - def count(self): - """Get the number of documents in this collection. 
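An illustrative query combining the find() options documented above (``db`` and the field names are assumed, per this 2.x-era API)::

    >>> import pymongo
    >>> cursor = db.test.find({"hello": "world"},
    ...                       fields={"_id": False},
    ...                       skip=10,
    ...                       limit=5,
    ...                       sort=[("rank", pymongo.DESCENDING)])
    >>> for doc in cursor:
    ...     print doc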
- - To get the number of documents matching a specific query use - :meth:`pymongo.cursor.Cursor.count`. - """ - return self.find().count() - - def create_index(self, key_or_list, cache_for=300, **kwargs): - """Creates an index on this collection. - - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`). - - To create a single key index on the key ``'mike'`` we just use - a string argument: - - >>> my_collection.create_index("mike") - - For a compound index on ``'mike'`` descending and ``'eliot'`` - ascending we need to use a list of tuples: - - >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... ("eliot", pymongo.ASCENDING)]) - - All optional index creation parameters should be passed as - keyword arguments to this method. Valid options include: - - - `name`: custom name to use for this index - if none is - given, a name will be generated - - `unique`: should this index guarantee uniqueness? - - `dropDups` or `drop_dups`: should we drop duplicates - - `background`: if this index should be created in the - background - - `sparse`: if True, omit from the index any documents that lack - the indexed field - - `bucketSize` or `bucket_size`: for use with geoHaystack indexes. - Number of documents to group together within a certain proximity - to a given longitude and latitude. - - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` - index - - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` - index - - `expireAfterSeconds`: <int> Used to create an expiring (TTL) - collection. MongoDB will automatically delete documents from - this collection after <int> seconds. The indexed field must - be a UTC datetime or the data will not expire. - - .. note:: `expireAfterSeconds` requires server version **>= 2.1.2** - - :Parameters: - - `key_or_list`: a single key or a list of (key, direction) - pairs specifying the index to create - - `cache_for` (optional): time window (in seconds) during which - this index will be recognized by subsequent calls to - :meth:`ensure_index` - see documentation for - :meth:`ensure_index` for details - - `**kwargs` (optional): any additional index creation - options (see the above list) should be passed as keyword - arguments - - `ttl` (deprecated): Use `cache_for` instead. - - .. versionchanged:: 2.3 - The `ttl` parameter has been deprecated to avoid confusion with - TTL collections. Use `cache_for` instead. - - .. versionchanged:: 2.2 - Removed deprecated argument: deprecated_unique - - .. versionchanged:: 1.5.1 - Accept kwargs to support all index creation options. - - .. versionadded:: 1.5 - The `name` parameter. - - .. seealso:: :meth:`ensure_index` - - .. mongodoc:: indexes - """ - - if 'ttl' in kwargs: - cache_for = kwargs.pop('ttl') - warnings.warn("ttl is deprecated. Please use cache_for instead.", - DeprecationWarning, stacklevel=2) - - # The types supported by datetime.timedelta. 2to3 removes long.
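# Editor's sketch of the options documented above (assumes a 2.x-era
# ``db`` handle and ``import pymongo``; names are illustrative):
#
#     db.log.create_index("created_at", expireAfterSeconds=3600)  # TTL index
#     db.people.create_index([("last", pymongo.ASCENDING),
#                             ("first", pymongo.ASCENDING)],
#                            unique=True, name="name_idx")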
- if not isinstance(cache_for, (int, long, float)): - raise TypeError("cache_for must be an integer or float.") - - keys = helpers._index_list(key_or_list) - index_doc = helpers._index_document(keys) - - name = "name" in kwargs and kwargs["name"] or _gen_index_name(keys) - index = {"key": index_doc, "name": name} - - if "drop_dups" in kwargs: - kwargs["dropDups"] = kwargs.pop("drop_dups") - - if "bucket_size" in kwargs: - kwargs["bucketSize"] = kwargs.pop("bucket_size") - - index.update(kwargs) - - try: - self.__database.command('createIndexes', self.name, indexes=[index]) - except OperationFailure, exc: - if exc.code in (59, None): - index["ns"] = self.__full_name - self.__database.system.indexes.insert(index, manipulate=False, - check_keys=False, - **self._get_wc_override()) - else: - raise - - self.__database.connection._cache_index(self.__database.name, - self.__name, name, cache_for) - - return name - - def ensure_index(self, key_or_list, cache_for=300, **kwargs): - """Ensures that an index exists on this collection. - - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`). - See :meth:`create_index` for a detailed example. - - Unlike :meth:`create_index`, which attempts to create an index - unconditionally, :meth:`ensure_index` takes advantage of some - caching within the driver such that it only attempts to create - indexes that might not already exist. When an index is created - (or ensured) by PyMongo it is "remembered" for `cache_for` - seconds. Repeated calls to :meth:`ensure_index` within that - time limit will be lightweight - they will not attempt to - actually create the index. - - Care must be taken when the database is being accessed through - multiple clients at once. If an index is created using - this client and deleted using another, any call to - :meth:`ensure_index` within the cache window will fail to - re-create the missing index. - - Returns the specified or generated index name used if - :meth:`ensure_index` attempts to create the index. Returns - ``None`` if the index is already cached. - - All optional index creation parameters should be passed as - keyword arguments to this method. Valid options include: - - - `name`: custom name to use for this index - if none is - given, a name will be generated - - `unique`: should this index guarantee uniqueness? - - `dropDups` or `drop_dups`: should we drop duplicates - during index creation when creating a unique index? - - `background`: if this index should be created in the - background - - `sparse`: if True, omit from the index any documents that lack - the indexed field - - `bucketSize` or `bucket_size`: for use with geoHaystack indexes. - Number of documents to group together within a certain proximity - to a given longitude and latitude. - - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` - index - - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` - index - - `expireAfterSeconds`: <int> Used to create an expiring (TTL) - collection. MongoDB will automatically delete documents from - this collection after <int> seconds. The indexed field must - be a UTC datetime or the data will not expire. - - ..
note:: `expireAfterSeconds` requires server version **>= 2.1.2** - - :Parameters: - - `key_or_list`: a single key or a list of (key, direction) - pairs specifying the index to create - - `cache_for` (optional): time window (in seconds) during which - this index will be recognized by subsequent calls to - :meth:`ensure_index` - - `**kwargs` (optional): any additional index creation - options (see the above list) should be passed as keyword - arguments - - `ttl` (deprecated): Use `cache_for` instead. - - .. versionchanged:: 2.3 - The `ttl` parameter has been deprecated to avoid confusion with - TTL collections. Use `cache_for` instead. - - .. versionchanged:: 2.2 - Removed deprecated argument: deprecated_unique - - .. versionchanged:: 1.5.1 - Accept kwargs to support all index creation options. - - .. versionadded:: 1.5 - The `name` parameter. - - .. seealso:: :meth:`create_index` - """ - if "name" in kwargs: - name = kwargs["name"] - else: - keys = helpers._index_list(key_or_list) - name = kwargs["name"] = _gen_index_name(keys) - - if not self.__database.connection._cached(self.__database.name, - self.__name, name): - return self.create_index(key_or_list, cache_for, **kwargs) - return None - - def drop_indexes(self): - """Drops all indexes on this collection. - - Can be used on non-existent collections or collections with no indexes. - Raises OperationFailure on an error. - """ - self.__database.connection._purge_index(self.__database.name, - self.__name) - self.drop_index(u"*") - - def drop_index(self, index_or_name): - """Drops the specified index on this collection. - - Can be used on non-existent collections or collections with no - indexes. Raises OperationFailure on an error. `index_or_name` - can be either an index name (as returned by `create_index`), - or an index specifier (as passed to `create_index`). An index - specifier should be a list of (key, direction) pairs. Raises - TypeError if index is not an instance of (str, unicode, list). - - .. warning:: - - if a custom name was used on index creation (by - passing the `name` parameter to :meth:`create_index` or - :meth:`ensure_index`) the index **must** be dropped by name. - - :Parameters: - - `index_or_name`: index (or name of index) to drop - """ - name = index_or_name - if isinstance(index_or_name, list): - name = _gen_index_name(index_or_name) - - if not isinstance(name, basestring): - raise TypeError("index_or_name must be an index name or list") - - self.__database.connection._purge_index(self.__database.name, - self.__name, name) - self.__database.command("dropIndexes", self.__name, index=name, - allowable_errors=["ns not found"]) - - def reindex(self): - """Rebuilds all indexes on this collection. - - .. warning:: reindex blocks all other operations (indexes - are built in the foreground) and will be slow for large - collections. - - .. versionadded:: 1.11+ - """ - return self.__database.command("reIndex", self.__name) - - def index_information(self): - """Get information on this collection's indexes. - - Returns a dictionary where the keys are index names (as - returned by create_index()) and the values are dictionaries - containing information about each index. The dictionary is - guaranteed to contain at least a single key, ``"key"`` which - is a list of (key, direction) pairs specifying the index (as - passed to create_index()). It will also contain any other - information in `system.indexes`, except for the ``"ns"`` and - ``"name"`` keys, which are cleaned.
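The client-side cache that ensure_index() consults above can be sketched as a name-to-expiry map; this illustrates the contract only, not the driver's actual _cache_index()/_cached() implementation::

    import time

    class IndexCache(object):
        def __init__(self):
            self._expiry = {}

        def cache(self, db, coll, name, cache_for):
            # Remember the index name for cache_for seconds.
            self._expiry[(db, coll, name)] = time.time() + cache_for

        def cached(self, db, coll, name):
            # True while the remembered name is still fresh.
            deadline = self._expiry.get((db, coll, name))
            return deadline is not None and time.time() < deadline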
Example output might look - like this: - - >>> db.test.ensure_index("x", unique=True) - u'x_1' - >>> db.test.index_information() - {u'_id_': {u'key': [(u'_id', 1)]}, - u'x_1': {u'unique': True, u'key': [(u'x', 1)]}} - - - .. versionchanged:: 1.7 - The values in the resultant dictionary are now dictionaries - themselves, whose ``"key"`` item contains the list that was - the value in previous versions of PyMongo. - """ - raw = self.__database.system.indexes.find({"ns": self.__full_name}, - {"ns": 0}, as_class=SON) - info = {} - for index in raw: - index["key"] = index["key"].items() - index = dict(index) - info[index.pop("name")] = index - return info - - def options(self): - """Get the options set on this collection. - - Returns a dictionary of options and their values - see - :meth:`~pymongo.database.Database.create_collection` for more - information on the possible options. Returns an empty - dictionary if the collection has not been created yet. - """ - result = self.__database.system.namespaces.find_one( - {"name": self.__full_name}) - - if not result: - return {} - - options = result.get("options", {}) - if "create" in options: - del options["create"] - - return options - - def aggregate(self, pipeline, **kwargs): - """Perform an aggregation using the aggregation framework on this - collection. - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if the `read_preference` attribute of this instance is not set to - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the - (deprecated) `slave_okay` attribute of this instance is set to `True` - the `aggregate command`_ will be sent to a secondary or slave. - - :Parameters: - - `pipeline`: a single command or list of aggregation commands - - `**kwargs`: send arbitrary parameters to the aggregate command - - .. note:: Requires server version **>= 2.1.0**. - - With server version **>= 2.5.1**, pass - ``cursor={}`` to retrieve unlimited aggregation results - with a :class:`~pymongo.command_cursor.CommandCursor`:: - - pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}] - cursor = collection.aggregate(pipeline, cursor={}) - for doc in cursor: - print doc - - .. versionchanged:: 2.7 - When the cursor option is used, return - :class:`~pymongo.command_cursor.CommandCursor` instead of - :class:`~pymongo.cursor.Cursor`. - .. versionchanged:: 2.6 - Added cursor support. - .. versionadded:: 2.3 - - .. _aggregate command: - http://docs.mongodb.org/manual/applications/aggregation - """ - if not isinstance(pipeline, (dict, list, tuple)): - raise TypeError("pipeline must be a dict, list or tuple") - - if isinstance(pipeline, dict): - pipeline = [pipeline] - - use_master = not self.slave_okay and not self.read_preference - - command_kwargs = { - 'pipeline': pipeline, - 'read_preference': self.read_preference, - 'tag_sets': self.tag_sets, - 'secondary_acceptable_latency_ms': ( - self.secondary_acceptable_latency_ms), - 'slave_okay': self.slave_okay, - '_use_master': use_master} - - command_kwargs.update(kwargs) - result, conn_id = self.__database._command( - "aggregate", self.__name, **command_kwargs) - - if 'cursor' in result: - return CommandCursor( - self, - result['cursor'], - conn_id, - command_kwargs.get('compile_re', True)) - else: - return result - - # TODO key and condition ought to be optional, but deprecation - # could be painful as argument order would have to change. 
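Putting the two aggregate() return forms documented above side by side (``db`` and the pipeline are assumed)::

    pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]

    # Default form: a result document with an embedded 'result' list.
    response = db.test.aggregate(pipeline)
    for doc in response['result']:
        print doc

    # With cursor={} on 2.5.1+ servers: a CommandCursor instead.
    for doc in db.test.aggregate(pipeline, cursor={}):
        print doc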
- def group(self, key, condition, initial, reduce, finalize=None, **kwargs): - """Perform a query similar to an SQL *group by* operation. - - Returns an array of grouped items. - - The `key` parameter can be: - - - ``None`` to use the entire document as a key. - - A :class:`list` of keys (each a :class:`basestring` - (:class:`str` in python 3)) to group by. - - A :class:`basestring` (:class:`str` in python 3), or - :class:`~bson.code.Code` instance containing a JavaScript - function to be applied to each document, returning the key - to group by. - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if the `read_preference` attribute of this instance is not set to - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or - the (deprecated) `slave_okay` attribute of this instance is set to - `True`, the group command will be sent to a secondary or slave. - - :Parameters: - - `key`: fields to group by (see above description) - - `condition`: specification of rows to be - considered (as a :meth:`find` query specification) - - `initial`: initial value of the aggregation counter object - - `reduce`: aggregation function as a JavaScript string - - `finalize`: function to be called on each object in output list. - - .. versionchanged:: 2.2 - Removed deprecated argument: command - - .. versionchanged:: 1.4 - The `key` argument can now be ``None`` or a JavaScript function, - in addition to a :class:`list` of keys. - - .. versionchanged:: 1.3 - The `command` argument now defaults to ``True`` and is deprecated. - """ - - group = {} - if isinstance(key, basestring): - group["$keyf"] = Code(key) - elif key is not None: - group = {"key": helpers._fields_list_to_dict(key)} - group["ns"] = self.__name - group["$reduce"] = Code(reduce) - group["cond"] = condition - group["initial"] = initial - if finalize is not None: - group["finalize"] = Code(finalize) - - use_master = not self.slave_okay and not self.read_preference - - return self.__database.command("group", group, - uuid_subtype=self.uuid_subtype, - read_preference=self.read_preference, - tag_sets=self.tag_sets, - secondary_acceptable_latency_ms=( - self.secondary_acceptable_latency_ms), - slave_okay=self.slave_okay, - _use_master=use_master, - **kwargs)["retval"] - - def rename(self, new_name, **kwargs): - """Rename this collection. - - If operating in auth mode, client must be authorized as an - admin to perform this operation. Raises :class:`TypeError` if - `new_name` is not an instance of :class:`basestring` - (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` - if `new_name` is not a valid collection name. - - :Parameters: - - `new_name`: new name for this collection - - `**kwargs` (optional): any additional rename options - should be passed as keyword arguments - (i.e. ``dropTarget=True``) - - .. versionadded:: 1.7 - support for accepting keyword arguments for rename options - """ - if not isinstance(new_name, basestring): - raise TypeError("new_name must be an instance " - "of %s" % (basestring.__name__,)) - - if not new_name or ".." in new_name: - raise InvalidName("collection names cannot be empty") - if new_name[0] == "." 
or new_name[-1] == ".": - raise InvalidName("collection names must not start or end with '.'") - if "$" in new_name and not new_name.startswith("oplog.$main"): - raise InvalidName("collection names must not contain '$'") - - new_name = "%s.%s" % (self.__database.name, new_name) - self.__database.connection.admin.command("renameCollection", - self.__full_name, - to=new_name, **kwargs) - - def distinct(self, key): - """Get a list of distinct values for `key` among all documents - in this collection. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in python 3). - - To get the distinct values for a key in the result set of a - query use :meth:`~pymongo.cursor.Cursor.distinct`. - - :Parameters: - - `key`: name of key for which we want to get the distinct values - - .. note:: Requires server version **>= 1.1.0** - - .. versionadded:: 1.1.1 - """ - return self.find().distinct(key) - - def map_reduce(self, map, reduce, out, full_response=False, **kwargs): - """Perform a map/reduce operation on this collection. - - If `full_response` is ``False`` (default) returns a - :class:`~pymongo.collection.Collection` instance containing - the results of the operation. Otherwise, returns the full - response from the server to the `map reduce command`_. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `out`: output collection name or `out object` (dict). See - the `map reduce command`_ documentation for available options. - Note: `out` options are order sensitive. :class:`~bson.son.SON` - can be used to specify multiple options. - e.g. SON([('replace', <collection name>), ('db', <database name>)]) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.map_reduce(map, reduce, "myresults", limit=2) - - .. note:: Requires server version **>= 1.1.1** - - .. seealso:: :doc:`/examples/aggregation` - - .. versionchanged:: 2.2 - Removed deprecated arguments: merge_output and reduce_output - - .. versionchanged:: 1.11+ - DEPRECATED The merge_output and reduce_output parameters. - - .. versionadded:: 1.2 - - .. _map reduce command: http://www.mongodb.org/display/DOCS/MapReduce - - .. mongodoc:: mapreduce - """ - if not isinstance(out, (basestring, dict)): - raise TypeError("'out' must be an instance of " - "%s or dict" % (basestring.__name__,)) - - if isinstance(out, dict) and out.get('inline'): - must_use_master = False - else: - must_use_master = True - - response = self.__database.command("mapreduce", self.__name, - uuid_subtype=self.uuid_subtype, - map=map, reduce=reduce, - read_preference=self.read_preference, - tag_sets=self.tag_sets, - secondary_acceptable_latency_ms=( - self.secondary_acceptable_latency_ms), - out=out, _use_master=must_use_master, - **kwargs) - - if full_response or not response.get('result'): - return response - elif isinstance(response['result'], dict): - dbase = response['result']['db'] - coll = response['result']['collection'] - return self.__database.connection[dbase][coll] - else: - return self.__database[response["result"]] - - def inline_map_reduce(self, map, reduce, full_response=False, **kwargs): - """Perform an inline map/reduce operation on this collection. - - Perform the map/reduce operation on the server in RAM. A result - collection is not created.
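A usage sketch of the map_reduce() out-options described above (the collection and database names are placeholders)::

    from bson.son import SON

    map_fn = "function () { emit(this.user_id, 1); }"
    reduce_fn = "function (key, values) { return Array.sum(values); }"

    # 'out' options are order sensitive, hence SON rather than dict.
    result_coll = db.events.map_reduce(
        map_fn, reduce_fn,
        out=SON([("replace", "mr_results"), ("db", "reports")]))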
The result set is returned as a list - of documents. - - If `full_response` is ``False`` (default) returns the - result documents in a list. Otherwise, returns the full - response from the server to the `map reduce command`_. - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if the `read_preference` attribute of this instance is not set to - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or - the (deprecated) `slave_okay` attribute of this instance is set to - `True`, the inline map reduce will be run on a secondary or slave. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.inline_map_reduce(map, reduce, limit=2) - - .. note:: Requires server version **>= 1.7.4** - - .. versionadded:: 1.10 - """ - - use_master = not self.slave_okay and not self.read_preference - - res = self.__database.command("mapreduce", self.__name, - uuid_subtype=self.uuid_subtype, - read_preference=self.read_preference, - tag_sets=self.tag_sets, - secondary_acceptable_latency_ms=( - self.secondary_acceptable_latency_ms), - slave_okay=self.slave_okay, - _use_master=use_master, - map=map, reduce=reduce, - out={"inline": 1}, **kwargs) - - if full_response: - return res - else: - return res.get("results") - - def find_and_modify(self, query={}, update=None, - upsert=False, sort=None, full_response=False, **kwargs): - """Update and return an object. - - This is a thin wrapper around the findAndModify_ command. The - positional arguments are designed to match the first three arguments - to :meth:`update` however most options should be passed as named - parameters. Either `update` or `remove` arguments are required, all - others are optional. - - Returns either the object before or after modification based on `new` - parameter. If no objects match the `query` and `upsert` is false, - returns ``None``. If upserting and `new` is false, returns ``{}``. - - If the full_response parameter is ``True``, the return value will be - the entire response object from the server, including the 'ok' and - 'lastErrorObject' fields, rather than just the modified object. - This is useful mainly because the 'lastErrorObject' document holds - information about the command's execution. - - :Parameters: - - `query`: filter for the update (default ``{}``) - - `update`: see second argument to :meth:`update` (no default) - - `upsert`: insert if object doesn't exist (default ``False``) - - `sort`: a list of (key, direction) pairs specifying the sort - order for this query. See :meth:`~pymongo.cursor.Cursor.sort` - for details. - - `full_response`: return the entire response object from the - server (default ``False``) - - `remove`: remove rather than updating (default ``False``) - - `new`: return updated rather than original object - (default ``False``) - - `fields`: see second argument to :meth:`find` (default all) - - `**kwargs`: any other options the findAndModify_ command - supports can be passed here. - - - .. mongodoc:: findAndModify - - .. _findAndModify: http://dochub.mongodb.org/core/findAndModify - - .. 
note:: Requires server version **>= 1.3.0** - - .. versionchanged:: 2.5 - Added the optional full_response parameter - - .. versionchanged:: 2.4 - Deprecated the use of mapping types for the sort parameter - - .. versionadded:: 1.10 - """ - if (not update and not kwargs.get('remove', None)): - raise ValueError("Must either update or remove") - - if (update and kwargs.get('remove', None)): - raise ValueError("Can't do both update and remove") - - # No need to include empty args - if query: - kwargs['query'] = query - if update: - kwargs['update'] = update - if upsert: - kwargs['upsert'] = upsert - if sort: - # Accept a list of tuples to match Cursor's sort parameter. - if isinstance(sort, list): - kwargs['sort'] = helpers._index_document(sort) - # Accept OrderedDict, SON, and dict with len == 1 so we - # don't break existing code already using find_and_modify. - elif (isinstance(sort, ordered_types) or - isinstance(sort, dict) and len(sort) == 1): - warnings.warn("Passing mapping types for `sort` is deprecated," - " use a list of (key, direction) pairs instead", - DeprecationWarning, stacklevel=2) - kwargs['sort'] = sort - else: - raise TypeError("sort must be a list of (key, direction) " - "pairs, a dict of len 1, or an instance of " - "SON or OrderedDict") - - no_obj_error = "No matching object found" - - out = self.__database.command("findAndModify", self.__name, - allowable_errors=[no_obj_error], - uuid_subtype=self.uuid_subtype, - **kwargs) - - if not out['ok']: - if out["errmsg"] == no_obj_error: - return None - else: - # Should never get here b/c of allowable_errors - raise ValueError("Unexpected Error: %s" % (out,)) - - if full_response: - return out - else: - return out.get('value') - - def __iter__(self): - return self - - def next(self): - raise TypeError("'Collection' object is not iterable") - - def __call__(self, *args, **kwargs): - """This is only here so that some API misusages are easier to debug. - """ - if "." not in self.__name: - raise TypeError("'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % - self.__name) - raise TypeError("'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % - self.__name.split(".")[-1]) +__doc__ = original_doc +__all__ = [ # noqa: F405 + "Collection", + "ReturnDocument", +] diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 690940d709..941e3a0eda 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -1,4 +1,4 @@ -# Copyright 2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,164 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""CommandCursor class to iterate over command results.""" +"""Re-import of synchronous CommandCursor API for compatibility.""" +from __future__ import annotations -from collections import deque +from pymongo.synchronous.command_cursor import * # noqa: F403 +from pymongo.synchronous.command_cursor import __doc__ as original_doc -from pymongo import helpers, message -from pymongo.errors import AutoReconnect, CursorNotFound - - -class CommandCursor(object): - """A cursor / iterator over command cursors. 
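For reference, a typical call to the find_and_modify() helper defined above, used as an atomic counter increment (names are illustrative)::

    counter = db.counters.find_and_modify(
        query={"_id": "page_views"},
        update={"$inc": {"n": 1}},
        upsert=True,
        new=True)  # return the post-increment document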
- """ - - def __init__(self, collection, cursor_info, - conn_id, compile_re=True, retrieved=0): - """Create a new command cursor. - """ - self.__collection = collection - self.__id = cursor_info['id'] - self.__conn_id = conn_id - self.__data = deque(cursor_info['firstBatch']) - self.__decode_opts = ( - collection.database.connection.document_class, - collection.database.connection.tz_aware, - collection.uuid_subtype, - compile_re - ) - self.__retrieved = retrieved - self.__batch_size = 0 - self.__killed = False - - def __del__(self): - if self.__id and not self.__killed: - self.__die() - - def __die(self): - """Closes this cursor. - """ - if self.__id and not self.__killed: - client = self.__collection.database.connection - if self.__conn_id is not None: - client.close_cursor(self.__id, self.__conn_id) - else: - client.close_cursor(self.__id) - self.__killed = True - - def close(self): - """Explicitly close / kill this cursor. Required for PyPy, Jython and - other Python implementations that don't use reference counting - garbage collection. - """ - self.__die() - - def batch_size(self, batch_size): - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size can not override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - - :Parameters: - - `batch_size`: The size of each batch of results requested. - """ - if not isinstance(batch_size, (int, long)): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - - self.__batch_size = batch_size == 1 and 2 or batch_size - return self - - def __send_message(self, msg): - """Send a getmore message and handle the response. - """ - client = self.__collection.database.connection - try: - res = client._send_message_with_response( - msg, _connection_to_use=self.__conn_id) - self.__conn_id, (response, dummy0, dummy1) = res - except AutoReconnect: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. - self.__killed = True - raise - - try: - response = helpers._unpack_response(response, - self.__id, - *self.__decode_opts) - except CursorNotFound: - self.__killed = True - raise - except AutoReconnect: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - self.__killed = True - client.disconnect() - raise - self.__id = response["cursor_id"] - - assert response["starting_from"] == self.__retrieved, ( - "Result batch started from %s, expected %s" % ( - response['starting_from'], self.__retrieved)) - - self.__retrieved += response["number_returned"] - self.__data = deque(response["data"]) - - def _refresh(self): - """Refreshes the cursor with more data from the server. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. 
- """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if self.__id: # Get More - self.__send_message( - message.get_more(self.__collection.full_name, - self.__batch_size, self.__id)) - - else: # Cursor id is zero nothing else to return - self.__killed = True - - return len(self.__data) - - @property - def alive(self): - """Does this cursor have the potential to return more data?""" - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self): - """Returns the id of the cursor.""" - return self.__id - - def __iter__(self): - return self - - def next(self): - """Advance the cursor. - """ - if len(self.__data) or self._refresh(): - coll = self.__collection - return coll.database._fix_incoming(self.__data.popleft(), coll) - else: - raise StopIteration - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.__die() +__doc__ = original_doc +__all__ = ["CommandCursor", "RawBatchCommandCursor"] # noqa: F405 diff --git a/pymongo/common.py b/pymongo/common.py index f246339c72..e23adac426 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -1,10 +1,10 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,166 +14,311 @@ """Functions and classes common to multiple pymongo modules.""" -import sys -import warnings -from pymongo import read_preferences +from __future__ import annotations -from pymongo.auth import MECHANISMS -from pymongo.read_preferences import ReadPreference +import datetime +import warnings +from collections import OrderedDict, abc +from difflib import get_close_matches +from importlib.metadata import requires, version +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + Union, + overload, +) +from urllib.parse import unquote_plus + +from bson import SON +from bson.binary import UuidRepresentation +from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry +from bson.raw_bson import RawBSONDocument +from pymongo.compression_support import ( + validate_compressors, + validate_zlib_compression_level, +) +from pymongo.driver_info import DriverInfo from pymongo.errors import ConfigurationError -from bson.binary import (OLD_UUID_SUBTYPE, UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY) - -HAS_SSL = True -try: - import ssl -except ImportError: - HAS_SSL = False +from pymongo.monitoring import _validate_event_listeners +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import _MONGOS_MODES, _ServerMode +from pymongo.server_api import ServerApi +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean +if TYPE_CHECKING: + from pymongo.typings import _AgnosticClientSession -# Jython 2.7 includes an incomplete ssl module. See PYTHON-498. -if sys.platform.startswith('java'): - HAS_SSL = False +ORDERED_TYPES: Sequence[Type[Any]] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. 
-MAX_BSON_SIZE = 16 * (1024 ** 2) -MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE +MAX_BSON_SIZE = 16 * (1024**2) +MAX_MESSAGE_SIZE = 48 * 1000 * 1000 MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 -MAX_WRITE_BATCH_SIZE = 1000 +MAX_WRITE_BATCH_SIZE = 100000 # What this version of PyMongo supports. -MIN_SUPPORTED_WIRE_VERSION = 0 -MAX_SUPPORTED_WIRE_VERSION = 2 +MIN_SUPPORTED_SERVER_VERSION = "4.2" +MIN_SUPPORTED_WIRE_VERSION = 8 +# MongoDB 8.0 +MAX_SUPPORTED_WIRE_VERSION = 25 + +# Frequency to call hello on servers, in seconds. +HEARTBEAT_FREQUENCY = 10 + +# Frequency to clean up unclosed cursors, in seconds. +# See MongoClient._process_kill_cursors. +KILL_CURSOR_FREQUENCY = 1 + +# Frequency to process events queue, in seconds. +EVENTS_QUEUE_FREQUENCY = 1 + +# How long to wait, in seconds, for a suitable server to be found before +# aborting an operation. For example, if the client attempts an insert +# during a replica set election, SERVER_SELECTION_TIMEOUT governs the +# longest it is willing to wait for a new primary to be found. +SERVER_SELECTION_TIMEOUT = 30 + +# Spec requires at least 500ms between hello calls. +MIN_HEARTBEAT_INTERVAL = 0.5 + +# Spec requires at least 60s between SRV rescans. +MIN_SRV_RESCAN_INTERVAL = 60 + +# Default connectTimeout in seconds. +CONNECT_TIMEOUT = 20.0 + +# Default value for maxPoolSize. +MAX_POOL_SIZE = 100 + +# Default value for minPoolSize. +MIN_POOL_SIZE = 0 + +# The maximum number of concurrent connection creation attempts per pool. +MAX_CONNECTING = 2 + +# Default value for maxIdleTimeMS. +MAX_IDLE_TIME_MS: Optional[int] = None + +# Default value for maxIdleTimeMS in seconds. +MAX_IDLE_TIME_SEC: Optional[int] = None + +# Default value for waitQueueTimeoutMS in seconds. +WAIT_QUEUE_TIMEOUT: Optional[int] = None + +# Default value for localThresholdMS. +LOCAL_THRESHOLD_MS = 15 + +# Default value for retryWrites. +RETRY_WRITES = True + +# Default value for retryReads. +RETRY_READS = True + +# The error code returned when a command doesn't exist. +COMMAND_NOT_FOUND_CODES: Sequence[int] = (59,) + +# Error codes to ignore if GridFS calls createIndex on a secondary +UNAUTHORIZED_CODES: Sequence[int] = (13, 16547, 16548) + +# Maximum number of sessions to send in a single endSessions command. +# From the driver sessions spec. +_MAX_END_SESSIONS = 10000 + +# Default value for srvServiceName +SRV_SERVICE_NAME = "mongodb" + +# Default value for serverMonitoringMode +SERVER_MONITORING_MODE = "auto" # poll/stream/auto +# Auth mechanism properties that must raise an error instead of warning if they invalidate. +_MECH_PROP_MUST_RAISE = ["CANONICALIZE_HOST_NAME"] -def raise_config_error(key, dummy): + +def partition_node(node: str) -> tuple[str, int]: + """Split a host:port string into (host, int(port)) pair.""" + host = node + port = 27017 + idx = node.rfind(":") + if idx != -1: + host, port = node[:idx], int(node[idx + 1 :]) + if host.startswith("["): + host = host[1:-1] + return host, port + + +def clean_node(node: str) -> tuple[str, int]: + """Split and normalize a node name from a hello response.""" + host, port = partition_node(node) + + # Normalize hostname to lowercase, since DNS is case-insensitive: + # https://tools.ietf.org/html/rfc4343 + # This prevents useless rediscovery if "foo.com" is in the seed list but + # "FOO.com" is in the hello response. 
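# Editor's sketch (hypothetical values) of partition_node() above: a bare
# host defaults to port 27017 and bracketed IPv6 literals are unwrapped.
#
#     partition_node("example.com")        -> ("example.com", 27017)
#     partition_node("example.com:27018")  -> ("example.com", 27018)
#     partition_node("[::1]:27017")        -> ("::1", 27017)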
+ return host.lower(), port + + +def raise_config_error(key: str, suggestions: Optional[list[str]] = None) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError("Unknown option %s" % (key,)) + msg = f"Unknown option: {key}." + if suggestions: + msg += f" Did you mean one of ({', '.join(suggestions)}) or maybe a camelCase version of one? Refer to docstring." + raise ConfigurationError(msg) # Mapping of URI uuid representation options to valid subtypes. -_UUID_SUBTYPES = { - 'standard': UUID_SUBTYPE, - 'pythonLegacy': OLD_UUID_SUBTYPE, - 'javaLegacy': JAVA_LEGACY, - 'csharpLegacy': CSHARP_LEGACY +_UUID_REPRESENTATIONS = { + "unspecified": UuidRepresentation.UNSPECIFIED, + "standard": UuidRepresentation.STANDARD, + "pythonLegacy": UuidRepresentation.PYTHON_LEGACY, + "javaLegacy": UuidRepresentation.JAVA_LEGACY, + "csharpLegacy": UuidRepresentation.CSHARP_LEGACY, } -def validate_boolean(option, value): - """Validates that 'value' is 'true' or 'false'. - """ - if isinstance(value, bool): - return value - elif isinstance(value, basestring): - if value not in ('true', 'false'): - raise ConfigurationError("The value of %s must be " - "'true' or 'false'" % (option,)) - return value == 'true' - raise TypeError("Wrong type for %s, value must be a boolean" % (option,)) +def validate_boolean_or_string(option: str, value: Any) -> bool: + """Validates that value is True, False, 'true', or 'false'.""" + if isinstance(value, str): + if value not in ("true", "false"): + raise ValueError(f"The value of {option} must be 'true' or 'false'") + return value == "true" + return validate_boolean(option, value) -def validate_integer(option, value): - """Validates that 'value' is an integer (or basestring representation). - """ - if isinstance(value, (int, long)): +def validate_integer(option: str, value: Any) -> int: + """Validates that 'value' is an integer (or basestring representation).""" + if isinstance(value, int): return value - elif isinstance(value, basestring): - if not value.isdigit(): - raise ConfigurationError("The value of %s must be " - "an integer" % (option,)) - return int(value) - raise TypeError("Wrong type for %s, value must be an integer" % (option,)) + elif isinstance(value, str): + try: + return int(value) + except ValueError: + raise ValueError(f"The value of {option} must be an integer") from None + raise TypeError(f"Wrong type for {option}, value must be an integer, not {type(value)}") -def validate_positive_integer(option, value): - """Validate that 'value' is a positive integer. - """ +def validate_positive_integer(option: str, value: Any) -> int: + """Validate that 'value' is a positive integer, which does not include 0.""" + val = validate_integer(option, value) + if val <= 0: + raise ValueError(f"The value of {option} must be a positive integer") + return val + + +def validate_non_negative_integer(option: str, value: Any) -> int: + """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ConfigurationError("The value of %s must be " - "a positive integer" % (option,)) + raise ValueError(f"The value of {option} must be a non negative integer") return val -def validate_readable(option, value): - """Validates that 'value' is file-like and readable. 
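The suggestions parameter added to raise_config_error() above pairs naturally with difflib; a sketch of how a caller might produce them (the option list here is illustrative, the real one lives elsewhere in common.py)::

    from difflib import get_close_matches

    known = ["maxPoolSize", "minPoolSize", "maxIdleTimeMS"]
    suggestions = get_close_matches("maxpoolsize", known, cutoff=0.2)
    # raise_config_error("maxpoolsize", suggestions)
    # -> ConfigurationError: Unknown option: maxpoolsize. Did you mean one
    #    of (maxPoolSize, ...) or maybe a camelCase version of one? ...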
- """ +def validate_readable(option: str, value: Any) -> Optional[str]: + """Validates that 'value' is file-like and readable.""" + if value is None: + return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting - value = validate_basestring(option, value) - open(value, 'r').close() + value = validate_string(option, value) + open(value).close() return value -def validate_cert_reqs(option, value): - """Validate the cert reqs are valid. It must be None or one of the three - values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``""" +def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: + """Validate that 'value' is a positive integer or None.""" if value is None: return value - if HAS_SSL: - if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED): - return value - raise ConfigurationError("The value of %s must be one of: " - "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or " - "`ssl.CERT_REQUIRED" % (option,)) - else: - raise ConfigurationError("The value of %s is set but can't be " - "validated. The ssl module is not available" - % (option,)) + return validate_positive_integer(option, value) -def validate_positive_integer_or_none(option, value): - """Validate that 'value' is a positive integer or None. - """ +def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: + """Validate that 'value' is a positive integer or 0 or None.""" if value is None: return value - return validate_positive_integer(option, value) + return validate_non_negative_integer(option, value) -def validate_basestring(option, value): - """Validates that 'value' is an instance of `basestring`. - """ - if isinstance(value, basestring): +def validate_string(option: str, value: Any) -> str: + """Validates that 'value' is an instance of `str`.""" + if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an " - "instance of %s" % (option, basestring.__name__)) + raise TypeError(f"Wrong type for {option}, value must be an instance of str, not {type(value)}") -def validate_int_or_basestring(option, value): - """Validates that 'value' is an integer or string. 
- """ - if isinstance(value, (int, long)): +def validate_string_or_none(option: str, value: Any) -> Optional[str]: + """Validates that 'value' is an instance of `basestring` or `None`.""" + if value is None: return value - elif isinstance(value, basestring): - if value.isdigit(): + return validate_string(option, value) + + +def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: + """Validates that 'value' is an integer or string.""" + if isinstance(value, int): + return value + elif isinstance(value, str): + try: return int(value) + except ValueError: + return value + raise TypeError( + f"Wrong type for {option}, value must be an integer or a string, not {type(value)}" + ) + + +def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: + """Validates that 'value' is an integer or string.""" + if isinstance(value, int): return value - raise TypeError("Wrong type for %s, value must be an " - "integer or a string" % (option,)) + elif isinstance(value, str): + try: + val = int(value) + except ValueError: + return value + return validate_non_negative_integer(option, val) + raise TypeError( + f"Wrong type for {option}, value must be an non negative integer or a string, not {type(value)}" + ) -def validate_positive_float(option, value): +def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is - positive. + positive. """ - err = ConfigurationError("%s must be a positive int or float" % (option,)) + errmsg = f"{option} must be an integer or float" try: value = float(value) - except (ValueError, TypeError): - raise err + except ValueError: + raise ValueError(errmsg) from None + except TypeError: + raise TypeError(errmsg) from None # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise err - + raise ValueError(f"{option} must be greater than 0 and less than one billion") return value -def validate_timeout_or_none(option, value): +def validate_positive_float_or_zero(option: str, value: Any) -> float: + """Validates that 'value' is 0 or a positive float, or can be converted to + 0 or a positive float. + """ + if value == 0 or value == "0": + return 0 + return validate_positive_float(option, value) + + +def validate_timeout_or_none(option: str, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. """ @@ -182,538 +327,857 @@ def validate_timeout_or_none(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_read_preference(dummy, value): - """Validate read preference for a ReplicaSetConnection. +def validate_timeout_or_zero(option: str, value: Any) -> float: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds for the case where None is an error + and 0 is valid. Setting the timeout to nothing in the URI string is a + config error. 
""" - if value in read_preferences.modes: - return value + if value is None: + raise ConfigurationError(f"{option} cannot be None") + if value == 0 or value == "0": + return 0 + return validate_positive_float(option, value) / 1000.0 - # Also allow string form of enum for uri_parser - try: - return read_preferences.mongos_enum(value) - except ValueError: - raise ConfigurationError("Not a valid read preference") +def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float]: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. value=0 and value="0" are treated the + same as value=None which means unlimited timeout. + """ + if value is None or value == 0 or value == "0": + return None + return validate_positive_float(option, value) / 1000.0 -def validate_tag_sets(dummy, value): - """Validate tag sets for a ReplicaSetConnection. + +def validate_timeoutms(option: Any, value: Any) -> Optional[float]: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. """ if value is None: - return [{}] + return None + return validate_positive_float_or_zero(option, value) / 1000.0 - if not isinstance(value, list): - raise ConfigurationError(( - "Tag sets %s invalid, must be a list") % repr(value)) - if len(value) == 0: - raise ConfigurationError(( - "Tag sets %s invalid, must be None or contain at least one set of" - " tags") % repr(value)) - - for tags in value: - if not isinstance(tags, dict): - raise ConfigurationError( - "Tag set %s invalid, must be a dict" % repr(tags)) +def validate_max_staleness(option: str, value: Any) -> int: + """Validates maxStalenessSeconds according to the Max Staleness Spec.""" + if value == -1 or value == "-1": + # Default: No maximum staleness. + return -1 + return validate_positive_integer(option, value) + + +def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: + """Validate a read preference.""" + if not isinstance(value, _ServerMode): + raise TypeError(f"{value!r} is not a read preference") return value -def validate_auth_mechanism(option, value): - """Validate the authMechanism URI option. +def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: + """Validate read preference mode for a MongoClient. + + .. versionchanged:: 3.5 + Returns the original ``value`` instead of the validated read preference + mode. """ - # CRAM-MD5 is for server testing only. Undocumented, - # unsupported, may be removed at any time. You have - # been warned. - if value not in MECHANISMS and value != 'CRAM-MD5': - raise ConfigurationError("%s must be in " - "%s" % (option, MECHANISMS)) + if value not in _MONGOS_MODES: + raise ValueError(f"{value} is not a valid read preference") return value -def validate_uuid_representation(dummy, value): - """Validate the uuid representation option selected in the URI. - """ - if value not in _UUID_SUBTYPES.keys(): - raise ConfigurationError("%s is an invalid UUID representation. 
" - "Must be one of " - "%s" % (value, _UUID_SUBTYPES.keys())) - return _UUID_SUBTYPES[value] - - -def validate_uuid_subtype(dummy, value): - """Validate the uuid subtype option, a numerical value whose acceptable - values are defined in bson.binary.""" - if value not in _UUID_SUBTYPES.values(): - raise ConfigurationError("Not a valid setting for uuid_subtype.") +def validate_auth_mechanism(option: str, value: Any) -> str: + """Validate the authMechanism URI option.""" + from pymongo.auth_shared import MECHANISMS + + if value not in MECHANISMS: + raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") return value -# jounal is an alias for j, -# wtimeoutms is an alias for wtimeout, -# readpreferencetags is an alias for tag_sets. -VALIDATORS = { - 'replicaset': validate_basestring, - 'slaveok': validate_boolean, - 'slave_okay': validate_boolean, - 'safe': validate_boolean, - 'w': validate_int_or_basestring, - 'wtimeout': validate_integer, - 'wtimeoutms': validate_integer, - 'fsync': validate_boolean, - 'j': validate_boolean, - 'journal': validate_boolean, - 'connecttimeoutms': validate_timeout_or_none, - 'sockettimeoutms': validate_timeout_or_none, - 'waitqueuetimeoutms': validate_timeout_or_none, - 'waitqueuemultiple': validate_positive_integer_or_none, - 'ssl': validate_boolean, - 'ssl_keyfile': validate_readable, - 'ssl_certfile': validate_readable, - 'ssl_cert_reqs': validate_cert_reqs, - 'ssl_ca_certs': validate_readable, - 'readpreference': validate_read_preference, - 'read_preference': validate_read_preference, - 'readpreferencetags': validate_tag_sets, - 'tag_sets': validate_tag_sets, - 'secondaryacceptablelatencyms': validate_positive_float, - 'secondary_acceptable_latency_ms': validate_positive_float, - 'auto_start_request': validate_boolean, - 'use_greenlets': validate_boolean, - 'authmechanism': validate_auth_mechanism, - 'authsource': validate_basestring, - 'gssapiservicename': validate_basestring, - 'uuidrepresentation': validate_uuid_representation, -} +def validate_uuid_representation(dummy: Any, value: Any) -> int: + """Validate the uuid representation option selected in the URI.""" + try: + return _UUID_REPRESENTATIONS[value] + except KeyError: + raise ValueError( + f"{value} is an invalid UUID representation. 
" + "Must be one of " + f"{tuple(_UUID_REPRESENTATIONS)}" + ) from None -_AUTH_OPTIONS = frozenset(['gssapiservicename']) +def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]]: + """Parse readPreferenceTags if passed as a client kwarg.""" + if not isinstance(value, list): + value = [value] + + tag_sets: list[dict[str, Any]] = [] + for tag_set in value: + if tag_set == "": + tag_sets.append({}) + continue + try: + tags = {} + for tag in tag_set.split(","): + key, val = tag.split(":") + tags[unquote_plus(key)] = unquote_plus(val) + tag_sets.append(tags) + except Exception: + raise ValueError(f"{tag_set!r} not a valid value for {name}") from None + return tag_sets + + +_MECHANISM_PROPS = frozenset( + [ + "SERVICE_NAME", + "SERVICE_HOST", + "CANONICALIZE_HOST_NAME", + "SERVICE_REALM", + "AWS_SESSION_TOKEN", + "ENVIRONMENT", + "TOKEN_RESOURCE", + ] +) + + +def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Union[bool, str]]: + """Validate authMechanismProperties.""" + props: dict[str, Any] = {} + if not isinstance(value, str): + if not isinstance(value, dict): + raise ValueError( + f"Auth mechanism properties must be given as a string or a dictionary, not {type(value)}" + ) + for key, value in value.items(): # noqa: B020 + if isinstance(value, str): + props[key] = value + elif isinstance(value, bool): + props[key] = str(value).lower() + elif key in ["ALLOWED_HOSTS"] and isinstance(value, list): + props[key] = value + elif key in ["OIDC_CALLBACK", "OIDC_HUMAN_CALLBACK"]: + from pymongo.auth_oidc_shared import OIDCCallback + + if not isinstance(value, OIDCCallback): + raise ValueError(f"callback must be an OIDCCallback object, not {type(value)}") + props[key] = value + else: + raise ValueError(f"Invalid type for auth mechanism property {key}, {type(value)}") + return props + + value = validate_string(option, value) + value = unquote_plus(value) + for opt in value.split(","): + key, _, val = opt.partition(":") + if not val: + raise ValueError("Malformed auth mechanism properties") + if key not in _MECHANISM_PROPS: + # Try not to leak the token. + if "AWS_SESSION_TOKEN" in key: + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like AWS_SESSION_TOKEN:" + ) + + raise ValueError( + f"{key} is not a supported auth " + "mechanism property. Must be one of " + f"{tuple(_MECHANISM_PROPS)}" + ) + + if key == "CANONICALIZE_HOST_NAME": + from pymongo.auth_shared import _validate_canonicalize_host_name + + props[key] = _validate_canonicalize_host_name(val) + else: + props[key] = val + return props -def validate_auth_option(option, value): - """Validate optional authentication parameters. - """ - lower, value = validate(option, value) - if lower not in _AUTH_OPTIONS: - raise ConfigurationError('Unknown ' - 'authentication option: %s' % (option,)) - return lower, value +def validate_document_class( + option: str, value: Any +) -> Union[Type[MutableMapping[str, Any]], Type[RawBSONDocument]]: + """Validate the document_class option.""" + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. 
+ is_mapping = False + try: + is_mapping = issubclass(value, abc.MutableMapping) + except TypeError: + if hasattr(value, "__origin__"): + is_mapping = issubclass(value.__origin__, abc.MutableMapping) + if not is_mapping and not issubclass(value, RawBSONDocument): + raise TypeError( + f"{option} must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.MutableMapping" + ) + return value -def validate(option, value): - """Generic validation function. - """ - lower = option.lower() - validator = VALIDATORS.get(lower, raise_config_error) - value = validator(option, value) - return lower, value +def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: + """Validate the type_registry option.""" + if value is not None and not isinstance(value, TypeRegistry): + raise TypeError(f"{option} must be an instance of {TypeRegistry}") + return value -SAFE_OPTIONS = frozenset([ - 'w', - 'wtimeout', - 'wtimeoutms', - 'fsync', - 'j', - 'journal' -]) +def validate_list(option: str, value: Any) -> list[Any]: + """Validates that 'value' is a list.""" + if not isinstance(value, list): + raise TypeError(f"{option} must be a list, not {type(value)}") + return value -class WriteConcern(dict): - def __init__(self, *args, **kwargs): - """A subclass of dict that overrides __setitem__ to - validate write concern options. - """ - super(WriteConcern, self).__init__(*args, **kwargs) +def validate_list_or_none(option: Any, value: Any) -> Optional[list[Any]]: + """Validates that 'value' is a list or None.""" + if value is None: + return value + return validate_list(option, value) + + +def validate_list_or_mapping(option: Any, value: Any) -> None: + """Validates that 'value' is a list or a document.""" + if not isinstance(value, (abc.Mapping, list)): + raise TypeError( + f"{option} must either be a list or an instance of dict, " + "bson.son.SON, or any other type that inherits from " + "collections.Mapping" + ) + + +def validate_is_mapping(option: str, value: Any) -> None: + """Validate the type of method arguments that expect a document.""" + if not isinstance(value, abc.Mapping): + raise TypeError( + f"{option} must be an instance of dict, bson.son.SON, or " + "any other type that inherits from " + "collections.Mapping" + ) + + +def validate_is_document_type(option: str, value: Any) -> None: + """Validate the type of method arguments that expect a MongoDB document.""" + if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): + raise TypeError( + f"{option} must be an instance of dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or " + "a type that inherits from " + "collections.MutableMapping" + ) + + +def validate_appname_or_none(option: str, value: Any) -> Optional[str]: + """Validate the appname option.""" + if value is None: + return value + validate_string(option, value) + # We need length in bytes, so encode utf8 first. + if len(value.encode("utf-8")) > 128: + raise ValueError(f"{option} must be <= 128 bytes") + return value - def __setitem__(self, key, value): - if key not in SAFE_OPTIONS: - raise ConfigurationError("%s is not a valid write " - "concern option." 
% (key,)) - key, value = validate(key, value) - super(WriteConcern, self).__setitem__(key, value) +def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: + """Validate the driver keyword arg.""" + if value is None: + return value + if not isinstance(value, DriverInfo): + raise TypeError(f"{option} must be an instance of DriverInfo") + return value -class BaseObject(object): - """A base class that provides attributes and methods common - to multiple pymongo classes. - SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. - """ +def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: + """Validate the server_api keyword arg.""" + if value is None: + return value + if not isinstance(value, ServerApi): + raise TypeError(f"{option} must be an instance of ServerApi, not {type(value)}") + return value - def __init__(self, **options): - - self.__slave_okay = False - self.__read_pref = ReadPreference.PRIMARY - self.__tag_sets = [{}] - self.__secondary_acceptable_latency_ms = 15 - self.__safe = None - self.__uuid_subtype = OLD_UUID_SUBTYPE - self.__write_concern = WriteConcern() - self.__set_options(options) - if (self.__read_pref == ReadPreference.PRIMARY - and self.__tag_sets != [{}]): - raise ConfigurationError( - "ReadPreference PRIMARY cannot be combined with tags") - - # If safe hasn't been implicitly set by write concerns then set it. - if self.__safe is None: - if options.get("w") == 0: - self.__safe = False - else: - self.__safe = validate_boolean('safe', - options.get("safe", True)) - # Note: 'safe' is always passed by Connection and ReplicaSetConnection - # Always do the most "safe" thing, but warn about conflicts. - if self.__safe and options.get('w') == 0: - - warnings.warn("Conflicting write concerns: %s. Write concern " - "options were configured, but w=0 disables all " - "other options." % self.write_concern, - UserWarning) - - def __set_safe_option(self, option, value): - """Validates and sets getlasterror options for this - object (Connection, Database, Collection, etc.) - """ - if value is None: - self.__write_concern.pop(option, None) - else: - self.__write_concern[option] = value - if option != "w" or value != 0: - self.__safe = True - - def __set_options(self, options): - """Validates and sets all options passed to this object.""" - for option, value in options.iteritems(): - if option in ('slave_okay', 'slaveok'): - self.__slave_okay = validate_boolean(option, value) - elif option in ('read_preference', "readpreference"): - self.__read_pref = validate_read_preference(option, value) - elif option in ('tag_sets', 'readpreferencetags'): - self.__tag_sets = validate_tag_sets(option, value) - elif option == 'uuidrepresentation': - self.__uuid_subtype = validate_uuid_subtype(option, value) - elif option in ( - 'secondaryacceptablelatencyms', - 'secondary_acceptable_latency_ms' - ): - self.__secondary_acceptable_latency_ms = \ - validate_positive_float(option, value) - elif option in SAFE_OPTIONS: - if option == 'journal': - self.__set_safe_option('j', value) - elif option == 'wtimeoutms': - self.__set_safe_option('wtimeout', value) - else: - self.__set_safe_option(option, value) - - def __set_write_concern(self, value): - """Property setter for write_concern.""" - if not isinstance(value, dict): - raise ConfigurationError("write_concern must be an " - "instance of dict or a subclass.") - # Make a copy here to avoid users accidentally setting the - # same dict on multiple instances. 
- wc = WriteConcern() - for k, v in value.iteritems(): - # Make sure we validate each option. - wc[k] = v - self.__write_concern = wc - - def __get_write_concern(self): - """The default write concern for this instance. - - Supports dict style access for getting/setting write concern - options. Valid options include: - - - `w`: (integer or string) If this is a replica set, write operations - will block until they have been replicated to the specified number - or tagged set of servers. `w=` always includes the replica set - primary (e.g. w=3 means write to the primary and wait until - replicated to **two** secondaries). **Setting w=0 disables write - acknowledgement and all other write concern options.** - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. - - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. - - >>> m = pymongo.MongoClient() - >>> m.write_concern - {} - >>> m.write_concern = {'w': 2, 'wtimeout': 1000} - >>> m.write_concern - {'wtimeout': 1000, 'w': 2} - >>> m.write_concern['j'] = True - >>> m.write_concern - {'wtimeout': 1000, 'j': True, 'w': 2} - >>> m.write_concern = {'j': True} - >>> m.write_concern - {'j': True} - >>> # Disable write acknowledgement and write concern - ... - >>> m.write_concern['w'] = 0 +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable[..., Any]]: + """Validates that 'value' is a callable.""" + if value is None: + return value + if not callable(value): + raise ValueError(f"{option} must be a callable, not {type(value)}") + return value - .. note:: Accessing :attr:`write_concern` returns its value - (a subclass of :class:`dict`), not a copy. - .. warning:: If you are using :class:`~pymongo.connection.Connection` - or :class:`~pymongo.replica_set_connection.ReplicaSetConnection` - make sure you explicitly set ``w`` to 1 (or a greater value) or - :attr:`safe` to ``True``. Unlike calling - :meth:`set_lasterror_options`, setting an option in - :attr:`write_concern` does not implicitly set :attr:`safe` - to ``True``. - """ - # To support dict style access we have to return the actual - # WriteConcern here, not a copy. - return self.__write_concern +def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: + """Validate a replacement document.""" + validate_is_mapping("replacement", replacement) + # Replacement can be {} + if replacement and not isinstance(replacement, RawBSONDocument): + first = next(iter(replacement)) + if first.startswith("$"): + raise ValueError("replacement can not include $ operators") - write_concern = property(__get_write_concern, __set_write_concern) - def __get_slave_okay(self): - """DEPRECATED. Use :attr:`read_preference` instead. 
+def validate_ok_for_update(update: Any) -> None: + """Validate an update document.""" + validate_list_or_mapping("update", update) + # Update cannot be {}. + if not update: + raise ValueError("update cannot be empty") - .. versionchanged:: 2.1 - Deprecated slave_okay. - .. versionadded:: 2.0 - """ - return self.__slave_okay + is_document = not isinstance(update, list) + first = next(iter(update)) + if is_document and not first.startswith("$"): + raise ValueError("update only works with $ operators") - def __set_slave_okay(self, value): - """Property setter for slave_okay""" - warnings.warn("slave_okay is deprecated. Please use " - "read_preference instead.", DeprecationWarning, - stacklevel=2) - self.__slave_okay = validate_boolean('slave_okay', value) - slave_okay = property(__get_slave_okay, __set_slave_okay) +_UNICODE_DECODE_ERROR_HANDLERS = frozenset(["strict", "replace", "ignore"]) - def __get_read_pref(self): - """The read preference mode for this instance. - See :class:`~pymongo.read_preferences.ReadPreference` for - available options. +def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: + """Validate the Unicode decode error handler option of CodecOptions.""" + if value not in _UNICODE_DECODE_ERROR_HANDLERS: + raise ValueError( + f"{value} is an invalid Unicode decode error handler. " + "Must be one of " + f"{tuple(_UNICODE_DECODE_ERROR_HANDLERS)}" + ) + return value - .. versionadded:: 2.1 - """ - return self.__read_pref - def __set_read_pref(self, value): - """Property setter for read_preference""" - self.__read_pref = validate_read_preference('read_preference', value) +def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: + """Validate the tzinfo option""" + if value is not None and not isinstance(value, datetime.tzinfo): + raise TypeError("%s must be an instance of datetime.tzinfo" % value) + return value - read_preference = property(__get_read_pref, __set_read_pref) - def __get_acceptable_latency(self): - """Any replica-set member whose ping time is within - secondary_acceptable_latency_ms of the nearest member may accept - reads. Defaults to 15 milliseconds. +def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[Any]: + """Validate the driver keyword arg.""" + if value is None: + return value + from pymongo.encryption_options import AutoEncryptionOpts - See :class:`~pymongo.read_preferences.ReadPreference`. + if not isinstance(value, AutoEncryptionOpts): + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts, not {type(value)}") - .. versionadded:: 2.3 + return value - .. note:: ``secondary_acceptable_latency_ms`` is ignored when talking - to a replica set *through* a mongos. The equivalent is the - localThreshold_ command line option. - .. 
_localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - return self.__secondary_acceptable_latency_ms +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversion]: + """Validate a DatetimeConversion string.""" + if value is None: + return DatetimeConversion.DATETIME - def __set_acceptable_latency(self, value): - """Property setter for secondary_acceptable_latency_ms""" - self.__secondary_acceptable_latency_ms = (validate_positive_float( - 'secondary_acceptable_latency_ms', value)) + if isinstance(value, str): + if value.isdigit(): + return DatetimeConversion(int(value)) + return DatetimeConversion[value] + elif isinstance(value, int): + return DatetimeConversion(value) + + raise TypeError( + f"{option} must be a str or int representing DatetimeConversion, not {type(value)}" + ) + + +def validate_server_monitoring_mode(option: str, value: str) -> str: + """Validate the serverMonitoringMode option.""" + if value not in {"auto", "stream", "poll"}: + raise ValueError( + f'{option}={value!r} is invalid. Must be one of "auto", "stream", or "poll"' + ) + return value - secondary_acceptable_latency_ms = property( - __get_acceptable_latency, __set_acceptable_latency) - def __get_tag_sets(self): - """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to - read only from members whose ``dc`` tag has the value ``"ny"``. - To specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags." ReplicaSetConnection tries each set of tags in turn - until it finds a set of tags with at least one matching member. +# Dictionary where keys are the names of public URI options, and values +# are lists of aliases for that option. +URI_OPTIONS_ALIAS_MAP: dict[str, list[str]] = { + "tls": ["ssl"], +} - .. seealso:: `Data-Center Awareness - `_ +# Dictionary where keys are the names of URI options, and values +# are functions that validate user-input values for that option. If an option +# alias uses a different validator than its public counterpart, it should be +# included here as a key, value pair. 
+URI_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { + "appname": validate_appname_or_none, + "authmechanism": validate_auth_mechanism, + "authmechanismproperties": validate_auth_mechanism_properties, + "authsource": validate_string, + "compressors": validate_compressors, + "connecttimeoutms": validate_timeout_or_none_or_zero, + "directconnection": validate_boolean_or_string, + "heartbeatfrequencyms": validate_timeout_or_none, + "journal": validate_boolean_or_string, + "localthresholdms": validate_positive_float_or_zero, + "maxidletimems": validate_timeout_or_none, + "maxconnecting": validate_positive_integer, + "maxpoolsize": validate_non_negative_integer_or_none, + "maxstalenessseconds": validate_max_staleness, + "readconcernlevel": validate_string_or_none, + "readpreference": validate_read_preference_mode, + "readpreferencetags": validate_read_preference_tags, + "replicaset": validate_string_or_none, + "retryreads": validate_boolean_or_string, + "retrywrites": validate_boolean_or_string, + "loadbalanced": validate_boolean_or_string, + "serverselectiontimeoutms": validate_timeout_or_zero, + "sockettimeoutms": validate_timeout_or_none_or_zero, + "tls": validate_boolean_or_string, + "tlsallowinvalidcertificates": validate_boolean_or_string, + "tlsallowinvalidhostnames": validate_boolean_or_string, + "tlscafile": validate_readable, + "tlscertificatekeyfile": validate_readable, + "tlscertificatekeyfilepassword": validate_string_or_none, + "tlsdisableocspendpointcheck": validate_boolean_or_string, + "tlsinsecure": validate_boolean_or_string, + "w": validate_non_negative_int_or_basestring, + "wtimeoutms": validate_non_negative_integer, + "zlibcompressionlevel": validate_zlib_compression_level, + "srvservicename": validate_string, + "srvmaxhosts": validate_non_negative_integer, + "timeoutms": validate_timeoutms, + "servermonitoringmode": validate_server_monitoring_mode, +} - .. versionadded:: 2.3 - """ - return self.__tag_sets +# Dictionary where keys are the names of URI options specific to pymongo, +# and values are functions that validate user-input values for those options. +NONSPEC_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { + "connect": validate_boolean_or_string, + "driver": validate_driver_or_none, + "server_api": validate_server_api_or_none, + "fsync": validate_boolean_or_string, + "minpoolsize": validate_non_negative_integer, + "tlscrlfile": validate_readable, + "tz_aware": validate_boolean_or_string, + "unicode_decode_error_handler": validate_unicode_decode_error_handler, + "uuidrepresentation": validate_uuid_representation, + "waitqueuemultiple": validate_non_negative_integer_or_none, + "waitqueuetimeoutms": validate_timeout_or_none, + "datetime_conversion": validate_datetime_conversion, +} + +# Dictionary where keys are the names of keyword-only options for the +# MongoClient constructor, and values are functions that validate user-input +# values for those options. 
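Before the keyword-only map that follows, here is a minimal sketch of how these tables are meant to be consumed (an editorial illustration, not part of the patch; it assumes the maps live in `pymongo.common` as in the released driver, and it runs after the alias augmentation performed later in this module):

```python
from pymongo.common import URI_OPTIONS_VALIDATOR_MAP

# Option names are looked up by lowercased key; URI values arrive as strings
# and the validator coerces them, e.g. "true" -> True for boolean options.
validator = URI_OPTIONS_VALIDATOR_MAP["journal"]
print(validator("journal", "true"))  # True

# The legacy "ssl" alias shares the validator of its public option "tls",
# because the augmentation loop copies validators across aliases.
assert URI_OPTIONS_VALIDATOR_MAP["ssl"] is URI_OPTIONS_VALIDATOR_MAP["tls"]
```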
+KW_VALIDATORS: dict[str, Callable[[Any, Any], Any]] = {
+    "document_class": validate_document_class,
+    "type_registry": validate_type_registry,
+    "read_preference": validate_read_preference,
+    "event_listeners": _validate_event_listeners,
+    "tzinfo": validate_tzinfo,
+    "username": validate_string_or_none,
+    "password": validate_string_or_none,
+    "server_selector": validate_is_callable_or_none,
+    "auto_encryption_opts": validate_auto_encryption_opts_or_none,
+    "authoidcallowedhosts": validate_list,
+}
+
+# Dictionary where keys are any URI option name, and values are the
+# internally-used names of that URI option. Options with only one name
+# variant need not be included here. Options whose public and internal
+# names are the same need not be included here.
+INTERNAL_URI_OPTION_NAME_MAP: dict[str, str] = {
+    "ssl": "tls",
+}
+
+# Map from deprecated URI option names to a tuple indicating the method of
+# their deprecation and any additional information that may be needed to
+# construct the warning message.
+URI_OPTIONS_DEPRECATION_MAP: dict[str, tuple[str, str]] = {
+    # format: <deprecated option name>: (<mode>, <message>),
+    # Supported <mode> values:
+    # - 'renamed': <message> should be the new option name. Note that case is
+    #   preserved for renamed options as they are part of user warnings.
+    # - 'removed': <message> may suggest the rationale for deprecating the
+    #   option and/or recommend remedial action.
+    # For example:
+    # 'wtimeout': ('renamed', 'wTimeoutMS'),
+}
+
+# Augment the option validator map with pymongo-specific option information.
+URI_OPTIONS_VALIDATOR_MAP.update(NONSPEC_OPTIONS_VALIDATOR_MAP)
+for optname, aliases in URI_OPTIONS_ALIAS_MAP.items():
+    for alias in aliases:
+        if alias not in URI_OPTIONS_VALIDATOR_MAP:
+            URI_OPTIONS_VALIDATOR_MAP[alias] = URI_OPTIONS_VALIDATOR_MAP[optname]
+
+# Map containing all URI option and keyword argument validators.
+VALIDATORS: dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy()
+VALIDATORS.update(KW_VALIDATORS)
+
+# List of timeout-related options.
+TIMEOUT_OPTIONS: list[str] = [
+    "connecttimeoutms",
+    "heartbeatfrequencyms",
+    "maxidletimems",
+    "maxstalenessseconds",
+    "serverselectiontimeoutms",
+    "sockettimeoutms",
+    "waitqueuetimeoutms",
+]
+
+_AUTH_OPTIONS = frozenset(["authmechanismproperties"])
+
+
+def validate_auth_option(option: str, value: Any) -> tuple[str, Any]:
+    """Validate optional authentication parameters."""
+    lower, value = validate(option, value)
+    if lower not in _AUTH_OPTIONS:
+        raise ConfigurationError(f"Unknown option: {option}. Must be in {_AUTH_OPTIONS}")
+    return option, value

-    def __set_tag_sets(self, value):
-        """Property setter for tag_sets"""
-        self.__tag_sets = validate_tag_sets('tag_sets', value)

-    tag_sets = property(__get_tag_sets, __set_tag_sets)
+def _get_validator(
+    key: str, validators: dict[str, Callable[[Any, Any], Any]], normed_key: Optional[str] = None
+) -> Callable[[Any, Any], Any]:
+    normed_key = normed_key or key
+    try:
+        return validators[normed_key]
+    except KeyError:
+        suggestions = get_close_matches(normed_key, validators, cutoff=0.2)
+        raise_config_error(key, suggestions)
+
+
+def validate(option: str, value: Any) -> tuple[str, Any]:
+    """Generic validation function."""
+    validator = _get_validator(option, VALIDATORS, normed_key=option.lower())
+    value = validator(option, value)
+    return option, value

-    def __get_uuid_subtype(self):
-        """This attribute specifies which BSON Binary subtype is used when
-        storing UUIDs. Historically UUIDs have been stored as BSON Binary
-        subtype 3. This attribute is used to switch to the newer BSON Binary
-        subtype 4. It can also be used to force legacy byte order and subtype
-        compatibility with the Java and C# drivers. See the :mod:`bson.binary`
-        module for all options."""
-        return self.__uuid_subtype

-    def __set_uuid_subtype(self, value):
-        """Sets the BSON Binary subtype to be used when storing UUIDs."""
-        self.__uuid_subtype = validate_uuid_subtype("uuid_subtype", value)
+def get_validated_options(
+    options: Mapping[str, Any], warn: bool = True
+) -> MutableMapping[str, Any]:
+    """Validate each entry in options and raise a warning if it is not valid.
+    Returns a copy of options with invalid entries removed.

-    uuid_subtype = property(__get_uuid_subtype, __set_uuid_subtype)
+    :param options: A dict containing MongoDB URI options.
+    :param warn: If ``True`` then warnings will be logged and
+          invalid options will be ignored. Otherwise, invalid options will
+          cause errors.
+    """
+    validated_options: MutableMapping[str, Any]
+    if isinstance(options, _CaseInsensitiveDictionary):
+        validated_options = _CaseInsensitiveDictionary()

-    def __get_safe(self):
-        """**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.
+        def get_normed_key(x: str) -> str:
+            return x

-        Use getlasterror with every write operation?
+        def get_setter_key(x: str) -> str:
+            return options.cased_key(x)

-        .. versionadded:: 2.0
-        """
-        return self.__safe
+    else:
+        validated_options = {}
+
+        def get_normed_key(x: str) -> str:
+            return x.lower()
+
+        def get_setter_key(x: str) -> str:
+            return x
+
+    for opt, value in options.items():
+        normed_key = get_normed_key(opt)
+        try:
+            validator = _get_validator(opt, URI_OPTIONS_VALIDATOR_MAP, normed_key=normed_key)
+            validated = validator(opt, value)
+        except (ValueError, TypeError, ConfigurationError) as exc:
+            if (
+                normed_key == "authmechanismproperties"
+                and any(p in str(exc) for p in _MECH_PROP_MUST_RAISE)
+                and "is not a supported auth mechanism property" not in str(exc)
+            ):
+                raise
+            if warn:
+                warnings.warn(str(exc), stacklevel=2)
+            else:
+                raise
+        else:
+            validated_options[get_setter_key(normed_key)] = validated
+    return validated_options

-    def __set_safe(self, value):
-        """Property setter for safe"""
-        warnings.warn("safe is deprecated. Please use the"
-                      " 'w' write_concern option instead.",
-                      DeprecationWarning, stacklevel=2)
-        self.__safe = validate_boolean('safe', value)

-    safe = property(__get_safe, __set_safe)
+def _esc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any:
+    return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc")

-    def get_lasterror_options(self):
-        """DEPRECATED: Use :attr:`write_concern` instead.

-        Returns a dict of the getlasterror options set on this instance.
+def _ecoc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any:
+    return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc")

-        .. versionchanged:: 2.4
-           Deprecated get_lasterror_options.
-        .. versionadded:: 2.0
-        """
-        warnings.warn("get_lasterror_options is deprecated. Please use "
-                      "write_concern instead.", DeprecationWarning,
-                      stacklevel=2)
-        return self.__write_concern.copy()

-    def set_lasterror_options(self, **kwargs):
-        """DEPRECATED: Use :attr:`write_concern` instead.
+# List of write-concern-related options.
+WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"])

-        Set getlasterror options for this instance.
-        Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
-        and fsync=<bool>. Implies safe=True.
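To make the warn/raise split in `get_validated_options` concrete, here is a short editorial sketch (it assumes the function is importable from `pymongo.common`; the lowercased result keys follow from the plain-dict `get_setter_key` branch above):

```python
import warnings

from pymongo.common import get_validated_options

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    opts = get_validated_options({"maxPoolSize": "50", "bogus": 1}, warn=True)

print(opts)         # {'maxpoolsize': 50} -- valid entries are coerced and kept
print(len(caught))  # 1 -- the unknown option is dropped with a warning

# With warn=False the same input raises ConfigurationError instead.
```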
+class BaseObject: + """A base class that provides attributes and methods common + to multiple pymongo classes. - :Parameters: - - `**kwargs`: Options should be passed as keyword - arguments (e.g. w=2, fsync=True) + SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. + """ - .. versionchanged:: 2.4 - Deprecated set_lasterror_options. - .. versionadded:: 2.0 + def __init__( + self, + codec_options: CodecOptions[Any], + read_preference: _ServerMode, + write_concern: WriteConcern, + read_concern: ReadConcern, + ) -> None: + if not isinstance(codec_options, CodecOptions): + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + self._codec_options = codec_options + + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + self._read_preference = read_preference + + if not isinstance(write_concern, WriteConcern): + raise TypeError( + f"write_concern must be an instance of pymongo.write_concern.WriteConcern, not {type(write_concern)}" + ) + self._write_concern = write_concern + + if not isinstance(read_concern, ReadConcern): + raise TypeError( + f"read_concern must be an instance of pymongo.read_concern.ReadConcern, not {type(read_concern)}" + ) + self._read_concern = read_concern + + @property + def codec_options(self) -> CodecOptions[Any]: + """Read only access to the :class:`~bson.codec_options.CodecOptions` + of this instance. """ - warnings.warn("set_lasterror_options is deprecated. Please use " - "write_concern instead.", DeprecationWarning, - stacklevel=2) - for key, value in kwargs.iteritems(): - self.__set_safe_option(key, value) + return self._codec_options - def unset_lasterror_options(self, *options): - """DEPRECATED: Use :attr:`write_concern` instead. + @property + def write_concern(self) -> WriteConcern: + """Read only access to the :class:`~pymongo.write_concern.WriteConcern` + of this instance. - Unset getlasterror options for this instance. + .. versionchanged:: 3.0 + The :attr:`write_concern` attribute is now read only. + """ + return self._write_concern - If no options are passed unsets all getlasterror options. - This does not set `safe` to False. + def _write_concern_for(self, session: Optional[_AgnosticClientSession]) -> WriteConcern: + """Read only access to the write concern of this instance or session.""" + # Override this operation's write concern with the transaction's. + if session and session.in_transaction: + return DEFAULT_WRITE_CONCERN + return self.write_concern - :Parameters: - - `*options`: The list of options to unset. + @property + def read_preference(self) -> _ServerMode: + """Read only access to the read preference of this instance. - .. versionchanged:: 2.4 - Deprecated unset_lasterror_options. - .. versionadded:: 2.0 + .. versionchanged:: 3.0 + The :attr:`read_preference` attribute is now read only. """ - warnings.warn("unset_lasterror_options is deprecated. Please use " - "write_concern instead.", DeprecationWarning, - stacklevel=2) - if len(options): - for option in options: - self.__write_concern.pop(option, None) - else: - self.__write_concern = WriteConcern() + return self._read_preference + + def _read_preference_for(self, session: Optional[_AgnosticClientSession]) -> _ServerMode: + """Read only access to the read preference of this instance or session.""" + # Override this operation's read preference with the transaction's. 
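+        # (Editor's note) _txn_read_preference() returns None when the session
+        # is not in a transaction, so the `or` below falls back to this
+        # instance's own read preference.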
+ if session: + return session._txn_read_preference() or self._read_preference + return self._read_preference - def _get_wc_override(self): - """Get write concern override. + @property + def read_concern(self) -> ReadConcern: + """Read only access to the :class:`~pymongo.read_concern.ReadConcern` + of this instance. - Used in internal methods that **must** do acknowledged write ops. - We don't want to override user write concern options if write concern - is already enabled. + .. versionadded:: 3.2 """ - if self.safe and self.__write_concern.get('w') != 0: - return {} - return {'w': 1} + return self._read_concern - def _get_write_mode(self, safe=None, **options): - """Get the current write mode. - Determines if the current write is safe or not based on the - passed in or inherited safe value, write_concern values, or - passed options. +class _CaseInsensitiveDictionary(MutableMapping[str, Any]): + def __init__(self, *args: Any, **kwargs: Any): + self.__casedkeys: dict[str, Any] = {} + self.__data: dict[str, Any] = {} + self.update(dict(*args, **kwargs)) - :Parameters: - - `safe`: check that the operation succeeded? - - `**options`: overriding write concern options. + def __contains__(self, key: str) -> bool: # type: ignore[override] + return key.lower() in self.__data - .. versionadded:: 2.3 - """ - # Don't ever send w=1 to the server. - def pop1(dct): - if dct.get('w') == 1: - dct.pop('w') - return dct - - if safe is not None: - warnings.warn("The safe parameter is deprecated. Please use " - "write concern options instead.", DeprecationWarning, - stacklevel=3) - validate_boolean('safe', safe) - - # Passed options override collection level defaults. - if safe is not None or options: - if safe or options: - if not options: - options = self.__write_concern.copy() - # Backwards compatability edge case. Call getLastError - # with no options if safe=True was passed but collection - # level defaults have been disabled with w=0. - # These should be equivalent: - # Connection(w=0).foo.bar.insert({}, safe=True) - # MongoClient(w=0).foo.bar.insert({}, w=1) - if options.get('w') == 0: - return True, {} - # Passing w=0 overrides passing safe=True. - return options.get('w') != 0, pop1(options) - return False, {} - - # Fall back to collection level defaults. 
- # w=0 takes precedence over self.safe = True - if self.__write_concern.get('w') == 0: - return False, {} - elif self.safe or self.__write_concern.get('w', 0) != 0: - return True, pop1(self.__write_concern.copy()) - - return False, {} + def __len__(self) -> int: + return len(self.__data) + + def __iter__(self) -> Iterator[str]: + return (key for key in self.__casedkeys) + + def __repr__(self) -> str: + return str({self.__casedkeys[k]: self.__data[k] for k in self}) + + def __setitem__(self, key: str, value: Any) -> None: + lc_key = key.lower() + self.__casedkeys[lc_key] = key + self.__data[lc_key] = value + + def __getitem__(self, key: str) -> Any: + return self.__data[key.lower()] + + def __delitem__(self, key: str) -> None: + lc_key = key.lower() + del self.__casedkeys[lc_key] + del self.__data[lc_key] + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, abc.Mapping): + return NotImplemented + if len(self) != len(other): + return False + for key in other: # noqa: SIM110 + if self[key] != other[key]: + return False + + return True + + def get(self, key: str, default: Optional[Any] = None) -> Any: + return self.__data.get(key.lower(), default) + + def pop(self, key: str, *args: Any, **kwargs: Any) -> Any: + lc_key = key.lower() + self.__casedkeys.pop(lc_key, None) + return self.__data.pop(lc_key, *args, **kwargs) + + def popitem(self) -> tuple[str, Any]: + lc_key, cased_key = self.__casedkeys.popitem() + value = self.__data.pop(lc_key) + return cased_key, value + + def clear(self) -> None: + self.__casedkeys.clear() + self.__data.clear() + + @overload + def setdefault(self, key: str, default: None = None) -> Optional[Any]: + ... + + @overload + def setdefault(self, key: str, default: Any) -> Any: + ... + + def setdefault(self, key: str, default: Optional[Any] = None) -> Optional[Any]: + lc_key = key.lower() + if key in self: + return self.__data[lc_key] + else: + self.__casedkeys[lc_key] = key + self.__data[lc_key] = default + return default + + def update(self, other: Mapping[str, Any]) -> None: # type: ignore[override] + if isinstance(other, _CaseInsensitiveDictionary): + for key in other: + self[other.cased_key(key)] = other[key] + else: + for key in other: + self[key] = other[key] + + def cased_key(self, key: str) -> Any: + return self.__casedkeys[key.lower()] + + +def has_c() -> bool: + """Is the C extension installed?""" + try: + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + + return True + except ImportError: + return False + + +class Version(tuple[int, ...]): + """A class that can be used to compare version strings.""" + + def __new__(cls, *version: int) -> Version: + padded_version = cls._padded(version, 4) + return super().__new__(cls, tuple(padded_version)) + + @classmethod + def _padded(cls, iter: Any, length: int, padding: int = 0) -> list[int]: + as_list = list(iter) + if len(as_list) < length: + for _ in range(length - len(as_list)): + as_list.append(padding) + return as_list + + @classmethod + def from_string(cls, version_string: str) -> Version: + mod = 0 + bump_patch_level = False + if version_string.endswith("+"): + version_string = version_string[0:-1] + mod = 1 + elif version_string.endswith("-pre-"): + version_string = version_string[0:-5] + mod = -1 + elif version_string.endswith("-"): + version_string = version_string[0:-1] + mod = -1 + # Deal with .devX substrings + if ".dev" in version_string: + version_string = version_string[0 : version_string.find(".dev")] + mod = -1 + # Deal with '-rcX' substrings + if "-rc" in 
version_string: + version_string = version_string[0 : version_string.find("-rc")] + mod = -1 + # Deal with git describe generated substrings + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] + mod = -1 + bump_patch_level = True + + version = [int(part) for part in version_string.split(".")] + version = cls._padded(version, 3) + # Make from_string and from_version_array agree. For example: + # MongoDB Enterprise > db.runCommand('buildInfo').versionArray + # [ 3, 2, 1, -100 ] + # MongoDB Enterprise > db.runCommand('buildInfo').version + # 3.2.0-97-g1ef94fe + if bump_patch_level: + version[-1] += 1 + version.append(mod) + + return Version(*version) + + @classmethod + def from_version_array(cls, version_array: Any) -> Version: + version = list(version_array) + if version[-1] < 0: + version[-1] = -1 + version = cls._padded(version, 3) + return Version(*version) + + def at_least(self, *other_version: Any) -> bool: + return self >= Version(*other_version) + + def __str__(self) -> str: + return ".".join(map(str, self)) + + +def check_for_min_version(package_name: str) -> tuple[str, str, bool]: + """Test whether an installed package is of the desired version.""" + package_version_str = version(package_name) + package_version = Version.from_string(package_version_str) + # Dependency is expected to be in one of the forms: + # "pymongocrypt<2.0.0,>=1.13.0; extra == 'encryption'" + # 'dnspython<3.0.0,>=1.16.0' + # + requirements = requires("pymongo") + assert requirements is not None + requirement = [i for i in requirements if i.startswith(package_name)][0] # noqa: RUF015 + if ";" in requirement: + requirement = requirement.split(";")[0] + required_version = requirement[requirement.find(">=") + 2 :] + is_valid = package_version >= Version.from_string(required_version) + return package_version_str, required_version, is_valid diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py new file mode 100644 index 0000000000..64ffe052ec --- /dev/null +++ b/pymongo/compression_support.py @@ -0,0 +1,175 @@ +# Copyright 2018 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import warnings +from typing import Any, Iterable, Optional, Union + +from pymongo.hello import HelloCompat +from pymongo.helpers_shared import _SENSITIVE_COMMANDS + +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} +_NO_COMPRESSION.update(_SENSITIVE_COMMANDS) + + +def _have_snappy() -> bool: + try: + import snappy # type:ignore[import-untyped] # noqa: F401 + + return True + except ImportError: + return False + + +def _have_zlib() -> bool: + try: + import zlib # noqa: F401 + + return True + except ImportError: + return False + + +def _have_zstd() -> bool: + try: + import zstandard # noqa: F401 + + return True + except ImportError: + return False + + +def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[str]: + try: + # `value` is string. 
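+        # (Editor's note) e.g. "zlib,snappy" -> ["zlib", "snappy"]; a non-string
+        # iterable has no .split() and is handled by the except branch below.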
+ compressors = value.split(",") # type: ignore[union-attr] + except AttributeError: + # `value` is an iterable. + compressors = list(value) + + for compressor in compressors[:]: + if compressor not in _SUPPORTED_COMPRESSORS: + compressors.remove(compressor) + warnings.warn(f"Unsupported compressor: {compressor}", stacklevel=2) + elif compressor == "snappy" and not _have_snappy(): + compressors.remove(compressor) + warnings.warn( + "Wire protocol compression with snappy is not available. " + "You must install the python-snappy module for snappy support.", + stacklevel=2, + ) + elif compressor == "zlib" and not _have_zlib(): + compressors.remove(compressor) + warnings.warn( + "Wire protocol compression with zlib is not available. " + "The zlib module is not available.", + stacklevel=2, + ) + elif compressor == "zstd" and not _have_zstd(): + compressors.remove(compressor) + warnings.warn( + "Wire protocol compression with zstandard is not available. " + "You must install the zstandard module for zstandard support.", + stacklevel=2, + ) + return compressors + + +def validate_zlib_compression_level(option: str, value: Any) -> int: + try: + level = int(value) + except Exception: + raise TypeError(f"{option} must be an integer, not {value!r}") from None + if level < -1 or level > 9: + raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) + return level + + +class CompressionSettings: + def __init__(self, compressors: list[str], zlib_compression_level: int): + self.compressors = compressors + self.zlib_compression_level = zlib_compression_level + + def get_compression_context( + self, compressors: Optional[list[str]] + ) -> Union[SnappyContext, ZlibContext, ZstdContext, None]: + if compressors: + chosen = compressors[0] + if chosen == "snappy": + return SnappyContext() + elif chosen == "zlib": + return ZlibContext(self.zlib_compression_level) + elif chosen == "zstd": + return ZstdContext() + return None + return None + + +class SnappyContext: + compressor_id = 1 + + @staticmethod + def compress(data: bytes) -> bytes: + import snappy + + return snappy.compress(data) + + +class ZlibContext: + compressor_id = 2 + + def __init__(self, level: int): + self.level = level + + def compress(self, data: bytes) -> bytes: + import zlib + + return zlib.compress(data, self.level) + + +class ZstdContext: + compressor_id = 3 + + @staticmethod + def compress(data: bytes) -> bytes: + # ZstdCompressor is not thread safe. + # TODO: Use a pool? + + import zstandard + + return zstandard.ZstdCompressor().compress(data) + + +def decompress(data: bytes | memoryview, compressor_id: int) -> bytes: + if compressor_id == SnappyContext.compressor_id: + # python-snappy doesn't support the buffer interface. + # https://github.com/andrix/python-snappy/issues/65 + # This only matters when data is a memoryview since + # id(bytes(data)) == id(data) when data is a bytes. + import snappy + + return snappy.uncompress(bytes(data)) + elif compressor_id == ZlibContext.compressor_id: + import zlib + + return zlib.decompress(data) + elif compressor_id == ZstdContext.compressor_id: + # ZstdDecompressor is not thread safe. + # TODO: Use a pool? + import zstandard + + return zstandard.ZstdDecompressor().decompress(data) + else: + raise ValueError("Unknown compressorId %d" % (compressor_id,)) diff --git a/pymongo/connection.py b/pymongo/connection.py deleted file mode 100644 index 0b4ec35c91..0000000000 --- a/pymongo/connection.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Tools for connecting to MongoDB. - -.. warning:: - **DEPRECATED:** Please use :mod:`~pymongo.mongo_client` instead. - -.. seealso:: Module :mod:`~pymongo.master_slave_connection` for - connecting to master-slave clusters, and - :doc:`/examples/high_availability` for an example of how to connect - to a replica set, or specify a list of mongos instances for automatic - failover. - -To get a :class:`~pymongo.database.Database` instance from a -:class:`Connection` use either dictionary-style or attribute-style -access: - -.. doctest:: - - >>> from pymongo import Connection - >>> c = Connection() - >>> c.test_database - Database(Connection('localhost', 27017), u'test_database') - >>> c['test-database'] - Database(Connection('localhost', 27017), u'test-database') -""" -from pymongo.mongo_client import MongoClient -from pymongo.errors import ConfigurationError - - -class Connection(MongoClient): - """Connection to MongoDB. - """ - - def __init__(self, host=None, port=None, max_pool_size=None, - network_timeout=None, document_class=dict, - tz_aware=False, _connect=True, **kwargs): - """Create a new connection to a single MongoDB instance at *host:port*. - - .. warning:: - **DEPRECATED:** :class:`Connection` is deprecated. Please - use :class:`~pymongo.mongo_client.MongoClient` instead. - - The resultant connection object has connection-pooling built - in. It also performs auto-reconnection when necessary. If an - operation fails because of a connection error, - :class:`~pymongo.errors.ConnectionFailure` is raised. If - auto-reconnection will be performed, - :class:`~pymongo.errors.AutoReconnect` will be - raised. Application code should handle this exception - (recognizing that the operation failed) and then continue to - execute. - - Raises :class:`TypeError` if port is not an instance of - ``int``. Raises :class:`~pymongo.errors.ConnectionFailure` if - the connection cannot be made. - - The `host` parameter can be a full `mongodb URI - `_, in addition to - a simple hostname. It can also be a list of hostnames or - URIs. Any port specified in the host string(s) will override - the `port` parameter. If multiple mongodb URIs containing - database or auth information are passed, the last database, - username, and password present will be used. For username and - passwords reserved characters like ':', '/', '+' and '@' must be - escaped following RFC 2396. - - :Parameters: - - `host` (optional): hostname or IP address of the - instance to connect to, or a mongodb URI, or a list of - hostnames / mongodb URIs. If `host` is an IPv6 literal - it must be enclosed in '[' and ']' characters following - the RFC2732 URL syntax (e.g. '[::1]' for localhost) - - `port` (optional): port number on which to connect - - `max_pool_size` (optional): The maximum number of connections - that the pool will open simultaneously. If this is set, operations - will block if there are `max_pool_size` outstanding connections - from the pool. 
By default the pool size is unlimited. - - `network_timeout` (optional): timeout (in seconds) to use - for socket operations - default is no timeout - - `document_class` (optional): default class to use for - documents returned from queries on this connection - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`Connection` will be timezone - aware (otherwise they will be naive) - - | **Other optional parameters can be passed as keyword arguments:** - - - `socketTimeoutMS`: (integer) How long (in milliseconds) a send or - receive on a socket can take before timing out. Defaults to ``None`` - (no timeout). - - `connectTimeoutMS`: (integer) How long (in milliseconds) a - connection can take to be opened before timing out. Defaults to - ``20000``. - - `waitQueueTimeoutMS`: (integer) How long (in milliseconds) a - thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer) Multiplied by max_pool_size to give - the number of threads allowed to wait for a socket at one time. - Defaults to ``None`` (no waiters). - - - `auto_start_request`: If ``True`` (the default), each thread that - accesses this Connection has a socket allocated to it for the - thread's lifetime. This ensures consistent reads, even if you read - after an unsafe write. - - `use_greenlets`: if ``True``, :meth:`start_request()` will ensure - that the current greenlet uses the same socket for all operations - until :meth:`end_request()` - - | **Write Concern options:** - - - `safe`: :class:`Connection` **disables** acknowledgement of write - operations. Use ``safe=True`` to enable write acknowledgement. - - `w`: (integer or string) If this is a replica set, write operations - will block until they have been replicated to the specified number - or tagged set of servers. `w=` always includes the replica set - primary (e.g. w=3 means write to the primary and wait until - replicated to **two** secondaries). Implies safe=True. - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. Implies safe=True. - - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. Implies safe=True. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. Implies safe=True. - - | **Replica-set keyword arguments for connecting with a replica-set - - either directly or via a mongos:** - | (ignored by standalone mongod instances) - - - `slave_okay` or `slaveOk` (deprecated): Use `read_preference` - instead. - - `replicaSet`: (string) The name of the replica-set to connect to. - The driver will verify that the replica-set it connects to matches - this name. 
Implies that the hosts specified are a seed list and the - driver should attempt to find all members of the set. *Ignored by - mongos*. - - `read_preference`: The read preference for this client. If - connecting to a secondary then a read preference mode *other* than - PRIMARY is required - otherwise all queries will throw a - :class:`~pymongo.errors.AutoReconnect` "not master" error. - See :class:`~pymongo.read_preferences.ReadPreference` for all - available read preference options. - - `tag_sets`: Ignored unless connecting to a replica-set via mongos. - Specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags. - - | **SSL configuration:** - - - `ssl`: If ``True``, create the connection to the server using SSL. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. If included with the ``certfile` then - only the ``ssl_certfile`` is needed. Implies ``ssl=True``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``ssl=True``. - - `ssl_cert_reqs`: The parameter cert_reqs specifies whether a - certificate is required from the other side of the connection, - and whether it will be validated if provided. It must be one of the - three values ``ssl.CERT_NONE`` (certificates ignored), - ``ssl.CERT_OPTIONAL`` (not required, but validated if provided), or - ``ssl.CERT_REQUIRED`` (required and validated). If the value of - this parameter is not ``ssl.CERT_NONE``, then the ``ssl_ca_certs`` - parameter must point to a file of CA certificates. - Implies ``ssl=True``. - - `ssl_ca_certs`: The ca_certs file contains a set of concatenated - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``ssl=True``. - - .. seealso:: :meth:`end_request` - .. versionchanged:: 2.5 - Added additional ssl options - .. versionchanged:: 2.3 - Added support for failover between mongos seed list members. - .. versionchanged:: 2.2 - Added `auto_start_request` option back. Added `use_greenlets` - option. - .. versionchanged:: 2.1 - Support `w` = integer or string. - Added `ssl` option. - DEPRECATED slave_okay/slaveOk. - .. versionchanged:: 2.0 - `slave_okay` is a pure keyword argument. Added support for safe, - and getlasterror options as keyword arguments. - .. versionchanged:: 1.11 - Added `max_pool_size`. Completely removed previously deprecated - `pool_size`, `auto_start_request` and `timeout` parameters. - .. versionchanged:: 1.8 - The `host` parameter can now be a full `mongodb URI - `_, in addition - to a simple hostname. It can also be a list of hostnames or - URIs. - .. versionadded:: 1.8 - The `tz_aware` parameter. - .. versionadded:: 1.7 - The `document_class` parameter. - .. versionadded:: 1.1 - The `network_timeout` parameter. - - .. 
mongodoc:: connections - """ - if network_timeout is not None: - if (not isinstance(network_timeout, (int, float)) or - network_timeout <= 0): - raise ConfigurationError("network_timeout must " - "be a positive integer") - kwargs['socketTimeoutMS'] = network_timeout * 1000 - - kwargs['auto_start_request'] = kwargs.get('auto_start_request', True) - kwargs['safe'] = kwargs.get('safe', False) - - super(Connection, self).__init__(host, port, - max_pool_size, document_class, tz_aware, _connect, **kwargs) - - def __repr__(self): - if len(self.nodes) == 1: - return "Connection(%r, %r)" % (self.host, self.port) - else: - return "Connection(%r)" % ["%s:%d" % n for n in self.nodes] - - def next(self): - raise TypeError("'Connection' object is not iterable") diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 908b02c3f5..869adddc37 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1086 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Cursor class to iterate over Mongo query results.""" -import copy -from collections import deque +"""Re-import of synchronous Cursor API for compatibility.""" +from __future__ import annotations -from bson import RE_TYPE -from bson.code import Code -from bson.son import SON -from pymongo import helpers, message, read_preferences -from pymongo.read_preferences import ReadPreference, secondary_ok_commands -from pymongo.errors import (AutoReconnect, - CursorNotFound, - InvalidOperation) +from pymongo.cursor_shared import * # noqa: F403 +from pymongo.synchronous.cursor import * # noqa: F403 +from pymongo.synchronous.cursor import __doc__ as original_doc -_QUERY_OPTIONS = { - "tailable_cursor": 2, - "slave_okay": 4, - "oplog_replay": 8, - "no_timeout": 16, - "await_data": 32, - "exhaust": 64, - "partial": 128} - - -# This has to be an old style class due to -# http://bugs.jython.org/issue1057 -class _SocketManager: - """Used with exhaust cursors to ensure the socket is returned. - """ - def __init__(self, sock, pool): - self.sock = sock - self.pool = pool - self.__closed = False - - def __del__(self): - self.close() - - def close(self): - """Return this instance's socket to the connection pool. - """ - if not self.__closed: - self.__closed = True - self.pool.maybe_return_socket(self.sock) - self.sock, self.pool = None, None - - -# TODO might be cool to be able to do find().include("foo") or -# find().exclude(["bar", "baz"]) or find().slice("a", 1, 2) as an -# alternative to the fields specifier. -class Cursor(object): - """A cursor / iterator over Mongo query results. - """ - - def __init__(self, collection, spec=None, fields=None, skip=0, limit=0, - timeout=True, snapshot=False, tailable=False, sort=None, - max_scan=None, as_class=None, slave_okay=False, - await_data=False, partial=False, manipulate=True, - read_preference=ReadPreference.PRIMARY, - tag_sets=[{}], secondary_acceptable_latency_ms=None, - exhaust=False, compile_re=True, _must_use_master=False, - _uuid_subtype=None, **kwargs): - """Create a new cursor. - - Should not be called directly by application developers - see - :meth:`~pymongo.collection.Collection.find` instead. - - .. 
mongodoc:: cursors - """ - self.__id = None - - if spec is None: - spec = {} - - if not isinstance(spec, dict): - raise TypeError("spec must be an instance of dict") - if not isinstance(skip, int): - raise TypeError("skip must be an instance of int") - if not isinstance(limit, int): - raise TypeError("limit must be an instance of int") - if not isinstance(timeout, bool): - raise TypeError("timeout must be an instance of bool") - if not isinstance(snapshot, bool): - raise TypeError("snapshot must be an instance of bool") - if not isinstance(tailable, bool): - raise TypeError("tailable must be an instance of bool") - if not isinstance(slave_okay, bool): - raise TypeError("slave_okay must be an instance of bool") - if not isinstance(await_data, bool): - raise TypeError("await_data must be an instance of bool") - if not isinstance(partial, bool): - raise TypeError("partial must be an instance of bool") - if not isinstance(exhaust, bool): - raise TypeError("exhaust must be an instance of bool") - - if fields is not None: - if not fields: - fields = {"_id": 1} - if not isinstance(fields, dict): - fields = helpers._fields_list_to_dict(fields) - - if as_class is None: - as_class = collection.database.connection.document_class - - self.__collection = collection - self.__spec = spec - self.__fields = fields - self.__skip = skip - self.__limit = limit - self.__max_time_ms = None - self.__batch_size = 0 - self.__max = None - self.__min = None - - # Exhaust cursor support - if self.__collection.database.connection.is_mongos and exhaust: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') - if limit and exhaust: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__exhaust = exhaust - self.__exhaust_mgr = None - - # This is ugly. People want to be able to do cursor[5:5] and - # get an empty result set (old behavior was an - # exception). It's hard to do that right, though, because the - # server uses limit(0) to mean 'no limit'. So we set __empty - # in that case and check for it when iterating. We also unset - # it anytime we change __limit. - self.__empty = False - - self.__snapshot = snapshot - self.__ordering = sort and helpers._index_document(sort) or None - self.__max_scan = max_scan - self.__explain = False - self.__hint = None - self.__comment = None - self.__as_class = as_class - self.__slave_okay = slave_okay - self.__manipulate = manipulate - self.__read_preference = read_preference - self.__tag_sets = tag_sets - self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms - self.__tz_aware = collection.database.connection.tz_aware - self.__compile_re = compile_re - self.__must_use_master = _must_use_master - self.__uuid_subtype = _uuid_subtype or collection.uuid_subtype - - self.__data = deque() - self.__connection_id = None - self.__retrieved = 0 - self.__killed = False - - self.__query_flags = 0 - if tailable: - self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"] - if not timeout: - self.__query_flags |= _QUERY_OPTIONS["no_timeout"] - if tailable and await_data: - self.__query_flags |= _QUERY_OPTIONS["await_data"] - if exhaust: - self.__query_flags |= _QUERY_OPTIONS["exhaust"] - if partial: - self.__query_flags |= _QUERY_OPTIONS["partial"] - - # this is for passing network_timeout through if it's specified - # need to use kwargs as None is a legit value for network_timeout - self.__kwargs = kwargs - - @property - def collection(self): - """The :class:`~pymongo.collection.Collection` that this - :class:`Cursor` is iterating. 
- - .. versionadded:: 1.1 - """ - return self.__collection - - @property - def conn_id(self): - """The server/client/pool this cursor lives on. - - Could be (host, port), -1, or None depending on which - client class executed the initial query and whether this - cursor has been advanced at all. - """ - return self.__connection_id - - @property - def retrieved(self): - """The number of documents retrieved so far. - """ - return self.__retrieved - - def __del__(self): - if self.__id and not self.__killed: - self.__die() - - def rewind(self): - """Rewind this cursor to its unevaluated state. - - Reset this cursor if it has been partially or completely evaluated. - Any options that are present on the cursor will remain in effect. - Future iterating performed on this cursor will cause new queries to - be sent to the server, even if the resultant data has already been - retrieved by this cursor. - """ - self.__data = deque() - self.__id = None - self.__connection_id = None - self.__retrieved = 0 - self.__killed = False - - return self - - def clone(self): - """Get a clone of this cursor. - - Returns a new Cursor instance with options matching those that have - been set on the current instance. The clone will be completely - unevaluated, even if the current instance has been partially or - completely evaluated. - """ - return self._clone(True) - - def _clone(self, deepcopy=True): - clone = self._clone_base() - values_to_clone = ("spec", "fields", "skip", "limit", "max_time_ms", - "comment", "max", "min", - "snapshot", "ordering", "explain", "hint", - "batch_size", "max_scan", "as_class", "slave_okay", - "manipulate", "read_preference", "tag_sets", - "secondary_acceptable_latency_ms", - "must_use_master", "uuid_subtype", "compile_re", - "query_flags", "kwargs") - data = dict((k, v) for k, v in self.__dict__.iteritems() - if k.startswith('_Cursor__') and k[9:] in values_to_clone) - if deepcopy: - data = self._deepcopy(data) - clone.__dict__.update(data) - return clone - - def _clone_base(self): - """Creates an empty Cursor object for information to be copied into. - """ - return Cursor(self.__collection) - - def __die(self): - """Closes this cursor. - """ - if self.__id and not self.__killed: - if self.__exhaust and self.__exhaust_mgr: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - self.__exhaust_mgr.sock.close() - else: - connection = self.__collection.database.connection - if self.__connection_id is not None: - connection.close_cursor(self.__id, self.__connection_id) - else: - connection.close_cursor(self.__id) - if self.__exhaust and self.__exhaust_mgr: - self.__exhaust_mgr.close() - self.__killed = True - - def close(self): - """Explicitly close / kill this cursor. Required for PyPy, Jython and - other Python implementations that don't use reference counting - garbage collection. - """ - self.__die() - - def __query_spec(self): - """Get the spec to use for a query.
- """ - operators = {} - if self.__ordering: - operators["$orderby"] = self.__ordering - if self.__explain: - operators["$explain"] = True - if self.__hint: - operators["$hint"] = self.__hint - if self.__comment: - operators["$comment"] = self.__comment - if self.__snapshot: - operators["$snapshot"] = True - if self.__max_scan: - operators["$maxScan"] = self.__max_scan - if self.__max_time_ms is not None: - operators["$maxTimeMS"] = self.__max_time_ms - if self.__max: - operators["$max"] = self.__max - if self.__min: - operators["$min"] = self.__min - # Only set $readPreference if it's something other than - # PRIMARY to avoid problems with mongos versions that - # don't support read preferences. - if (self.__collection.database.connection.is_mongos and - self.__read_preference != ReadPreference.PRIMARY): - - has_tags = self.__tag_sets and self.__tag_sets != [{}] - - # For maximum backwards compatibility, don't set $readPreference - # for SECONDARY_PREFERRED unless tags are in use. Just rely on - # the slaveOkay bit (set automatically if read preference is not - # PRIMARY), which has the same behavior. - if (self.__read_preference != ReadPreference.SECONDARY_PREFERRED or - has_tags): - - read_pref = { - 'mode': read_preferences.mongos_mode(self.__read_preference) - } - if has_tags: - read_pref['tags'] = self.__tag_sets - - operators['$readPreference'] = read_pref - - if operators: - # Make a shallow copy so we can cleanly rewind or clone. - spec = self.__spec.copy() - - # Only commands that can be run on secondaries should have any - # operators added to the spec. Command queries can be issued - # by db.command or calling find_one on $cmd directly - if self.collection.name == "$cmd": - # Don't change commands that can't be sent to secondaries - command_name = spec and spec.keys()[0].lower() or "" - if command_name not in secondary_ok_commands: - return spec - elif command_name == 'mapreduce': - # mapreduce shouldn't be changed if its not inline - out = spec.get('out') - if not isinstance(out, dict) or not out.get('inline'): - return spec - - # White-listed commands must be wrapped in $query. - if "$query" not in spec: - # $query has to come first - spec = SON([("$query", spec)]) - - if not isinstance(spec, SON): - # Ensure the spec is SON. As order is important this will - # ensure its set before merging in any extra operators. - spec = SON(spec) - - spec.update(operators) - return spec - # Have to wrap with $query if "query" is the first key. - # We can't just use $query anytime "query" is a key as - # that breaks commands like count and find_and_modify. - # Checking spec.keys()[0] covers the case that the spec - # was passed as an instance of SON or OrderedDict. - elif ("query" in self.__spec and - (len(self.__spec) == 1 or self.__spec.keys()[0] == "query")): - return SON({"$query": self.__spec}) - - return self.__spec - - def __query_options(self): - """Get the query options string to use for this query. - """ - options = self.__query_flags - if (self.__slave_okay - or self.__read_preference != ReadPreference.PRIMARY - ): - options |= _QUERY_OPTIONS["slave_okay"] - return options - - def __check_okay_to_chain(self): - """Check if it is okay to chain more options onto this cursor. - """ - if self.__retrieved or self.__id is not None: - raise InvalidOperation("cannot set options after executing query") - - def add_option(self, mask): - """Set arbitrary query flags using a bitmask. 
- - To set the tailable flag: - cursor.add_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["slave_okay"]: - self.__slave_okay = True - if mask & _QUERY_OPTIONS["exhaust"]: - if self.__limit: - raise InvalidOperation("Can't use limit and exhaust together.") - if self.__collection.database.connection.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') - self.__exhaust = True - - self.__query_flags |= mask - return self - - def remove_option(self, mask): - """Unset arbitrary query flags using a bitmask. - - To unset the tailable flag: - cursor.remove_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["slave_okay"]: - self.__slave_okay = False - if mask & _QUERY_OPTIONS["exhaust"]: - self.__exhaust = False - - self.__query_flags &= ~mask - return self - - def limit(self, limit): - """Limits the number of results to be returned by this cursor. - - Raises :exc:`TypeError` if `limit` is not an integer. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. The last `limit` applied to this cursor - takes precedence. A limit of ``0`` is equivalent to no limit. - - :Parameters: - - `limit`: the number of results to return - - .. mongodoc:: limit - """ - if not isinstance(limit, (int, long)): - raise TypeError("limit must be an integer") - if self.__exhaust: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__check_okay_to_chain() - - self.__empty = False - self.__limit = limit - return self - - def batch_size(self, batch_size): - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size cannot override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e. - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. The last `batch_size` - applied to this cursor takes precedence. - - :Parameters: - - `batch_size`: The size of each batch of results requested. - - .. versionadded:: 1.9 - """ - if not isinstance(batch_size, (int, long)): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - self.__check_okay_to_chain() - - self.__batch_size = batch_size == 1 and 2 or batch_size - return self - - def skip(self, skip): - """Skips the first `skip` results of this cursor. - - Raises :exc:`TypeError` if `skip` is not an integer. Raises - :exc:`ValueError` if `skip` is less than ``0``. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has - already been used. The last `skip` applied to this cursor takes - precedence.
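The chainable modifiers above (``limit``, ``batch_size``, ``skip``) all return the cursor itself and compose freely before the first batch is fetched; a minimal sketch of typical chaining (the collection and filter are illustrative)::

    # All modifiers must be applied before the cursor is iterated.
    cursor = db.test.find({"active": True}).skip(20).limit(10).batch_size(5)
    for doc in cursor:
        print(doc)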
- - :Parameters: - - `skip`: the number of results to skip - """ - if not isinstance(skip, (int, long)): - raise TypeError("skip must be an integer") - if skip < 0: - raise ValueError("skip must be >= 0") - self.__check_okay_to_chain() - - self.__skip = skip - return self - - def max_time_ms(self, max_time_ms): - """Specifies a time limit for a query operation. If the specified - time is exceeded, the operation will be aborted and - :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` - is ``None`` no limit is applied. - - Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. - - :Parameters: - - `max_time_ms`: the time limit after which the operation is aborted - """ - if not isinstance(max_time_ms, (int, long)) and max_time_ms is not None: - raise TypeError("max_time_ms must be an integer or None") - self.__check_okay_to_chain() - - self.__max_time_ms = max_time_ms - return self - - def __getitem__(self, index): - """Get a single document or a slice of documents from this cursor. - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. - - To get a single document use an integral index, e.g.:: - - >>> db.test.find()[50] - - An :class:`IndexError` will be raised if the index is negative - or greater than the number of documents in this cursor. Any - limit previously applied to this cursor will be ignored. - - To get a slice of documents use a slice index, e.g.:: - - >>> db.test.find()[20:25] - - This will return this cursor with a limit of ``5`` and skip of - ``20`` applied. Using a slice index will override any prior - limits or skips applied to this cursor (including those - applied through previous calls to this method). Raises - :class:`IndexError` when the slice has a step, a negative - start value, or a stop value less than or equal to the start - value. - - :Parameters: - - `index`: An integer or slice index to be applied to this cursor - """ - self.__check_okay_to_chain() - self.__empty = False - if isinstance(index, slice): - if index.step is not None: - raise IndexError("Cursor instances do not support slice steps") - - skip = 0 - if index.start is not None: - if index.start < 0: - raise IndexError("Cursor instances do not support " - "negative indices") - skip = index.start - - if index.stop is not None: - limit = index.stop - skip - if limit < 0: - raise IndexError("stop index must be greater than start " - "index for slice %r" % index) - if limit == 0: - self.__empty = True - else: - limit = 0 - - self.__skip = skip - self.__limit = limit - return self - - if isinstance(index, (int, long)): - if index < 0: - raise IndexError("Cursor instances do not support negative " - "indices") - clone = self.clone() - clone.skip(index + self.__skip) - clone.limit(-1) # use a hard limit - for doc in clone: - return doc - raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " - "instances" % index) - - def max_scan(self, max_scan): - """Limit the number of documents to scan when performing the query. - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. Only the last :meth:`max_scan` - applied to this cursor has any effect. - - :Parameters: - - `max_scan`: the maximum number of documents to scan - - .. note:: Requires server version **>= 1.5.1** - - ..
versionadded:: 1.7 - """ - self.__check_okay_to_chain() - self.__max_scan = max_scan - return self - - def max(self, spec): - """Adds `max` operator that specifies upper bound for specific index. - - :Parameters: - - `spec`: a list of field, limit pairs specifying the exclusive - upper bound for all keys of a specific index in order. - - .. versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__max = SON(spec) - return self - - def min(self, spec): - """Adds `min` operator that specifies lower bound for specific index. - - :Parameters: - - `spec`: a list of field, limit pairs specifying the inclusive - lower bound for all keys of a specific index in order. - - .. versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__min = SON(spec) - return self - - def sort(self, key_or_list, direction=None): - """Sorts this cursor's results. - - Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: - - for doc in collection.find().sort('field', pymongo.ASCENDING): - print(doc) - - To sort by multiple fields, pass a list of (key, direction) pairs:: - - for doc in collection.find().sort([ - ('field1', pymongo.ASCENDING), - ('field2', pymongo.DESCENDING)]): - print(doc) - - Beginning with MongoDB version 2.6, text search results can be - sorted by relevance:: - - cursor = db.test.find( - {'$text': {'$search': 'some words'}}, - {'score': {'$meta': 'textScore'}}) - - # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) - - for doc in cursor: - print(doc) - - Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. Only the last :meth:`sort` applied to this - cursor has any effect. - - :Parameters: - - `key_or_list`: a single key or a list of (key, direction) - pairs specifying the keys to sort on - - `direction` (optional): only used if `key_or_list` is a single - key, if not given :data:`~pymongo.ASCENDING` is assumed - """ - self.__check_okay_to_chain() - keys = helpers._index_list(key_or_list, direction) - self.__ordering = helpers._index_document(keys) - return self - - def count(self, with_limit_and_skip=False): - """Get the size of the results set for this query. - - Returns the number of documents in the results set for this query. Does - not take :meth:`limit` and :meth:`skip` into account by default - set - `with_limit_and_skip` to ``True`` if that is the desired behavior. - Raises :class:`~pymongo.errors.OperationFailure` on a database error. - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if `read_preference` is not - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or - :attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or - (deprecated) `slave_okay` is `True`, the count command will be sent to - a secondary or slave. - - :Parameters: - - `with_limit_and_skip` (optional): take any :meth:`limit` or - :meth:`skip` that has been applied to this cursor into account when - getting the count - - .. note:: The `with_limit_and_skip` parameter requires server - version **>= 1.1.4-** - - .. note:: ``count`` ignores ``network_timeout``. For example, the - timeout is ignored in the following code:: - - collection.find({}, network_timeout=1).count() - - .. 
versionadded:: 1.1.1 - The `with_limit_and_skip` parameter. - :meth:`~pymongo.cursor.Cursor.__len__` was deprecated in favor of - calling :meth:`count` with `with_limit_and_skip` set to ``True``. - """ - if not isinstance(with_limit_and_skip, bool): - raise TypeError("with_limit_and_skip must be an instance of bool") - command = {"query": self.__spec, "fields": self.__fields} - - command['read_preference'] = self.__read_preference - command['tag_sets'] = self.__tag_sets - command['secondary_acceptable_latency_ms'] = ( - self.__secondary_acceptable_latency_ms) - command['slave_okay'] = self.__slave_okay - use_master = not self.__slave_okay and not self.__read_preference - command['_use_master'] = use_master - if self.__max_time_ms is not None: - command["maxTimeMS"] = self.__max_time_ms - if self.__comment: - command['$comment'] = self.__comment - - if with_limit_and_skip: - if self.__limit: - command["limit"] = self.__limit - if self.__skip: - command["skip"] = self.__skip - - database = self.__collection.database - r = database.command("count", self.__collection.name, - allowable_errors=["ns missing"], - uuid_subtype=self.__uuid_subtype, - compile_re=self.__compile_re, - **command) - if r.get("errmsg", "") == "ns missing": - return 0 - return int(r["n"]) - - def distinct(self, key): - """Get a list of distinct values for `key` among all documents - in the result set of this query. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in python 3). - - With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`, - if `read_preference` is - not :attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or - (deprecated) `slave_okay` is `True` the distinct command will be sent - to a secondary or slave. - - :Parameters: - - `key`: name of key for which we want to get the distinct values - - .. note:: Requires server version **>= 1.1.3+** - - .. seealso:: :meth:`pymongo.collection.Collection.distinct` - - .. versionadded:: 1.2 - """ - if not isinstance(key, basestring): - raise TypeError("key must be an instance " - "of %s" % (basestring.__name__,)) - - options = {"key": key} - if self.__spec: - options["query"] = self.__spec - - options['read_preference'] = self.__read_preference - options['tag_sets'] = self.__tag_sets - options['secondary_acceptable_latency_ms'] = ( - self.__secondary_acceptable_latency_ms) - options['slave_okay'] = self.__slave_okay - use_master = not self.__slave_okay and not self.__read_preference - options['_use_master'] = use_master - if self.__max_time_ms is not None: - options['maxTimeMS'] = self.__max_time_ms - if self.__comment: - options['$comment'] = self.__comment - - database = self.__collection.database - return database.command("distinct", - self.__collection.name, - uuid_subtype=self.__uuid_subtype, - compile_re=self.__compile_re, - **options)["values"] - - def explain(self): - """Returns an explain plan record for this cursor. - - .. mongodoc:: explain - """ - c = self.clone() - c.__explain = True - - # always use a hard limit for explains - if c.__limit: - c.__limit = -abs(c.__limit) - return c.next() - - def hint(self, index): - """Adds a 'hint', telling Mongo the proper index to use for the query. - - Judicious use of hints can greatly improve query - performance. When doing a query on multiple fields (at least - one of which is indexed) pass the indexed field as a hint to - the query. 
Hinting will not do anything if the corresponding - index does not exist. Raises - :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. - - `index` should be an index as passed to - :meth:`~pymongo.collection.Collection.create_index` - (e.g. ``[('field', ASCENDING)]``). If `index` - is ``None`` any existing hints for this query are cleared. The - last hint applied to this cursor takes precedence over all - others. - - :Parameters: - - `index`: index to hint on (as an index specifier) - """ - self.__check_okay_to_chain() - if index is None: - self.__hint = None - return self - - self.__hint = helpers._index_document(index) - return self - - def comment(self, comment): - """Adds a 'comment' to the cursor. - - http://docs.mongodb.org/manual/reference/operator/comment/ - - :Parameters: - - `comment`: A string or document - - .. versionadded:: 2.7 - """ - self.__check_okay_to_chain() - self.__comment = comment - return self - - def where(self, code): - """Adds a $where clause to this query. - - The `code` argument must be an instance of :class:`basestring` - (:class:`str` in python 3) or :class:`~bson.code.Code` - containing a JavaScript expression. This expression will be - evaluated for each document scanned. Only those documents - for which the expression evaluates to *true* will be returned - as results. The keyword *this* refers to the object currently - being scanned. - - Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. Only the last call to - :meth:`where` applied to a :class:`Cursor` has any effect. - - :Parameters: - - `code`: JavaScript expression to use as a filter - """ - self.__check_okay_to_chain() - if not isinstance(code, Code): - code = Code(code) - - self.__spec["$where"] = code - return self - - def __send_message(self, message): - """Send a query or getmore message and handle the response. - - If message is ``None`` this is an exhaust cursor, which reads - the next result batch off the exhaust socket instead of - sending getMore messages to the server. - """ - client = self.__collection.database.connection - - if message: - kwargs = {"_must_use_master": self.__must_use_master} - kwargs["read_preference"] = self.__read_preference - kwargs["tag_sets"] = self.__tag_sets - kwargs["secondary_acceptable_latency_ms"] = ( - self.__secondary_acceptable_latency_ms) - kwargs['exhaust'] = self.__exhaust - if self.__connection_id is not None: - kwargs["_connection_to_use"] = self.__connection_id - kwargs.update(self.__kwargs) - - try: - res = client._send_message_with_response(message, **kwargs) - self.__connection_id, (response, sock, pool) = res - if self.__exhaust: - self.__exhaust_mgr = _SocketManager(sock, pool) - except AutoReconnect: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. - self.__killed = True - raise - else: # exhaust cursor - no getMore message - response = client._exhaust_next(self.__exhaust_mgr.sock) - - try: - response = helpers._unpack_response(response, self.__id, - self.__as_class, - self.__tz_aware, - self.__uuid_subtype, - self.__compile_re) - except CursorNotFound: - self.__killed = True - # If this is a tailable cursor the error is likely - # due to capped collection roll over.
Setting - # self.__killed to True ensures Cursor.alive will be - # False. No need to re-raise. - if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]: - return - raise - except AutoReconnect: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - self.__killed = True - client.disconnect() - raise - self.__id = response["cursor_id"] - - # starting from doesn't get set on getmore's for tailable cursors - if not (self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]): - assert response["starting_from"] == self.__retrieved, ( - "Result batch started from %s, expected %s" % ( - response['starting_from'], self.__retrieved)) - - self.__retrieved += response["number_returned"] - self.__data = deque(response["data"]) - - if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.__die() - - # Don't wait for garbage collection to call __del__, return the - # socket to the pool now. - if self.__exhaust and self.__id == 0: - self.__exhaust_mgr.close() - - def _refresh(self): - """Refreshes the cursor with more data from Mongo. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. - """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if self.__id is None: # Query - ntoreturn = self.__batch_size - if self.__limit: - if self.__batch_size: - ntoreturn = min(self.__limit, self.__batch_size) - else: - ntoreturn = self.__limit - self.__send_message( - message.query(self.__query_options(), - self.__collection.full_name, - self.__skip, ntoreturn, - self.__query_spec(), self.__fields, - self.__uuid_subtype)) - if not self.__id: - self.__killed = True - elif self.__id: # Get More - if self.__limit: - limit = self.__limit - self.__retrieved - if self.__batch_size: - limit = min(limit, self.__batch_size) - else: - limit = self.__batch_size - - # Exhaust cursors don't send getMore messages. - if self.__exhaust: - self.__send_message(None) - else: - self.__send_message( - message.get_more(self.__collection.full_name, - limit, self.__id)) - - else: # Cursor id is zero nothing else to return - self.__killed = True - - return len(self.__data) - - @property - def alive(self): - """Does this cursor have the potential to return more data? - - This is mostly useful with `tailable cursors - `_ - since they will stop iterating even though they *may* return more - results in the future. - - .. versionadded:: 1.5 - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self): - """Returns the id of the cursor - - Useful if you need to manage cursor ids and want to handle killing - cursors manually using - :meth:`~pymongo.mongo_client.MongoClient.kill_cursors` - - .. versionadded:: 2.2 - """ - return self.__id - - def __iter__(self): - return self - - def next(self): - if self.__empty: - raise StopIteration - db = self.__collection.database - if len(self.__data) or self._refresh(): - if self.__manipulate: - return db._fix_outgoing(self.__data.popleft(), - self.__collection) - else: - return self.__data.popleft() - else: - raise StopIteration - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.__die() - - def __copy__(self): - """Support function for `copy.copy()`. - - .. versionadded:: 2.4 - """ - return self._clone(deepcopy=False) - - def __deepcopy__(self, memo): - """Support function for `copy.deepcopy()`. 
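Both copy hooks delegate to ``_clone``; a short sketch of what they enable (the collection and filter are illustrative)::

    import copy

    cursor = db.test.find({"x": {"$gt": 1}}).limit(5)
    # Yields an unevaluated cursor carrying the same options as the original.
    cursor2 = copy.deepcopy(cursor)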
- - .. versionadded:: 2.4 - """ - return self._clone(deepcopy=True) - - def _deepcopy(self, x, memo=None): - """Deepcopy helper for the data dictionary or list. - - Regular expressions cannot be deep copied but as they are immutable we - don't have to copy them when cloning. - """ - if not hasattr(x, 'items'): - y, is_list, iterator = [], True, enumerate(x) - else: - y, is_list, iterator = {}, False, x.iteritems() - - if memo is None: - memo = {} - val_id = id(x) - if val_id in memo: - return memo.get(val_id) - memo[val_id] = y - - for key, value in iterator: - if isinstance(value, (dict, list)) and not isinstance(value, SON): - value = self._deepcopy(value, memo) - elif not isinstance(value, RE_TYPE): - value = copy.deepcopy(value, memo) - - if is_list: - y.append(value) - else: - if not isinstance(key, RE_TYPE): - key = copy.deepcopy(key, memo) - y[key] = value - return y +__doc__ = original_doc +__all__ = ["Cursor", "CursorType", "RawBatchCursor"] # noqa: F405 diff --git a/pymongo/cursor_manager.py b/pymongo/cursor_manager.py deleted file mode 100644 index 918dc11105..0000000000 --- a/pymongo/cursor_manager.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""DEPRECATED - Different managers to handle when cursors are killed after -they are closed. - -New cursor managers should be defined as subclasses of CursorManager and can be -installed on a connection by calling -`pymongo.connection.Connection.set_cursor_manager`. - -.. versionchanged:: 2.1+ - Deprecated. -""" - -import weakref - - -class CursorManager(object): - """The default cursor manager. - - This manager will kill cursors one at a time as they are closed. - """ - - def __init__(self, connection): - """Instantiate the manager. - - :Parameters: - - `connection`: a Mongo Connection - """ - self.__connection = weakref.ref(connection) - - def close(self, cursor_id): - """Close a cursor by killing it immediately. - - Raises TypeError if cursor_id is not an instance of (int, long). - - :Parameters: - - `cursor_id`: cursor id to close - """ - if not isinstance(cursor_id, (int, long)): - raise TypeError("cursor_id must be an instance of (int, long)") - - self.__connection().kill_cursors([cursor_id]) - - -class BatchCursorManager(CursorManager): - """A cursor manager that kills cursors in batches. - """ - - def __init__(self, connection): - """Instantiate the manager. - - :Parameters: - - `connection`: a Mongo Connection - """ - self.__dying_cursors = [] - self.__max_dying_cursors = 20 - self.__connection = weakref.ref(connection) - - CursorManager.__init__(self, connection) - - def __del__(self): - """Cleanup - be sure to kill any outstanding cursors. - """ - self.__connection().kill_cursors(self.__dying_cursors) - - def close(self, cursor_id): - """Close a cursor by killing it in a batch. - - Raises TypeError if cursor_id is not an instance of (int, long). 
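A sketch of how a manager like this was installed, via the legacy ``set_cursor_manager`` hook named in the module docstring above (connection details are illustrative)::

    from pymongo.connection import Connection
    from pymongo.cursor_manager import BatchCursorManager

    connection = Connection("localhost", 27017)
    # Cursor ids now accumulate and are killed roughly twenty at a time.
    connection.set_cursor_manager(BatchCursorManager)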
- - :Parameters: - - `cursor_id`: cursor id to close - """ - if not isinstance(cursor_id, (int, long)): - raise TypeError("cursor_id must be an instance of (int, long)") - - self.__dying_cursors.append(cursor_id) - - if len(self.__dying_cursors) > self.__max_dying_cursors: - self.__connection().kill_cursors(self.__dying_cursors) - self.__dying_cursors = [] diff --git a/pymongo/cursor_shared.py b/pymongo/cursor_shared.py new file mode 100644 index 0000000000..de6126c4fb --- /dev/null +++ b/pymongo/cursor_shared.py @@ -0,0 +1,94 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants and types shared across all cursor classes.""" +from __future__ import annotations + +from typing import Any, Mapping, Sequence, Tuple, Union + +# These errors mean that the server has already killed the cursor so there is +# no need to send killCursors. +_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) + +_QUERY_OPTIONS = { + "tailable_cursor": 2, + "secondary_okay": 4, + "oplog_replay": 8, + "no_timeout": 16, + "await_data": 32, + "exhaust": 64, + "partial": 128, +} + + +class CursorType: + NON_TAILABLE = 0 + """The standard cursor type.""" + + TAILABLE = _QUERY_OPTIONS["tailable_cursor"] + """The tailable cursor type. + + Tailable cursors are only for use with capped collections. They are not + closed when the last data is retrieved but are kept open and the cursor + location marks the final document position. If more data is received + iteration of the cursor will continue from the last document received. + """ + + TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] + """A tailable cursor with the await option set. + + Creates a tailable cursor that will wait for a few seconds after returning + the full result set so that it can capture and return additional data added + during the query. + """ + + EXHAUST = _QUERY_OPTIONS["exhaust"] + """An exhaust cursor. + + MongoDB will stream batched results to the client without waiting for the + client to request each batch, reducing latency. 
+ """ + + +_Sort = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_Hint = Union[str, _Sort] diff --git a/pymongo/daemon.py b/pymongo/daemon.py new file mode 100644 index 0000000000..c0a01db16d --- /dev/null +++ b/pymongo/daemon.py @@ -0,0 +1,148 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for spawning a daemon process. + +PyMongo only attempts to spawn the mongocryptd daemon process when automatic +client-side field level encryption is enabled. See +`Client-side Field Level Encryption `_ for more info. +""" +from __future__ import annotations + +import os +import subprocess +import sys +import warnings +from typing import Any, Optional, Sequence + +# The maximum amount of time to wait for the intermediate subprocess. +_WAIT_TIMEOUT = 10 +_THIS_FILE = os.path.realpath(__file__) + + +def _popen_wait(popen: subprocess.Popen[Any], timeout: Optional[float]) -> Optional[int]: + """Implement wait timeout support for Python 3.""" + try: + return popen.wait(timeout=timeout) + except subprocess.TimeoutExpired: + # Silence TimeoutExpired errors. + return None + + +def _silence_resource_warning(popen: Optional[subprocess.Popen[Any]]) -> None: + """Silence Popen's ResourceWarning. + + Note this should only be used if the process was created as a daemon. + """ + # Set the returncode to avoid this warning when popen is garbage collected: + # "ResourceWarning: subprocess XXX is still running". + # See https://bugs.python.org/issue38890 and + # https://bugs.python.org/issue26741. + # popen is None when mongocryptd spawning fails + if popen is not None: + popen.returncode = 0 + + +if sys.platform == "win32": + # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. + _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) + + def _spawn_daemon(args: Sequence[str]) -> None: + """Spawn a daemon process (Windows).""" + try: + with open(os.devnull, "r+b") as devnull: + popen = subprocess.Popen( + args, # noqa: S603 + creationflags=_DETACHED_PROCESS, + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) + _silence_resource_warning(popen) + except FileNotFoundError as exc: + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + +else: + # On Unix we spawn the daemon process with a double Popen. + # 1) The first Popen runs this file as a Python script using the current + # interpreter. + # 2) The script then decouples itself and performs the second Popen to + # spawn the daemon process. + # 3) The original process waits up to 10 seconds for the script to exit. + # + # Note that we do not call fork() directly because we want this procedure + # to be safe to call from any thread. Using Popen instead of fork also + # avoids triggering the application's os.register_at_fork() callbacks when + # we spawn the mongocryptd daemon process. 
+ def _spawn(args: Sequence[str]) -> Optional[subprocess.Popen[Any]]: + """Spawn the process and silence stdout/stderr.""" + try: + with open(os.devnull, "r+b") as devnull: + return subprocess.Popen( + args, # noqa: S603 + close_fds=True, + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) + except FileNotFoundError as exc: + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + return None + + def _spawn_daemon_double_popen(args: Sequence[str]) -> None: + """Spawn a daemon process using a double subprocess.Popen.""" + spawner_args = [sys.executable, _THIS_FILE] + spawner_args.extend(args) + temp_proc = subprocess.Popen(spawner_args, close_fds=True) # noqa: S603 + # Reap the intermediate child process to avoid creating zombie + # processes. + _popen_wait(temp_proc, _WAIT_TIMEOUT) + + def _spawn_daemon(args: Sequence[str]) -> None: + """Spawn a daemon process (Unix).""" + # "If Python is unable to retrieve the real path to its executable, + # sys.executable will be an empty string or None". + if sys.executable: + _spawn_daemon_double_popen(args) + else: + # Fallback to spawn a non-daemon process without silencing the + # resource warning. We do not use fork here because it is not + # safe to call from a thread on all systems. + # Unfortunately, this means that: + # 1) If the parent application is killed via Ctrl-C, the + # non-daemon process will also be killed. + # 2) Each non-daemon process will hang around as a zombie process + # until the main application exits. + _spawn(args) + + if __name__ == "__main__": + # Attempt to start a new session to decouple from the parent. + if hasattr(os, "setsid"): + try: + os.setsid() + except OSError: + pass + + # We are performing a double fork (Popen) to spawn the process as a + # daemon so it is safe to ignore the resource warning. + _silence_resource_warning(_spawn(sys.argv[1:])) + os._exit(0) diff --git a/pymongo/database.py b/pymongo/database.py index 646c0a97a2..f85b312f91 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1,10 +1,10 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,993 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Database level operations.""" +"""Re-import of synchronous Database API for compatibility.""" +from __future__ import annotations -import warnings +from pymongo.synchronous.database import * # noqa: F403 +from pymongo.synchronous.database import __doc__ as original_doc -from bson.binary import OLD_UUID_SUBTYPE -from bson.code import Code -from bson.dbref import DBRef -from bson.son import SON -from pymongo import auth, common, helpers -from pymongo.collection import Collection -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - InvalidName, - OperationFailure) -from pymongo import read_preferences as rp - - -def _check_name(name): - """Check if a database name is valid. 
- """ - if not name: - raise InvalidName("database name cannot be the empty string") - - for invalid_char in [" ", ".", "$", "/", "\\", "\x00"]: - if invalid_char in name: - raise InvalidName("database names cannot contain the " - "character %r" % invalid_char) - - -class Database(common.BaseObject): - """A Mongo database. - """ - - def __init__(self, connection, name): - """Get a database by connection and name. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - database name. - - :Parameters: - - `connection`: a client instance - - `name`: database name - - .. mongodoc:: databases - """ - super(Database, - self).__init__(slave_okay=connection.slave_okay, - read_preference=connection.read_preference, - tag_sets=connection.tag_sets, - secondary_acceptable_latency_ms=( - connection.secondary_acceptable_latency_ms), - safe=connection.safe, - uuidrepresentation=connection.uuid_subtype, - **connection.write_concern) - - if not isinstance(name, basestring): - raise TypeError("name must be an instance " - "of %s" % (basestring.__name__,)) - - if name != '$external': - _check_name(name) - - self.__name = unicode(name) - self.__connection = connection - - self.__incoming_manipulators = [] - self.__incoming_copying_manipulators = [] - self.__outgoing_manipulators = [] - self.__outgoing_copying_manipulators = [] - - def add_son_manipulator(self, manipulator): - """Add a new son manipulator to this database. - - Newly added manipulators will be applied before existing ones. - - :Parameters: - - `manipulator`: the manipulator to add - """ - def method_overwritten(instance, method): - return getattr(instance, method) != \ - getattr(super(instance.__class__, instance), method) - - if manipulator.will_copy(): - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_copying_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_copying_manipulators.insert(0, manipulator) - else: - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_manipulators.insert(0, manipulator) - - @property - def system_js(self): - """A :class:`SystemJS` helper for this :class:`Database`. - - See the documentation for :class:`SystemJS` for more details. - - .. versionadded:: 1.5 - """ - return SystemJS(self) - - @property - def connection(self): - """The client instance for this :class:`Database`. - - .. versionchanged:: 1.3 - ``connection`` is now a property rather than a method. - """ - return self.__connection - - @property - def name(self): - """The name of this :class:`Database`. - - .. versionchanged:: 1.3 - ``name`` is now a property rather than a method. - """ - return self.__name - - @property - def incoming_manipulators(self): - """List all incoming SON manipulators - installed on this instance. - - .. versionadded:: 2.0 - """ - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_manipulators] - - @property - def incoming_copying_manipulators(self): - """List all incoming SON copying manipulators - installed on this instance. - - .. 
versionadded:: 2.0 - """ - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_copying_manipulators] - - @property - def outgoing_manipulators(self): - """List all outgoing SON manipulators - installed on this instance. - - .. versionadded:: 2.0 - """ - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_manipulators] - - @property - def outgoing_copying_manipulators(self): - """List all outgoing SON copying manipulators - installed on this instance. - - .. versionadded:: 2.0 - """ - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_copying_manipulators] - - def __eq__(self, other): - if isinstance(other, Database): - us = (self.__connection, self.__name) - them = (other.__connection, other.__name) - return us == them - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "Database(%r, %r)" % (self.__connection, self.__name) - - def __getattr__(self, name): - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - return Collection(self, name) - - def __getitem__(self, name): - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - return self.__getattr__(name) - - def create_collection(self, name, **kwargs): - """Create a new :class:`~pymongo.collection.Collection` in this - database. - - Normally collection creation is automatic. This method should - only be used to specify options on - creation. :class:`~pymongo.errors.CollectionInvalid` will be - raised if the collection already exists. - - Options should be passed as keyword arguments to this - method. Any of the following options are valid: - - - "size": desired initial size for the collection (in - bytes). For capped collections this size is the max - size of the collection. - - "capped": if True, this is a capped collection - - "max": maximum number of objects if capped (optional) - - :Parameters: - - `name`: the name of the collection to create - - `**kwargs` (optional): additional keyword arguments will - be passed as options for the create collection command - - .. versionchanged:: 2.2 - Removed deprecated argument: options - - .. versionchanged:: 1.5 - deprecating `options` in favor of kwargs - """ - opts = {"create": True} - opts.update(kwargs) - - if name in self.collection_names(): - raise CollectionInvalid("collection %s already exists" % name) - - return Collection(self, name, **opts) - - def _fix_incoming(self, son, collection): - """Apply manipulators to an incoming SON object before it gets stored. - - :Parameters: - - `son`: the son object going into the database - - `collection`: the collection the son object is being saved in - """ - for manipulator in self.__incoming_manipulators: - son = manipulator.transform_incoming(son, collection) - for manipulator in self.__incoming_copying_manipulators: - son = manipulator.transform_incoming(son, collection) - return son - - def _fix_outgoing(self, son, collection): - """Apply manipulators to a SON object as it comes out of the database. 
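The manipulator pipeline is easiest to see with a small custom manipulator; a sketch assuming the legacy ``pymongo.son_manipulator.SONManipulator`` base class::

    from pymongo.son_manipulator import SONManipulator

    class AddCreated(SONManipulator):
        def transform_incoming(self, son, collection):
            # Runs on every document on its way into the database.
            son.setdefault("created", True)
            return son

    # Newly added manipulators are applied before existing ones.
    db.add_son_manipulator(AddCreated())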
- - :Parameters: - - `son`: the son object coming out of the database - - `collection`: the collection the son object was saved in - """ - for manipulator in reversed(self.__outgoing_manipulators): - son = manipulator.transform_outgoing(son, collection) - for manipulator in reversed(self.__outgoing_copying_manipulators): - son = manipulator.transform_outgoing(son, collection) - return son - - def _command(self, command, value=1, - check=True, allowable_errors=None, - uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs): - """Internal command helper. - """ - - if isinstance(command, basestring): - command = SON([(command, value)]) - - command_name = command.keys()[0].lower() - must_use_master = kwargs.pop('_use_master', False) - if command_name not in rp.secondary_ok_commands: - must_use_master = True - - # Special-case: mapreduce can go to secondaries only if inline - if command_name == 'mapreduce': - out = command.get('out') or kwargs.get('out') - if not isinstance(out, dict) or not out.get('inline'): - must_use_master = True - - # Special-case: aggregate with $out cannot go to secondaries. - if command_name == 'aggregate': - for stage in kwargs.get('pipeline', []): - if '$out' in stage: - must_use_master = True - break - - extra_opts = { - 'as_class': kwargs.pop('as_class', None), - 'slave_okay': kwargs.pop('slave_okay', self.slave_okay), - '_must_use_master': must_use_master, - '_uuid_subtype': uuid_subtype - } - - extra_opts['read_preference'] = kwargs.pop( - 'read_preference', - self.read_preference) - extra_opts['tag_sets'] = kwargs.pop( - 'tag_sets', - self.tag_sets) - extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop( - 'secondary_acceptable_latency_ms', - self.secondary_acceptable_latency_ms) - extra_opts['compile_re'] = compile_re - - fields = kwargs.get('fields') - if fields is not None and not isinstance(fields, dict): - kwargs['fields'] = helpers._fields_list_to_dict(fields) - - command.update(kwargs) - - # Warn if must_use_master will override read_preference. - if (extra_opts['read_preference'] != rp.ReadPreference.PRIMARY and - extra_opts['_must_use_master']): - warnings.warn("%s does not support %s read preference " - "and will be routed to the primary instead." % - (command_name, - rp.modes[extra_opts['read_preference']]), - UserWarning) - - cursor = self["$cmd"].find(command, **extra_opts).limit(-1) - for doc in cursor: - result = doc - - if check: - msg = "command %s failed: %%s" % repr(command).replace("%", "%%") - helpers._check_command_response(result, self.connection.disconnect, - msg, allowable_errors) - - return result, cursor.conn_id - - def command(self, command, value=1, - check=True, allowable_errors=[], - uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs): - """Issue a MongoDB command. - - Send command `command` to the database and return the - response. If `command` is an instance of :class:`basestring` - (:class:`str` in python 3) then the command {`command`: `value`} - will be sent. Otherwise, `command` must be an instance of - :class:`dict` and will be sent as is. - - Any additional keyword arguments will be added to the final - command document before it is sent. - - For example, a command like ``{buildinfo: 1}`` can be sent - using: - - >>> db.command("buildinfo") - - For a command where the value matters, like ``{collstats: - collection_name}`` we can do: - - >>> db.command("collstats", collection_name) - - For commands that take additional arguments we can use - kwargs. 
So ``{filemd5: object_id, root: file_root}`` becomes: - - >>> db.command("filemd5", object_id, root=file_root) - - :Parameters: - - `command`: document representing the command to be issued, - or the name of the command (for simple commands only). - - .. note:: the order of keys in the `command` document is - significant (the "verb" must come first), so commands - which require multiple keys (e.g. `findandmodify`) - should use an instance of :class:`~bson.son.SON` or - a string and kwargs instead of a Python `dict`. - - - `value` (optional): value to use for the command verb when - `command` is passed as a string - - `check` (optional): check the response for errors, raising - :class:`~pymongo.errors.OperationFailure` if there are any - - `allowable_errors`: if `check` is ``True``, error messages - in this list will be ignored by error-checking - - `uuid_subtype` (optional): The BSON binary subtype to use - for a UUID used in this command. - - `compile_re` (optional): if ``False``, don't attempt to compile - BSON regular expressions into Python regular expressions. Return - instances of :class:`~bson.regex.Regex` instead. Can avoid - :exc:`~bson.errors.InvalidBSON` errors when receiving - Python-incompatible regular expressions, for example from - ``currentOp`` - - `read_preference`: The read preference for this connection. - See :class:`~pymongo.read_preferences.ReadPreference` for available - options. - - `tag_sets`: Read from replica-set members with these tags. - To specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags." ReplicaSetConnection tries each set of tags in turn - until it finds a set of tags with at least one matching member. - - `secondary_acceptable_latency_ms`: Any replica-set member whose - ping time is within secondary_acceptable_latency_ms of the nearest - member may accept reads. Default 15 milliseconds. - **Ignored by mongos** and must be configured on the command line. - See the localThreshold_ option for more information. - - `**kwargs` (optional): additional keyword arguments will - be added to the command document before it is sent - - .. note:: ``command`` ignores the ``network_timeout`` parameter. - - .. versionchanged:: 2.7 - Added ``compile_re`` option. - .. versionchanged:: 2.3 - Added `tag_sets` and `secondary_acceptable_latency_ms` options. - .. versionchanged:: 2.2 - Added support for `as_class` - the class you want to use for - the resulting documents - .. versionchanged:: 1.6 - Added the `value` argument for string commands, and keyword - arguments for additional command options. - .. versionchanged:: 1.5 - `command` can be a string in addition to a full document. - .. versionadded:: 1.4 - - .. mongodoc:: commands - .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - return self._command(command, value, check, allowable_errors, - uuid_subtype, compile_re, **kwargs)[0] - - def collection_names(self, include_system_collections=True): - """Get a list of all the collection names in this database. 
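A sketch of typical usage (the database handle is illustrative)::

    # Excludes "system.*" names such as "system.indexes".
    names = db.collection_names(include_system_collections=False)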
- - :Parameters: - - `include_system_collections` (optional): if ``False`` the list - will not include system collections (e.g. ``system.indexes``) - """ - results = self["system.namespaces"].find(_must_use_master=True) - names = [r["name"] for r in results] - names = [n[len(self.__name) + 1:] for n in names - if n.startswith(self.__name + ".") and "$" not in n] - if not include_system_collections: - names = [n for n in names if not n.startswith("system.")] - return names - - def drop_collection(self, name_or_collection): - """Drop a collection. - - :Parameters: - - `name_or_collection`: the name of a collection to drop or the - collection object itself - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, basestring): - raise TypeError("name_or_collection must be an instance of " - "%s or Collection" % (basestring.__name__,)) - - self.__connection._purge_index(self.__name, name) - - self.command("drop", unicode(name), allowable_errors=["ns not found"]) - - def validate_collection(self, name_or_collection, - scandata=False, full=False): - """Validate a collection. - - Returns a dict of validation info. Raises CollectionInvalid if - validation fails. - - With MongoDB < 1.9 the result dict will include a `result` key - with a string value that represents the validation results. With - MongoDB >= 1.9 the `result` key no longer exists and the results - are split into individual fields in the result dict. - - :Parameters: - - `name_or_collection`: A Collection object or the name of a - collection to validate. - - `scandata`: Do extra checks beyond checking the overall - structure of the collection. - - `full`: Have the server do a more thorough scan of the - collection. Use with `scandata` for a thorough scan - of the structure of the collection and the individual - documents. Ignored in MongoDB versions before 1.9. - - .. versionchanged:: 1.11 - validate_collection previously returned a string. - .. versionadded:: 1.11 - Added `scandata` and `full` options. - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, basestring): - raise TypeError("name_or_collection must be an instance of " - "%s or Collection" % (basestring.__name__,)) - - result = self.command("validate", unicode(name), - scandata=scandata, full=full) - - valid = True - # Pre 1.9 results - if "result" in result: - info = result["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) - # Sharded results - elif "raw" in result: - for _, res in result["raw"].iteritems(): - if "result" in res: - info = res["result"] - if (info.find("exception") != -1 or - info.find("corrupt") != -1): - raise CollectionInvalid("%s invalid: " - "%s" % (name, info)) - elif not res.get("valid", False): - valid = False - break - # Post 1.9 non-sharded results. - elif not result.get("valid", False): - valid = False - - if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) - - return result - - def current_op(self, include_all=False): - """Get information on operations currently running. - - :Parameters: - - `include_all` (optional): if ``True`` also list currently - idle operations in the result - """ - if include_all: - return self['$cmd.sys.inprog'].find_one({"$all": True}) - else: - return self['$cmd.sys.inprog'].find_one() - - def profiling_level(self): - """Get the database's current profiling level.
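The profiling helpers here and just below pair naturally; a sketch using the module-level level constants (the database handle is illustrative)::

    import pymongo

    # Profile only operations slower than 200 ms.
    db.set_profiling_level(pymongo.SLOW_ONLY, slow_ms=200)
    assert db.profiling_level() == pymongo.SLOW_ONLY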
- - Returns one of (:data:`~pymongo.OFF`, - :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). - - .. mongodoc:: profiling - """ - result = self.command("profile", -1) - - assert result["was"] >= 0 and result["was"] <= 2 - return result["was"] - - def set_profiling_level(self, level, slow_ms=None): - """Set the database's profiling level. - - :Parameters: - - `level`: Specifies a profiling level, see list of possible values - below. - - `slow_ms`: Optionally modify the threshold for the profiler to - consider a query or operation. Even if the profiler is off, queries - slower than the `slow_ms` threshold will get written to the logs. - - Possible `level` values: - - +----------------------------+------------------------------------+ - | Level | Setting | - +============================+====================================+ - | :data:`~pymongo.OFF` | Off. No profiling. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.ALL` | On. Includes all operations. | - +----------------------------+------------------------------------+ - - Raises :class:`ValueError` if level is not one of - (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, - :data:`~pymongo.ALL`). - - .. mongodoc:: profiling - """ - if not isinstance(level, int) or level < 0 or level > 2: - raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") - - if slow_ms is not None and not isinstance(slow_ms, int): - raise TypeError("slow_ms must be an integer") - - if slow_ms is not None: - self.command("profile", level, slowms=slow_ms) - else: - self.command("profile", level) - - def profiling_info(self): - """Returns a list containing current profiling information. - - .. mongodoc:: profiling - """ - return list(self["system.profile"].find()) - - def error(self): - """Get a database error if one occurred on the last operation. - - Return None if the last operation was error-free. Otherwise return the - error that occurred. - """ - error = self.command("getlasterror") - error_msg = error.get("err", "") - if error_msg is None: - return None - if error_msg.startswith("not master"): - self.__connection.disconnect() - return error - - def last_status(self): - """Get status information from the last operation. - - Returns a SON object with status information. - """ - return self.command("getlasterror") - - def previous_error(self): - """Get the most recent error to have occurred on this database. - - Only returns errors that have occurred since the last call to - `Database.reset_error_history`. Returns None if no such errors have - occurred. - """ - error = self.command("getpreverror") - if error.get("err", 0) is None: - return None - return error - - def reset_error_history(self): - """Reset the error history of this database. - - Calls to `Database.previous_error` will only return errors that have - occurred since the most recent call to this method. - """ - self.command("reseterror") - - def __iter__(self): - return self - - def next(self): - raise TypeError("'Database' object is not iterable") - - def _default_role(self, read_only): - if self.name == "admin": - if read_only: - return "readAnyDatabase" - else: - return "root" - else: - if read_only: - return "read" - else: - return "dbOwner" - - def _create_or_update_user( - self, create, name, password, read_only, **kwargs): - """Use a command to create (if create=True) or modify a user.
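The profiling and legacy error helpers above compose as in the following sketch, using the ``OFF``/``SLOW_ONLY``/``ALL`` constants exported at the ``pymongo`` package level::

    >>> from pymongo import OFF, SLOW_ONLY
    >>> db.set_profiling_level(SLOW_ONLY, slow_ms=200)
    >>> db.profiling_level() == SLOW_ONLY
    True
    >>> db.profiling_info()        # documents from db['system.profile']
    [...]
    >>> db.set_profiling_level(OFF)
    >>> db.error() is None         # None when the last operation succeeded
    True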
- """ - opts = {} - if read_only or (create and "roles" not in kwargs): - warnings.warn("Creating a user with the read_only option " - "or without roles is deprecated in MongoDB " - ">= 2.6", DeprecationWarning) - - opts["roles"] = [self._default_role(read_only)] - - elif read_only: - warnings.warn("The read_only option is deprecated in MongoDB " - ">= 2.6, use 'roles' instead", DeprecationWarning) - - if password is not None: - # We always salt and hash client side. - if "digestPassword" in kwargs: - raise ConfigurationError("The digestPassword option is not " - "supported via add_user. Please use " - "db.command('createUser', ...) " - "instead for this option.") - opts["pwd"] = auth._password_digest(name, password) - opts["digestPassword"] = False - - opts["writeConcern"] = self._get_wc_override() - opts.update(kwargs) - - if create: - command_name = "createUser" - else: - command_name = "updateUser" - - self.command(command_name, name, **opts) - - def _legacy_add_user(self, name, password, read_only, **kwargs): - """Uses v1 system to add users, i.e. saving to system.users. - """ - user = self.system.users.find_one({"user": name}) or {"user": name} - if password is not None: - user["pwd"] = auth._password_digest(name, password) - if read_only is not None: - user["readOnly"] = read_only - user.update(kwargs) - - try: - self.system.users.save(user, **self._get_wc_override()) - except OperationFailure, exc: - # First admin user add fails gle in MongoDB >= 2.1.2 - # See SERVER-4225 for more information. - if 'login' in str(exc): - pass - else: - raise - - def add_user(self, name, password=None, read_only=None, **kwargs): - """Create user `name` with password `password`. - - Add a new user with permissions for this :class:`Database`. - - .. note:: Will change the password if user `name` already exists. - - :Parameters: - - `name`: the name of the user to create - - `password` (optional): the password of the user to create. Can not - be used with the ``userSource`` argument. - - `read_only` (optional): if ``True`` the user will be read only - - `**kwargs` (optional): optional fields for the user document - (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See - ``_ - for more information. - - .. note:: The use of optional keyword arguments like ``userSource``, - ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0 - - .. versionchanged:: 2.5 - Added kwargs support for optional fields introduced in MongoDB 2.4 - - .. versionchanged:: 2.2 - Added support for read only users - - .. versionadded:: 1.4 - """ - if not isinstance(name, basestring): - raise TypeError("name must be an instance " - "of %s" % (basestring.__name__,)) - if password is not None: - if not isinstance(password, basestring): - raise TypeError("password must be an instance " - "of %s or None" % (basestring.__name__,)) - if len(password) == 0: - raise ValueError("password can't be empty") - if read_only is not None: - read_only = common.validate_boolean('read_only', read_only) - if 'roles' in kwargs: - raise ConfigurationError("Can not use " - "read_only and roles together") - - try: - uinfo = self.command("usersInfo", name) - except OperationFailure, exc: - # MongoDB >= 2.5.3 requires the use of commands to manage - # users. "No such command" error didn't return an error - # code (59) before MongoDB 2.4.7 so we assume that an error - # code of None means the userInfo command doesn't exist and - # we should fall back to the legacy add user code. 
- if exc.code in (59, None): - self._legacy_add_user(name, password, read_only, **kwargs) - return - raise - - # Create the user if not found in uinfo, otherwise update one. - self._create_or_update_user( - (not uinfo["users"]), name, password, read_only, **kwargs) - - def remove_user(self, name): - """Remove user `name` from this :class:`Database`. - - User `name` will no longer have permissions to access this - :class:`Database`. - - :Parameters: - - `name`: the name of the user to remove - - .. versionadded:: 1.4 - """ - - try: - self.command("dropUser", name, - writeConcern=self._get_wc_override()) - except OperationFailure, exc: - # See comment in add_user try / except above. - if exc.code in (59, None): - self.system.users.remove({"user": name}, - **self._get_wc_override()) - return - raise - - def authenticate(self, name, password=None, - source=None, mechanism='MONGODB-CR', **kwargs): - """Authenticate to use this database. - - Authentication lasts for the life of the underlying client - instance, or until :meth:`logout` is called. - - Raises :class:`TypeError` if (required) `name`, (optional) `password`, - or (optional) `source` is not an instance of :class:`basestring` - (:class:`str` in python 3). - - .. note:: - - This method authenticates the current connection, and - will also cause all new :class:`~socket.socket` connections - in the underlying client instance to be authenticated automatically. - - - Authenticating more than once on the same database with different - credentials is not supported. You must call :meth:`logout` before - authenticating with new credentials. - - - When sharing a client instance between multiple threads, all - threads will share the authentication. If you need different - authentication profiles for different purposes you must use - distinct client instances. - - - To get authentication to apply immediately to all - existing sockets you may need to reset this client instance's - sockets using :meth:`~pymongo.mongo_client.MongoClient.disconnect`. - - :Parameters: - - `name`: the name of the user to authenticate. - - `password` (optional): the password of the user to authenticate. - Not used with GSSAPI or MONGODB-X509 authentication. - - `source` (optional): the database to authenticate on. If not - specified the current database is used. - - `mechanism` (optional): See - :data:`~pymongo.auth.MECHANISMS` for options. - Defaults to MONGODB-CR (MongoDB Challenge Response protocol) - - `gssapiServiceName` (optional): Used with the GSSAPI mechanism - to specify the service name portion of the service principal name. - Defaults to 'mongodb'. - - .. versionchanged:: 2.5 - Added the `source` and `mechanism` parameters. :meth:`authenticate` - now raises a subclass of :class:`~pymongo.errors.PyMongoError` if - authentication fails due to invalid credentials or configuration - issues. - - .. 
mongodoc:: authenticate - """ - if not isinstance(name, basestring): - raise TypeError("name must be an instance " - "of %s" % (basestring.__name__,)) - if password is not None and not isinstance(password, basestring): - raise TypeError("password must be an instance " - "of %s" % (basestring.__name__,)) - if source is not None and not isinstance(source, basestring): - raise TypeError("source must be an instance " - "of %s" % (basestring.__name__,)) - common.validate_auth_mechanism('mechanism', mechanism) - - validated_options = {} - for option, value in kwargs.iteritems(): - normalized, val = common.validate_auth_option(option, value) - validated_options[normalized] = val - - credentials = auth._build_credentials_tuple(mechanism, - source or self.name, unicode(name), - password and unicode(password) or None, - validated_options) - self.connection._cache_credentials(self.name, credentials) - return True - - def logout(self): - """Deauthorize use of this database for this client instance. - - .. note:: Other databases may still be authenticated, and other - existing :class:`~socket.socket` connections may remain - authenticated for this database unless you reset all sockets - with :meth:`~pymongo.mongo_client.MongoClient.disconnect`. - """ - # Sockets will be deauthenticated as they are used. - self.connection._purge_credentials(self.name) - - def dereference(self, dbref): - """Dereference a :class:`~bson.dbref.DBRef`, getting the - document it points to. - - Raises :class:`TypeError` if `dbref` is not an instance of - :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if - the reference does not point to a valid document. Raises - :class:`ValueError` if `dbref` has a database specified that - is different from the current database. - - :Parameters: - - `dbref`: the reference - """ - if not isinstance(dbref, DBRef): - raise TypeError("cannot dereference a %s" % type(dbref)) - if dbref.database is not None and dbref.database != self.__name: - raise ValueError("trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, - self.__name)) - return self[dbref.collection].find_one({"_id": dbref.id}) - - def eval(self, code, *args): - """Evaluate a JavaScript expression in MongoDB. - - Useful if you need to touch a lot of data lightly; in such a - scenario the network transfer of the data could be a - bottleneck. The `code` argument must be a JavaScript - function. Additional positional arguments will be passed to - that function when it is run on the server. - - Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3) or `Code`. - Raises :class:`~pymongo.errors.OperationFailure` if the eval - fails. Returns the result of the evaluation. - - :Parameters: - - `code`: string representation of JavaScript code to be - evaluated - - `args` (optional): additional positional arguments are - passed to the `code` being evaluated - """ - if not isinstance(code, Code): - code = Code(code) - - result = self.command("$eval", code, args=args) - return result.get("retval", None) - - def __call__(self, *args, **kwargs): - """This is only here so that some API misusages are easier to debug. - """ - raise TypeError("'Database' object is not callable. If you meant to " - "call the '%s' method on a '%s' object it is " - "failing because no such method exists." % ( - self.__name, self.__connection.__class__.__name__)) - - -class SystemJS(object): - """Helper class for dealing with stored JavaScript. 
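Taken together, the legacy user-management and authentication helpers above compose as in this sketch (``'app'`` and ``'s3cr3t'`` are placeholder credentials, and ``some_dbref`` stands for any :class:`~bson.dbref.DBRef` in hand)::

    >>> db.add_user("app", "s3cr3t", roles=["readWrite"])
    >>> db.authenticate("app", "s3cr3t")
    True
    >>> db.dereference(some_dbref)   # fetches the document the DBRef points to
    {u'_id': ObjectId('...'), ...}
    >>> db.logout()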
- """ - - def __init__(self, database): - """Get a system js helper for the database `database`. - - An instance of :class:`SystemJS` can be created with an instance - of :class:`Database` through :attr:`Database.system_js`, - manual instantiation of this class should not be necessary. - - :class:`SystemJS` instances allow for easy manipulation and - access to server-side JavaScript: - - .. doctest:: - - >>> db.system_js.add1 = "function (x) { return x + 1; }" - >>> db.system.js.find({"_id": "add1"}).count() - 1 - >>> db.system_js.add1(5) - 6.0 - >>> del db.system_js.add1 - >>> db.system.js.find({"_id": "add1"}).count() - 0 - - .. note:: Requires server version **>= 1.1.1** - - .. versionadded:: 1.5 - """ - # can't just assign it since we've overridden __setattr__ - object.__setattr__(self, "_db", database) - - def __setattr__(self, name, code): - self._db.system.js.save({"_id": name, "value": Code(code)}, - **self._db._get_wc_override()) - - def __setitem__(self, name, code): - self.__setattr__(name, code) - - def __delattr__(self, name): - self._db.system.js.remove({"_id": name}, **self._db._get_wc_override()) - - def __delitem__(self, name): - self.__delattr__(name) - - def __getattr__(self, name): - return lambda *args: self._db.eval(Code("function() { " - "return this[name].apply(" - "this, arguments); }", - scope={'name': name}), *args) - - def __getitem__(self, name): - return self.__getattr__(name) - - def list(self): - """Get a list of the names of the functions stored in this database. - - .. versionadded:: 1.9 - """ - return [x["_id"] for x in self._db.system.js.find(fields=["_id"])] +__doc__ = original_doc +__all__ = ["Database"] # noqa: F405 diff --git a/pymongo/database_shared.py b/pymongo/database_shared.py new file mode 100644 index 0000000000..d6563a4b3d --- /dev/null +++ b/pymongo/database_shared.py @@ -0,0 +1,34 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, helpers, and types shared across all database classes.""" +from __future__ import annotations + +from typing import Any, Mapping, TypeVar + +from pymongo.errors import InvalidName + + +def _check_name(name: str) -> None: + """Check if a database name is valid.""" + if not name: + raise InvalidName("database name cannot be the empty string") + + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: + if invalid_char in name: + raise InvalidName("database names cannot contain the character %r" % invalid_char) + + +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py new file mode 100644 index 0000000000..f24321d973 --- /dev/null +++ b/pymongo/driver_info.py @@ -0,0 +1,45 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Advanced options for MongoDB drivers implemented on top of PyMongo. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from collections import namedtuple +from typing import Optional + + +class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): + """Info about a driver wrapping PyMongo. + + The MongoDB server logs PyMongo's name, version, and platform whenever + PyMongo establishes a connection. A driver implemented on top of PyMongo + can add its own info to this log message. Initialize with three strings + like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be + None to accept PyMongo's default. + """ + + def __new__( + cls, name: str, version: Optional[str] = None, platform: Optional[str] = None + ) -> DriverInfo: + self = super().__new__(cls, name, version, platform) + for key, value in self._asdict().items(): + if value is not None and not isinstance(value, str): + raise TypeError( + f"Wrong type for DriverInfo {key} option, value must be an instance of str, not {type(value)}" + ) + + return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py new file mode 100644 index 0000000000..71c1d4b723 --- /dev/null +++ b/pymongo/encryption.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Encryption API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.encryption import * # noqa: F403 +from pymongo.synchronous.encryption import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["Algorithm", "ClientEncryption", "QueryType", "RewrapManyDataKeyResult"] # noqa: F405 diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py new file mode 100644 index 0000000000..b2037617b0 --- /dev/null +++ b/pymongo/encryption_options.py @@ -0,0 +1,392 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for automatic client-side field level encryption. + +.. 
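``DriverInfo`` is consumed through the client's ``driver`` option; a minimal sketch (the name, version, and platform strings here are placeholders)::

    >>> from pymongo import MongoClient
    >>> from pymongo.driver_info import DriverInfo
    >>> # version and platform may be None to accept PyMongo's defaults.
    >>> client = MongoClient(driver=DriverInfo("MyDriver", "1.2.3", "CPython 3.12"))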
seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, TypedDict + +from pymongo.uri_parser_shared import _parse_kms_tls_options + +try: + import pymongocrypt # type:ignore[import-untyped] # noqa: F401 + + # Check for pymongocrypt>=1.10. + from pymongocrypt import synchronous as _ # noqa: F401 + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False +from bson import int64 +from pymongo.common import check_for_min_version, validate_is_mapping +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + from pymongo.typings import _AgnosticMongoClient + + +def check_min_pymongocrypt() -> None: + """Raise an appropriate error if the minimum required pymongocrypt is not installed.""" + pymongocrypt_version, required_version, is_valid = check_for_min_version("pymongocrypt") + if not is_valid: + raise ConfigurationError( + f"client side encryption requires pymongocrypt>={required_version}, " + f"found version {pymongocrypt_version}. " + "Install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) + + +class AutoEncryptionOpts: + """Options to configure automatic client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: Optional[_AgnosticMongoClient] = None, + schema_map: Optional[Mapping[str, Any]] = None, + bypass_auto_encryption: bool = False, + mongocryptd_uri: str = "mongodb://localhost:27020", + mongocryptd_bypass_spawn: bool = False, + mongocryptd_spawn_path: str = "mongocryptd", + mongocryptd_spawn_args: Optional[list[str]] = None, + kms_tls_options: Optional[Mapping[str, Any]] = None, + crypt_shared_lib_path: Optional[str] = None, + crypt_shared_lib_required: bool = False, + bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, + ) -> None: + """Options to configure automatic client-side field level encryption. + + Automatic client-side field level encryption requires MongoDB >=4.2 + enterprise or a MongoDB >=4.2 Atlas cluster. Automatic encryption is not + supported for operations on a database or view and will result in an + error. + + Although automatic encryption requires MongoDB >=4.2 enterprise or a + MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all + users. To configure automatic *decryption* without automatic + *encryption*, set ``bypass_auto_encryption=True``. Explicit + encryption and explicit decryption are also supported for all users + with the :class:`~pymongo.asynchronous.encryption.AsyncClientEncryption` and :class:`~pymongo.encryption.ClientEncryption` classes. + + See the client-side field level encryption documentation for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com').
+ These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support CSFLE on-demand credentials. + Named KMS providers enable more than one of each KMS provider type to be configured. + For example, to configure multiple local KMS providers:: + + kms_providers = { + "local": {"key": local_kek1}, # Unnamed KMS provider. + "local:myname": {"key": local_kek2}, # Named KMS provider with name "myname". + } + + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: By default, the key vault collection + is assumed to reside in the same MongoDB cluster as the encrypted + AsyncMongoClient/MongoClient. Use this option to route data key queries to a + separate MongoDB cluster. + :param schema_map: Map of collection namespace ("db.coll") to + JSON Schema. By default, a collection's JSONSchema is periodically + polled with the listCollections command. But a JSONSchema may be + specified locally with the schemaMap option. + + **Supplying a `schema_map` provides more security than relying on + JSON Schemas obtained from the server. It protects against a + malicious server advertising a false JSON Schema, which could trick + the client into sending unencrypted data that should be + encrypted.** + + Schemas supplied in the schemaMap only apply to configuring + automatic encryption for client side encryption. Other validation + rules in the JSON schema will not be enforced by the driver and + will result in an error. + :param bypass_auto_encryption: If ``True``, automatic + encryption will be disabled but automatic decryption will still be + enabled. Defaults to ``False``. + :param mongocryptd_uri: The MongoDB URI used to connect + to the *local* mongocryptd process. Defaults to + ``'mongodb://localhost:27020'``. + :param mongocryptd_bypass_spawn: If ``True``, the encrypted + AsyncMongoClient/MongoClient will not attempt to spawn the mongocryptd process. + Defaults to ``False``. + :param mongocryptd_spawn_path: Used for spawning the + mongocryptd process. Defaults to ``'mongocryptd'`` and spawns + mongocryptd from the system path. + :param mongocryptd_spawn_args: A list of string arguments to + use when spawning the mongocryptd process. Defaults to + ``['--idleShutdownTimeoutSecs=60']``. If the list does not include + the ``idleShutdownTimeoutSecs`` option, then + ``'--idleShutdownTimeoutSecs=60'`` will be added. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers.
+ Accepts the same TLS options as + :class:`pymongo.mongo_client.AsyncMongoClient` and :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param crypt_shared_lib_path: Override the path to load the crypt_shared library. + :param crypt_shared_lib_required: If ``True``, raise an error if libmongocrypt is + unable to load the crypt_shared library. + :param bypass_query_analysis: If ``True``, disable automatic analysis + of outgoing commands. Set `bypass_query_analysis` to use explicit + encryption on indexed fields without the MongoDB Enterprise Advanced + licensed crypt_shared library. + :param encrypted_fields_map: Map of collection namespace ("db.coll") to documents + that describe the encrypted fields for Queryable Encryption. For example:: + + { + "db.encryptedCollection": { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + } + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None``, which defers to libmongocrypt's default (currently 60000). + Set to 0 to disable key expiration. + + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. + .. versionchanged:: 4.2 + Added the `encrypted_fields_map`, `crypt_shared_lib_path`, `crypt_shared_lib_required`, + and `bypass_query_analysis` parameters. + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. versionadded:: 3.9 + """ + if not _HAVE_PYMONGOCRYPT: + raise ConfigurationError( + "client side encryption requires the pymongocrypt library: " + "install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) + check_min_pymongocrypt() + if encrypted_fields_map: + validate_is_mapping("encrypted_fields_map", encrypted_fields_map) + self._encrypted_fields_map = encrypted_fields_map + self._crypt_shared_lib_path = crypt_shared_lib_path + self._crypt_shared_lib_required = crypt_shared_lib_required + self._kms_providers = kms_providers + self._key_vault_namespace = key_vault_namespace + self._key_vault_client = key_vault_client + self._schema_map = schema_map + self._bypass_auto_encryption = bypass_auto_encryption + self._mongocryptd_uri = mongocryptd_uri + self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn + self._mongocryptd_spawn_path = mongocryptd_spawn_path + if mongocryptd_spawn_args is None: + mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] + self._mongocryptd_spawn_args = mongocryptd_spawn_args + if not isinstance(self._mongocryptd_spawn_args, list): + raise TypeError( + f"mongocryptd_spawn_args must be a list, not {type(self._mongocryptd_spawn_args)}" + ) + if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): + self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") + # Maps KMS provider name to an SSLContext.
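A minimal sketch of wiring ``AutoEncryptionOpts`` into a client, assuming a freshly generated local master key (in practice the key must be persisted and protected)::

    >>> import os
    >>> from pymongo import MongoClient
    >>> from pymongo.encryption_options import AutoEncryptionOpts
    >>> local_master_key = os.urandom(96)
    >>> opts = AutoEncryptionOpts(
    ...     kms_providers={"local": {"key": local_master_key}},
    ...     key_vault_namespace="encryption.__keyVault",
    ... )
    >>> client = MongoClient(auto_encryption_opts=opts)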
+ self._kms_tls_options = kms_tls_options + self._sync_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None + self._async_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None + self._bypass_query_analysis = bypass_query_analysis + self._key_expiration_ms = key_expiration_ms + + def _kms_ssl_contexts(self, is_sync: bool) -> dict[str, SSLContext]: + if is_sync: + if self._sync_kms_ssl_contexts is None: + self._sync_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, True) + return self._sync_kms_ssl_contexts + else: + if self._async_kms_ssl_contexts is None: + self._async_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, False) + return self._async_kms_ssl_contexts + + +class RangeOpts: + """Options to configure encrypted queries using the range algorithm.""" + + def __init__( + self, + sparsity: Optional[int] = None, + trim_factor: Optional[int] = None, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the range algorithm. + + :param sparsity: An integer. + :param trim_factor: An integer. + :param min: A BSON scalar value corresponding to the type being queried. + :param max: A BSON scalar value corresponding to the type being queried. + :param precision: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.trim_factor = trim_factor + self.precision = precision + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity) if self.sparsity else None), + ("trimFactor", self.trim_factor), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc + + +class TextOpts: + """**BETA** Options to configure encrypted queries using the text algorithm. + + TextOpts is currently unstable API and subject to backwards breaking changes.""" + + def __init__( + self, + substring: Optional[SubstringOpts] = None, + prefix: Optional[PrefixOpts] = None, + suffix: Optional[SuffixOpts] = None, + case_sensitive: Optional[bool] = None, + diacritic_sensitive: Optional[bool] = None, + ) -> None: + """Options to configure encrypted queries using the text algorithm. + + :param substring: Further options to support substring queries. + :param prefix: Further options to support prefix queries. + :param suffix: Further options to support suffix queries. + :param case_sensitive: Whether text indexes for this field are case sensitive. + :param diacritic_sensitive: Whether text indexes for this field are diacritic sensitive. + + .. versionadded:: 4.15 + """ + self.substring = substring + self.prefix = prefix + self.suffix = suffix + self.case_sensitive = case_sensitive + self.diacritic_sensitive = diacritic_sensitive + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("substring", self.substring), + ("prefix", self.prefix), + ("suffix", self.suffix), + ("caseSensitive", self.case_sensitive), + ("diacriticSensitive", self.diacritic_sensitive), + ]: + if v is not None: + doc[k] = v + return doc + + +class SubstringOpts(TypedDict): + """**BETA** Options for substring text queries. + + SubstringOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMaxLength is the maximum allowed length to insert. Inserting longer strings will error. 
+ strMaxLength: int + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class PrefixOpts(TypedDict): + """**BETA** Options for prefix text queries. + + PrefixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class SuffixOpts(TypedDict): + """**BETA** Options for suffix text queries. + + SuffixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int diff --git a/pymongo/errors.py b/pymongo/errors.py index b4541a555e..794b5a9398 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -1,10 +1,10 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,27 +12,74 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Exceptions raised by PyMongo.""" +"""Exceptions raised by PyMongo. -from bson.errors import * +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations -try: - from ssl import CertificateError -except ImportError: - from pymongo.ssl_match_hostname import CertificateError +from ssl import SSLCertVerificationError as _CertificateError # noqa: F401 +from typing import TYPE_CHECKING, Any, Iterable, Mapping, Optional, Sequence, Union + +from bson.errors import InvalidDocument + +if TYPE_CHECKING: + from pymongo.results import ClientBulkWriteResult + from pymongo.typings import _DocumentOut class PyMongoError(Exception): - """Base class for all PyMongo exceptions. + """Base class for all PyMongo exceptions.""" - .. versionadded:: 1.4 - """ + def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: + super().__init__(message) + self._message = message + self._error_labels = set(error_labels or []) + + def has_error_label(self, label: str) -> bool: + """Return True if this error contains the given label. + + .. versionadded:: 3.7 + """ + return label in self._error_labels + + def _add_error_label(self, label: str) -> None: + """Add the given label to this error.""" + self._error_labels.add(label) + + def _remove_error_label(self, label: str) -> None: + """Remove the given label from this error.""" + self._error_labels.discard(label) + + @property + def timeout(self) -> bool: + """True if this error was caused by a timeout. + + .. 
versionadded:: 4.2 + """ + return False + + +class ProtocolError(PyMongoError): + """Raised for failures related to the wire protocol.""" class ConnectionFailure(PyMongoError): - """Raised when a connection to the database cannot be made or is lost. + """Raised when a connection to the database cannot be made or is lost.""" + + +class WaitQueueTimeoutError(ConnectionFailure): + """Raised when an operation times out waiting to checkout a connection from the pool. + + Subclass of :exc:`~pymongo.errors.ConnectionFailure`. + + .. versionadded:: 4.2 """ + @property + def timeout(self) -> bool: + return True + class AutoReconnect(ConnectionFailure): """Raised when a connection to the database is lost and an attempt to @@ -43,40 +90,122 @@ class AutoReconnect(ConnectionFailure): operations will attempt to open a new connection to the database (and will continue to raise this exception until the first successful connection is made). + + Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ - def __init__(self, message='', errors=None): - self.errors = errors or [] - ConnectionFailure.__init__(self, message) + errors: Union[Mapping[str, Any], Sequence[Any]] + details: Union[Mapping[str, Any], Sequence[Any]] -class ConfigurationError(PyMongoError): - """Raised when something is incorrectly configured. + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence[Any]]] = None + ) -> None: + error_labels = None + if errors is not None: + if isinstance(errors, dict): + error_labels = errors.get("errorLabels") + super().__init__(message, error_labels) + self.errors = self.details = errors or [] + + +class NetworkTimeout(AutoReconnect): + """An operation on an open connection exceeded socketTimeoutMS. + + The remaining connections in the pool stay open. In the case of a write + operation, you cannot know whether it succeeded or failed. + + Subclass of :exc:`~pymongo.errors.AutoReconnect`. + """ + + @property + def timeout(self) -> bool: + return True + + +def _format_detailed_error( + message: str, details: Optional[Union[Mapping[str, Any], list[Any]]] +) -> str: + if details is not None: + message = f"{message}, full error: {details}" + return message + + +class NotPrimaryError(AutoReconnect): + """The server responded "not primary" or "node is recovering". + + These errors result from a query, write, or command. The operation failed + because the client thought it was using the primary but the primary has + stepped down, or the client thought it was using a healthy secondary but + the secondary is stale and trying to recover. + + The client launches a refresh operation on a background thread, to update + its view of the server as soon as possible after throwing this exception. + + Subclass of :exc:`~pymongo.errors.AutoReconnect`. + + .. versionadded:: 3.12 """ + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], list[Any]]] = None + ) -> None: + super().__init__(_format_detailed_error(message, errors), errors=errors) + + +class ServerSelectionTimeoutError(AutoReconnect): + """Thrown when no MongoDB server is available for an operation + + If there is no suitable server for an operation PyMongo tries for + ``serverSelectionTimeoutMS`` (default 30 seconds) to find one, then + throws this exception. 
For example, it is thrown after attempting an + operation when PyMongo cannot connect to any server, or if you attempt + an insert into a replica set that has no primary and does not elect one + within the timeout window, or if you attempt to query with a Read + Preference that the replica set cannot satisfy. + """ + + @property + def timeout(self) -> bool: + return True + + +class ConfigurationError(PyMongoError): + """Raised when something is incorrectly configured.""" + class OperationFailure(PyMongoError): """Raised when a database operation fails. .. versionadded:: 2.7 The :attr:`details` attribute. - - .. versionadded:: 1.8 - The :attr:`code` attribute. """ - def __init__(self, error, code=None, details=None): + def __init__( + self, + error: str, + code: Optional[int] = None, + details: Optional[Mapping[str, Any]] = None, + max_wire_version: Optional[int] = None, + ) -> None: + error_labels = None + if details is not None: + error_labels = details.get("errorLabels") + super().__init__(_format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details - PyMongoError.__init__(self, error) + self.__max_wire_version = max_wire_version @property - def code(self): - """The error code returned by the server, if any. - """ + def _max_wire_version(self) -> Optional[int]: + return self.__max_wire_version + + @property + def code(self) -> Optional[int]: + """The error code returned by the server, if any.""" return self.__code @property - def details(self): + def details(self) -> Optional[Mapping[str, Any]]: """The complete error document returned by the server. Depending on the error that occurred, the error document @@ -87,6 +216,10 @@ def details(self): """ return self.__details + @property + def timeout(self) -> bool: + return self.__code in (50,) + class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is @@ -105,15 +238,26 @@ class ExecutionTimeout(OperationFailure): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True + + +class WriteConcernError(OperationFailure): + """Base exception type for errors raised due to write concern. + + .. versionadded:: 3.0 + """ + -class TimeoutError(OperationFailure): - """DEPRECATED - will be removed in PyMongo 3.0. See WTimeoutError instead. +class WriteError(OperationFailure): + """Base exception type for errors raised during write operations. - .. versionadded:: 1.8 + .. versionadded:: 3.0 """ -class WTimeoutError(TimeoutError): +class WTimeoutError(WriteConcernError): """Raised when a database operation times out (i.e. wtimeout expires) before replication completes. @@ -123,14 +267,18 @@ class WTimeoutError(TimeoutError): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True -class DuplicateKeyError(OperationFailure): - """Raised when a safe insert or update fails due to a duplicate key error. - .. note:: Requires server version **>= 1.3.0** +class DuplicateKeyError(WriteError): + """Raised when an insert or update fails due to a duplicate key error.""" - .. versionadded:: 1.4 - """ + +def _wtimeout_error(error: Any) -> bool: + """Return True if this writeConcernError doc is a caused by a timeout.""" + return error.get("code") == 50 or ("errInfo" in error and error["errInfo"].get("wtimeout")) class BulkWriteError(OperationFailure): @@ -138,50 +286,151 @@ class BulkWriteError(OperationFailure): .. 
versionadded:: 2.7 """ - def __init__(self, results): - OperationFailure.__init__( - self, "batch op errors occurred", 65, results) + + details: _DocumentOut + + def __init__(self, results: _DocumentOut) -> None: + super().__init__("batch op errors occurred", 65, results) + + def __reduce__(self) -> tuple[Any, Any]: + return self.__class__, (self.details,) + + @property + def timeout(self) -> bool: + # Check the last writeConcernError and last writeError to determine if this + # BulkWriteError was caused by a timeout. + wces = self.details.get("writeConcernErrors", []) + if wces and _wtimeout_error(wces[-1]): + return True + + werrs = self.details.get("writeErrors", []) + if werrs and werrs[-1].get("code") == 50: + return True + return False + + +class ClientBulkWriteException(OperationFailure): + """Exception class for client-level bulk write errors.""" + + details: _DocumentOut + verbose: bool + + def __init__(self, results: _DocumentOut, verbose: bool) -> None: + super().__init__("batch op errors occurred", 65, results) + self.verbose = verbose + + def __reduce__(self) -> tuple[Any, Any]: + return self.__class__, (self.details,) + + @property + def error(self) -> Optional[Any]: + """A top-level error that occurred when attempting to + communicate with the server or execute the bulk write. + + This value may not be populated if the exception was + thrown due to errors occurring on individual writes. + """ + return self.details.get("error", None) + + @property + def write_concern_errors(self) -> Optional[list[WriteConcernError]]: + """Write concern errors that occurred during the bulk write. + + This list may have multiple items if more than one + server command was required to execute the bulk write. + """ + return self.details.get("writeConcernErrors", []) + + @property + def write_errors(self) -> Optional[list[WriteError]]: + """Errors that occurred during the execution of individual write operations. + + This list will contain at most one entry if the bulk write was ordered. + """ + return self.details.get("writeErrors", {}) + + @property + def partial_result(self) -> Optional[ClientBulkWriteResult]: + """The results of any successful operations that were + performed before the error was encountered. + """ + from pymongo.results import ClientBulkWriteResult + + if self.details.get("anySuccessful"): + return ClientBulkWriteResult( + self.details, # type: ignore[arg-type] + acknowledged=True, + has_verbose_results=self.verbose, + ) + return None class InvalidOperation(PyMongoError): - """Raised when a client attempts to perform an invalid operation. - """ + """Raised when a client attempts to perform an invalid operation.""" class InvalidName(PyMongoError): - """Raised when an invalid name is used. - """ + """Raised when an invalid name is used.""" class CollectionInvalid(PyMongoError): - """Raised when collection validation fails. - """ + """Raised when collection validation fails.""" class InvalidURI(ConfigurationError): - """Raised when trying to parse an invalid mongodb URI. + """Raised when trying to parse an invalid mongodb URI.""" - .. versionadded:: 1.5 - """ + +class DocumentTooLarge(InvalidDocument): + """Raised when an encoded document is too large for the connected server.""" -class UnsupportedOption(ConfigurationError): - """Exception for unsupported options. +class EncryptionError(PyMongoError): + """Raised when encryption or decryption fails. - .. versionadded:: 2.0 + This error always wraps another exception which can be retrieved via the + :attr:`cause` property. + + .. 
versionadded:: 3.9 """ + def __init__(self, cause: Exception) -> None: + super().__init__(str(cause)) + self.__cause = cause -class ExceededMaxWaiters(Exception): - """Raised when a thread tries to get a connection from a pool and - ``max_pool_size * waitQueueMultiple`` threads are already waiting. + @property + def cause(self) -> Exception: + """The exception that caused this encryption or decryption error.""" + return self.__cause - .. versionadded:: 2.6 - """ - pass + @property + def timeout(self) -> bool: + if isinstance(self.__cause, PyMongoError): + return self.__cause.timeout + return False -class DocumentTooLarge(InvalidDocument): - """Raised when an encoded document is too large for the connected server. +class EncryptedCollectionError(EncryptionError): + """Raised when creating a collection with encrypted_fields fails. + + .. versionadded:: 4.4 """ - pass + + def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: + super().__init__(cause) + self.__encrypted_fields = encrypted_fields + + @property + def encrypted_fields(self) -> Mapping[str, Any]: + """The encrypted_fields document that allows inferring which data keys are *known* to be created. + + Note that the returned document is not guaranteed to contain information about *all* of the data keys that + were created, for example in the case of an indefinite error like a timeout. Use the `cause` property to + determine whether a definite or indefinite error caused this error, and only rely on the accuracy of the + encrypted_fields if the error is definite. + """ + return self.__encrypted_fields + + +class _OperationCancelled(AutoReconnect): + """Internal error raised when a socket operation is cancelled.""" diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py new file mode 100644 index 0000000000..80acaa10c0 --- /dev/null +++ b/pymongo/event_loggers.py @@ -0,0 +1,226 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Example event logger classes. + +.. versionadded:: 3.11 + +These loggers can be registered using :func:`register` or +:class:`~pymongo.mongo_client.MongoClient`. + +``monitoring.register(CommandLogger())`` + +or + +``MongoClient(event_listeners=[CommandLogger()])`` + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. + +""" +from __future__ import annotations + +import logging + +from pymongo import monitoring + + +class CommandLogger(monitoring.CommandListener): + """A simple listener that logs command events. + + Listens for :class:`~pymongo.monitoring.CommandStartedEvent`, + :class:`~pymongo.monitoring.CommandSucceededEvent` and + :class:`~pymongo.monitoring.CommandFailedEvent` events and + logs them at the `INFO` severity level using :mod:`logging`. + .. 
versionadded:: 3.11 + """ + + def started(self, event: monitoring.CommandStartedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} started on server " + f"{event.connection_id}" + ) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"succeeded in {event.duration_micros} " + "microseconds" + ) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"failed in {event.duration_micros} " + "microseconds" + ) + + +class ServerLogger(monitoring.ServerListener): + """A simple listener that logs server discovery events. + + Listens for :class:`~pymongo.monitoring.ServerOpeningEvent`, + :class:`~pymongo.monitoring.ServerDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.ServerClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def opened(self, event: monitoring.ServerOpeningEvent) -> None: + logging.info(f"Server {event.server_address} added to topology {event.topology_id}") + + def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + f"Server {event.server_address} changed type from " + f"{event.previous_description.server_type_name} to " + f"{event.new_description.server_type_name}" + ) + + def closed(self, event: monitoring.ServerClosedEvent) -> None: + logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """A simple listener that logs server heartbeat events. + + Listens for :class:`~pymongo.monitoring.ServerHeartbeatStartedEvent`, + :class:`~pymongo.monitoring.ServerHeartbeatSucceededEvent`, + and :class:`~pymongo.monitoring.ServerHeartbeatFailedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + logging.info(f"Heartbeat sent to server {event.connection_id}") + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + # The reply.document attribute was added in PyMongo 3.4. + logging.info( + f"Heartbeat to server {event.connection_id} " + "succeeded with reply " + f"{event.reply.document}" + ) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + logging.warning( + f"Heartbeat to server {event.connection_id} failed with error {event.reply}" + ) + + +class TopologyLogger(monitoring.TopologyListener): + """A simple listener that logs server topology events. + + Listens for :class:`~pymongo.monitoring.TopologyOpenedEvent`, + :class:`~pymongo.monitoring.TopologyDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.TopologyClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. 
versionadded:: 3.11 + """ + + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} opened") + + def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: + logging.info(f"Topology description updated for topology id {event.topology_id}") + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + f"Topology {event.topology_id} changed type from " + f"{event.previous_description.topology_type_name} to " + f"{event.new_description.topology_type_name}" + ) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event: monitoring.TopologyClosedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} closed") + + +class ConnectionPoolLogger(monitoring.ConnectionPoolListener): + """A simple listener that logs server connection pool events. + + Listens for :class:`~pymongo.monitoring.PoolCreatedEvent`, + :class:`~pymongo.monitoring.PoolClearedEvent`, + :class:`~pymongo.monitoring.PoolClosedEvent`, + :~pymongo.monitoring.class:`ConnectionCreatedEvent`, + :class:`~pymongo.monitoring.ConnectionReadyEvent`, + :class:`~pymongo.monitoring.ConnectionClosedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutStartedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutFailedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckedOutEvent`, + and :class:`~pymongo.monitoring.ConnectionCheckedInEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. 
versionadded:: 3.11 + """ + + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: + logging.info(f"[pool {event.address}] pool created") + + def pool_ready(self, event: monitoring.PoolReadyEvent) -> None: + logging.info(f"[pool {event.address}] pool ready") + + def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: + logging.info(f"[pool {event.address}] pool cleared") + + def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: + logging.info(f"[pool {event.address}] pool closed") + + def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: + logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") + + def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" + ) + + def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] " + f'connection closed, reason: "{event.reason}"' + ) + + def connection_check_out_started( + self, event: monitoring.ConnectionCheckOutStartedEvent + ) -> None: + logging.info(f"[pool {event.address}] connection check out started") + + def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: + logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") + + def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" + ) + + def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" + ) diff --git a/pymongo/hello.py b/pymongo/hello.py new file mode 100644 index 0000000000..1eb40ed929 --- /dev/null +++ b/pymongo/hello.py @@ -0,0 +1,224 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
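The logger classes above are ordinary event listeners, so they can be registered globally or per client; a usage sketch::

    >>> from pymongo import MongoClient, monitoring
    >>> from pymongo.event_loggers import CommandLogger, ServerLogger
    >>> monitoring.register(CommandLogger())                    # applies to all clients
    >>> client = MongoClient(event_listeners=[ServerLogger()])  # applies to this client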
+ +"""Helpers for the 'hello' and legacy hello commands.""" +from __future__ import annotations + +import copy +import datetime +import itertools +from typing import Any, Generic, Mapping, Optional + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.server_type import SERVER_TYPE +from pymongo.typings import ClusterTime, _DocumentType + + +def _get_server_type(doc: Mapping[str, Any]) -> int: + """Determine the server type from a hello response.""" + if not doc.get("ok"): + return SERVER_TYPE.Unknown + + if doc.get("serviceId"): + return SERVER_TYPE.LoadBalancer + elif doc.get("isreplicaset"): + return SERVER_TYPE.RSGhost + elif doc.get("setName"): + if doc.get("hidden"): + return SERVER_TYPE.RSOther + elif doc.get(HelloCompat.PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get(HelloCompat.LEGACY_PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get("secondary"): + return SERVER_TYPE.RSSecondary + elif doc.get("arbiterOnly"): + return SERVER_TYPE.RSArbiter + else: + return SERVER_TYPE.RSOther + elif doc.get("msg") == "isdbgrid": + return SERVER_TYPE.Mongos + else: + return SERVER_TYPE.Standalone + + +class HelloCompat: + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" + + +class Hello(Generic[_DocumentType]): + """Parse a hello response from the server. + + .. versionadded:: 3.12 + """ + + __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") + + def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: + self._server_type = _get_server_type(doc) + self._doc: _DocumentType = doc + self._is_writable = self._server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.Standalone, + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + + self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable + self._awaitable = awaitable + + @property + def document(self) -> _DocumentType: + """The complete hello command response document. + + .. 
versionadded:: 3.4 + """ + return copy.copy(self._doc) + + @property + def server_type(self) -> int: + return self._server_type + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return set( + map( + common.clean_node, + itertools.chain( + self._doc.get("hosts", []), + self._doc.get("passives", []), + self._doc.get("arbiters", []), + ), + ) + ) + + @property + def tags(self) -> Mapping[str, Any]: + """Replica set member tags or empty dict.""" + return self._doc.get("tags", {}) + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + if self._doc.get("primary"): + return common.partition_node(self._doc["primary"]) + else: + return None + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._doc.get("setName") + + @property + def max_bson_size(self) -> int: + return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) + + @property + def max_message_size(self) -> int: + return self._doc.get("maxMessageSizeBytes", common.MAX_MESSAGE_SIZE) + + @property + def max_write_batch_size(self) -> int: + return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) + + @property + def min_wire_version(self) -> int: + return self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) + + @property + def max_wire_version(self) -> int: + return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) + + @property + def set_version(self) -> Optional[int]: + return self._doc.get("setVersion") + + @property + def election_id(self) -> Optional[ObjectId]: + return self._doc.get("electionId") + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._doc.get("$clusterTime") + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._doc.get("logicalSessionTimeoutMinutes") + + @property + def is_writable(self) -> bool: + return self._is_writable + + @property + def is_readable(self) -> bool: + return self._is_readable + + @property + def me(self) -> Optional[tuple[str, int]]: + me = self._doc.get("me") + if me: + return common.clean_node(me) + return None + + @property + def last_write_date(self) -> Optional[datetime.datetime]: + return self._doc.get("lastWrite", {}).get("lastWriteDate") + + @property + def compressors(self) -> Optional[list[str]]: + return self._doc.get("compression") + + @property + def sasl_supported_mechs(self) -> list[str]: + """Supported authentication mechanisms for the current user. 
+ + For example:: + + >>> hello.sasl_supported_mechs + ["SCRAM-SHA-1", "SCRAM-SHA-256"] + + """ + return self._doc.get("saslSupportedMechs", []) + + @property + def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: + """The speculativeAuthenticate field.""" + return self._doc.get("speculativeAuthenticate") + + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._doc.get("topologyVersion") + + @property + def awaitable(self) -> bool: + return self._awaitable + + @property + def service_id(self) -> Optional[ObjectId]: + return self._doc.get("serviceId") + + @property + def hello_ok(self) -> bool: + return self._doc.get("helloOk", False) + + @property + def connection_id(self) -> Optional[int]: + return self._doc.get("connectionId") diff --git a/pymongo/helpers.py b/pymongo/helpers.py deleted file mode 100644 index 0ecd1f479c..0000000000 --- a/pymongo/helpers.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Bits and pieces used by the driver that don't really fit elsewhere.""" - -import random -import struct - -import bson -import pymongo - -from bson.binary import OLD_UUID_SUBTYPE -from bson.son import SON -from pymongo.errors import (AutoReconnect, - CursorNotFound, - DuplicateKeyError, - OperationFailure, - ExecutionTimeout, - WTimeoutError) - - -def _index_list(key_or_list, direction=None): - """Helper to generate a list of (key, direction) pairs. - - Takes such a list, or a single key, or a single key and direction. - """ - if direction is not None: - return [(key_or_list, direction)] - else: - if isinstance(key_or_list, basestring): - return [(key_or_list, pymongo.ASCENDING)] - elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, " - "key_or_list must be an instance of list") - return key_or_list - - -def _index_document(index_list): - """Helper to generate an index specifying document. - - Takes a list of (key, direction) pairs. - """ - if isinstance(index_list, dict): - raise TypeError("passing a dict to sort/create_index/hint is not " - "allowed - use a list of tuples instead. did you " - "mean %r?" % list(index_list.iteritems())) - elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " - "not: " + repr(index_list)) - if not len(index_list): - raise ValueError("key_or_list must not be the empty list") - - index = SON() - for (key, value) in index_list: - if not isinstance(key, basestring): - raise TypeError("first item in each key pair must be a string") - if not isinstance(value, (basestring, int, dict)): - raise TypeError("second item in each key pair must be 1, -1, " - "'2d', 'geoHaystack', or another valid MongoDB " - "index specifier.") - index[key] = value - return index - - -def _unpack_response(response, cursor_id=None, as_class=dict, - tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, - compile_re=True): - """Unpack a response from the database. 
- - Check the response for errors and unpack, returning a dictionary - containing the response data. - - :Parameters: - - `response`: byte string as returned from the database - - `cursor_id` (optional): cursor_id we sent to get this response - - used for raising an informative exception when we get cursor id not - valid at server response - - `as_class` (optional): class to use for resulting documents - """ - response_flag = struct.unpack(" dict[str, float]: + from pymongo import _csot + + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + +def _gen_index_name(keys: _IndexList) -> str: + """Generate an index name from the set of fields it is over.""" + return "_".join(["{}_{}".format(*item) for item in keys]) + + +def _index_list( + key_or_list: _Hint, direction: Optional[Union[int, str]] = None +) -> Sequence[tuple[str, Union[int, str, Mapping[str, Any]]]]: + """Helper to generate a list of (key, direction) pairs. + + Takes such a list, or a single key, or a single key and direction. + """ + if direction is not None: + if not isinstance(key_or_list, str): + raise TypeError(f"Expected a string and a direction, not {type(key_or_list)}") + return [(key_or_list, direction)] + else: + if isinstance(key_or_list, str): + return [(key_or_list, ASCENDING)] + elif isinstance(key_or_list, abc.ItemsView): + return list(key_or_list) # type: ignore[arg-type] + elif isinstance(key_or_list, abc.Mapping): + return list(key_or_list.items()) + elif not isinstance(key_or_list, (list, tuple)): + raise TypeError( + f"if no direction is specified, key_or_list must be an instance of list, not {type(key_or_list)}" + ) + values: list[tuple[str, int]] = [] + for item in key_or_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + values.append(item) + return values + + +def _index_document(index_list: _IndexList) -> dict[str, Any]: + """Helper to generate an index specifying document. + + Takes a list of (key, direction) pairs. 
+ """ + if not isinstance(index_list, (list, tuple, abc.Mapping)): + raise TypeError( + "must use a dictionary or a list of (key, direction) pairs, not: " + repr(index_list) + ) + if not len(index_list): + raise ValueError("key_or_list must not be empty") + + index: dict[str, Any] = {} + + if isinstance(index_list, abc.Mapping): + for key in index_list: + value = index_list[key] + _validate_index_key_pair(key, value) + index[key] = value + else: + for item in index_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + key, value = item + _validate_index_key_pair(key, value) + index[key] = value + return index + + +def _validate_index_key_pair(key: Any, value: Any) -> None: + if not isinstance(key, str): + raise TypeError(f"first item in each key pair must be an instance of str, not {type(key)}") + if not isinstance(value, (str, int, abc.Mapping)): + raise TypeError( + "second item in each key pair must be 1, -1, " + "'2d', or another valid MongoDB index specifier." + f", not {type(value)}" + ) + + +def _check_command_response( + response: _DocumentOut, + max_wire_version: Optional[int], + allowable_errors: Optional[Container[Union[int, str]]] = None, + parse_write_concern_error: bool = False, + pool_opts: Optional[PoolOptions] = None, +) -> None: + """Check the response to a command for errors.""" + if "ok" not in response: + # Server didn't recognize our message as a command. + raise OperationFailure( + response.get("$err"), # type: ignore[arg-type] + response.get("code"), + response, + max_wire_version, + ) + + if parse_write_concern_error and "writeConcernError" in response: + _error = response["writeConcernError"] + _labels = response.get("errorLabels") + if _labels: + _error.update({"errorLabels": _labels}) + _raise_write_concern_error(_error) + + if response["ok"]: + return + + details = response + # Mongos returns the error details in a 'raw' object + # for some errors. + if "raw" in response: + for shard in response["raw"].values(): + # Grab the first non-empty raw error from a shard. + if shard.get("errmsg") and not shard.get("ok"): + details = shard + break + + errmsg = details["errmsg"] + code = details.get("code") + + # For allowable errors, only check for error messages when the code is not + # included. + if allowable_errors: + if code is not None: + if code in allowable_errors: + return + elif errmsg in allowable_errors: + return + + # Server is "not primary" or "recovering" + if code is not None: + if code in _NOT_PRIMARY_CODES: + raise NotPrimaryError(errmsg, response) + elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg: + raise NotPrimaryError(errmsg, response) + + # Other errors + # findAndModify with upsert can raise duplicate key error + if code in (11000, 11001, 12582): + raise DuplicateKeyError(errmsg, code, response, max_wire_version) + elif code == 50: + # Append timeout details to MaxTimeMSExpired responses. + if pool_opts: + timeout_details = _get_timeout_details(pool_opts) + errmsg += format_timeout_details(timeout_details) + raise ExecutionTimeout(errmsg, code, response, max_wire_version) + elif code == 43: + raise CursorNotFound(errmsg, code, response, max_wire_version) + + raise OperationFailure(errmsg, code, response, max_wire_version) + + +def _raise_last_write_error(write_errors: list[Any]) -> NoReturn: + # If the last batch had multiple errors only report + # the last error to emulate continue_on_error. 
+ error = write_errors[-1] + if error.get("code") == 11000: + raise DuplicateKeyError(error.get("errmsg"), 11000, error) + raise WriteError(error.get("errmsg"), error.get("code"), error) + + +def _raise_write_concern_error(error: Any) -> NoReturn: + if _wtimeout_error(error): + # Make sure we raise WTimeoutError + raise WTimeoutError(error.get("errmsg"), error.get("code"), error) + raise WriteConcernError(error.get("errmsg"), error.get("code"), error) + + +def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: + """Return the writeConcernError or None.""" + wce = result.get("writeConcernError") + if wce: + # The server reports errorLabels at the top level but it's more + # convenient to attach it to the writeConcernError doc itself. + error_labels = result.get("errorLabels") + if error_labels: + # Copy to avoid changing the original document. + wce = wce.copy() + wce["errorLabels"] = error_labels + return wce + + +def _check_write_command_response(result: Mapping[str, Any]) -> None: + """Backward compatibility helper for write command error handling.""" + # Prefer write errors over write concern errors + write_errors = result.get("writeErrors") + if write_errors: + _raise_last_write_error(write_errors) + + wce = _get_wce_doc(result) + if wce: + _raise_write_concern_error(wce) + + +def _fields_list_to_dict( + fields: Union[Mapping[str, Any], Iterable[str]], option_name: str +) -> Mapping[str, Any]: + """Takes a sequence of field names and returns a matching dictionary. + + ["a", "b"] becomes {"a": 1, "b": 1} + + and + + ["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1} + """ + if isinstance(fields, abc.Mapping): + return fields + + if isinstance(fields, (abc.Sequence, abc.Set)): + if not all(isinstance(field, str) for field in fields): + raise TypeError(f"{option_name} must be a list of key names, each an instance of str") + return dict.fromkeys(fields, 1) + + raise TypeError(f"{option_name} must be a mapping or list of key names") + + +def _handle_exception() -> None: + """Print exceptions raised by subscribers to stderr.""" + # Heavily influenced by logging.Handler.handleError. + + # See note here: + # https://docs.python.org/3.4/library/sys.html#sys.__stderr__ + if sys.stderr: + einfo = sys.exc_info() + try: + traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) + except OSError: + pass + finally: + del einfo diff --git a/pymongo/lock.py b/pymongo/lock.py new file mode 100644 index 0000000000..ad990fce3f --- /dev/null +++ b/pymongo/lock.py @@ -0,0 +1,92 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
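Aside: a quick illustration of the index and projection helpers rewritten above. Behavior is read directly from the code shown; these are private helpers, so the calls below are for exposition only:

    from pymongo import ASCENDING, DESCENDING

    _index_list("a")                                  # [("a", ASCENDING)]
    _index_list("a", DESCENDING)                      # [("a", DESCENDING)]
    _index_list([("a", 1), "b"])                      # [("a", 1), ("b", ASCENDING)]
    _index_document([("loc", "2d"), ("name", 1)])     # {"loc": "2d", "name": 1}
    _fields_list_to_dict(["a.b", "c"], "projection")  # {"a.b": 1, "c": 1}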
+ +"""Internal helpers for lock and condition coordination primitives.""" + +from __future__ import annotations + +import asyncio +import os +import sys +import threading +import weakref +from asyncio import wait_for +from typing import Any, Optional, TypeVar + +import pymongo._asyncio_lock + +_HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") + +# References to instances of _create_lock +_forkable_locks: weakref.WeakSet[threading.Lock] = weakref.WeakSet() + +_T = TypeVar("_T") + +# Needed to support 3.13 asyncio fixes (https://github.com/python/cpython/issues/112202) +# in older versions of Python +if sys.version_info >= (3, 13): + Lock = asyncio.Lock + Condition = asyncio.Condition +else: + Lock = pymongo._asyncio_lock.Lock + Condition = pymongo._asyncio_lock.Condition + + +def _create_lock() -> threading.Lock: + """Represents a lock that is tracked upon instantiation using a WeakSet and + reset by pymongo upon forking. + """ + lock = threading.Lock() + if _HAS_REGISTER_AT_FORK: + _forkable_locks.add(lock) + return lock + + +def _async_create_lock() -> Lock: + """Represents an asyncio.Lock.""" + return Lock() + + +def _create_condition( + lock: threading.Lock, condition_class: Optional[Any] = None +) -> threading.Condition: + """Represents a threading.Condition.""" + if condition_class: + return condition_class(lock) + return threading.Condition(lock) + + +def _async_create_condition(lock: Lock, condition_class: Optional[Any] = None) -> Condition: + """Represents an asyncio.Condition.""" + if condition_class: + return condition_class(lock) + return Condition(lock) + + +def _release_locks() -> None: + # Completed the fork, reset all the locks in the child. + for lock in _forkable_locks: + if lock.locked(): + lock.release() + + +async def _async_cond_wait(condition: Condition, timeout: Optional[float]) -> bool: + try: + return await wait_for(condition.wait(), timeout) + except asyncio.TimeoutError: + return False + + +def _cond_wait(condition: threading.Condition, timeout: Optional[float]) -> bool: + return condition.wait(timeout) diff --git a/pymongo/logger.py b/pymongo/logger.py new file mode 100644 index 0000000000..1b3fe43b86 --- /dev/null +++ b/pymongo/logger.py @@ -0,0 +1,189 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import enum +import logging +import os +import warnings +from typing import Any + +from bson import UuidRepresentation, json_util +from bson.json_util import JSONOptions, _truncate_documents +from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason + + +class _CommandStatusMessage(str, enum.Enum): + STARTED = "Command started" + SUCCEEDED = "Command succeeded" + FAILED = "Command failed" + + +class _ServerSelectionStatusMessage(str, enum.Enum): + STARTED = "Server selection started" + SUCCEEDED = "Server selection succeeded" + FAILED = "Server selection failed" + WAITING = "Waiting for suitable server to become available" + + +class _ConnectionStatusMessage(str, enum.Enum): + POOL_CREATED = "Connection pool created" + POOL_READY = "Connection pool ready" + POOL_CLOSED = "Connection pool closed" + POOL_CLEARED = "Connection pool cleared" + + CONN_CREATED = "Connection created" + CONN_READY = "Connection ready" + CONN_CLOSED = "Connection closed" + + CHECKOUT_STARTED = "Connection checkout started" + CHECKOUT_SUCCEEDED = "Connection checked out" + CHECKOUT_FAILED = "Connection checkout failed" + CHECKEDIN = "Connection checked in" + + +class _SDAMStatusMessage(str, enum.Enum): + START_TOPOLOGY = "Starting topology monitoring" + STOP_TOPOLOGY = "Stopped topology monitoring" + START_SERVER = "Starting server monitoring" + STOP_SERVER = "Stopped server monitoring" + TOPOLOGY_CHANGE = "Topology description changed" + HEARTBEAT_START = "Server heartbeat started" + HEARTBEAT_SUCCESS = "Server heartbeat succeeded" + HEARTBEAT_FAIL = "Server heartbeat failed" + + +_DEFAULT_DOCUMENT_LENGTH = 1000 +_SENSITIVE_COMMANDS = [ + "authenticate", + "saslStart", + "saslContinue", + "getnonce", + "createUser", + "updateUser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +] +_HELLO_COMMANDS = ["hello", "ismaster", "isMaster"] +_REDACTED_FAILURE_FIELDS = ["code", "codeName", "errorLabels"] +_DOCUMENT_NAMES = ["command", "reply", "failure"] +_JSON_OPTIONS = JSONOptions(uuid_representation=UuidRepresentation.STANDARD) +_COMMAND_LOGGER = logging.getLogger("pymongo.command") +_CONNECTION_LOGGER = logging.getLogger("pymongo.connection") +_SERVER_SELECTION_LOGGER = logging.getLogger("pymongo.serverSelection") +_CLIENT_LOGGER = logging.getLogger("pymongo.client") +_SDAM_LOGGER = logging.getLogger("pymongo.topology") +_VERBOSE_CONNECTION_ERROR_REASONS = { + ConnectionClosedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionCheckOutFailedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionClosedReason.STALE: "Connection pool was stale", + ConnectionClosedReason.ERROR: "An error occurred while using the connection", + ConnectionCheckOutFailedReason.CONN_ERROR: "An error occurred while trying to establish a new connection", + ConnectionClosedReason.IDLE: "Connection was idle too long", + ConnectionCheckOutFailedReason.TIMEOUT: "Connection exceeded the specified timeout", +} + + +def _log_client_error() -> None: + # This is called from a daemon thread so check for None to account for interpreter shutdown. + logger = _CLIENT_LOGGER + if logger: + # logger.exception includes the full traceback. 
+ logger.exception("MongoClient background task encountered an error:") + + +def _debug_log(logger: logging.Logger, **fields: Any) -> None: + logger.debug(LogMessage(**fields)) + + +def _verbose_connection_error_reason(reason: str) -> str: + return _VERBOSE_CONNECTION_ERROR_REASONS.get(reason, reason) + + +def _info_log(logger: logging.Logger, **fields: Any) -> None: + logger.info(LogMessage(**fields)) + + +def _log_or_warn(logger: logging.Logger, message: str) -> None: + if logger.isEnabledFor(logging.INFO): + logger.info(message) + else: + # stacklevel=4 ensures that the warning is for the user's code. + warnings.warn(message, UserWarning, stacklevel=4) + + +class LogMessage: + __slots__ = ("_kwargs", "_redacted") + + def __init__(self, **kwargs: Any): + self._kwargs = kwargs + self._redacted = False + + def __str__(self) -> str: + self._redact() + return "%s" % ( + json_util.dumps( + self._kwargs, json_options=_JSON_OPTIONS, default=lambda o: o.__repr__() + ) + ) + + def _is_sensitive(self, doc_name: str) -> bool: + is_speculative_authenticate = ( + self._kwargs.pop("speculative_authenticate", False) + or "speculativeAuthenticate" in self._kwargs[doc_name] + ) + is_sensitive_command = ( + "commandName" in self._kwargs and self._kwargs["commandName"] in _SENSITIVE_COMMANDS + ) + + is_sensitive_hello = ( + self._kwargs.get("commandName", None) in _HELLO_COMMANDS and is_speculative_authenticate + ) + + return is_sensitive_command or is_sensitive_hello + + def _redact(self) -> None: + if self._redacted: + return + self._kwargs = {k: v for k, v in self._kwargs.items() if v is not None} + if "durationMS" in self._kwargs and hasattr(self._kwargs["durationMS"], "total_seconds"): + self._kwargs["durationMS"] = self._kwargs["durationMS"].total_seconds() * 1000 + if "serviceId" in self._kwargs: + self._kwargs["serviceId"] = str(self._kwargs["serviceId"]) + document_length = int(os.getenv("MONGOB_LOG_MAX_DOCUMENT_LENGTH", _DEFAULT_DOCUMENT_LENGTH)) + if document_length < 0: + document_length = _DEFAULT_DOCUMENT_LENGTH + is_server_side_error = self._kwargs.pop("isServerSideError", False) + + for doc_name in _DOCUMENT_NAMES: + doc = self._kwargs.get(doc_name) + if doc: + if doc_name == "failure" and is_server_side_error: + doc = {k: v for k, v in doc.items() if k in _REDACTED_FAILURE_FIELDS} + if doc_name != "failure" and self._is_sensitive(doc_name): + doc = json_util.dumps({}) + else: + truncated_doc = _truncate_documents(doc, document_length)[0] + doc = json_util.dumps( + truncated_doc, + json_options=_JSON_OPTIONS, + default=lambda o: o.__repr__(), + ) + if len(doc) > document_length: + doc = ( + doc.encode()[:document_length].decode("unicode-escape", "ignore") + ) + "..." + self._kwargs[doc_name] = doc + self._redacted = True diff --git a/pymongo/master_slave_connection.py b/pymongo/master_slave_connection.py deleted file mode 100644 index dbee7897ca..0000000000 --- a/pymongo/master_slave_connection.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Master-Slave connection to Mongo. - -Performs all writes to Master instance and distributes reads among all -slaves. Reads are tried on each slave in turn until the read succeeds -or all slaves failed. -""" - -from pymongo import helpers, thread_util -from pymongo import ReadPreference -from pymongo.common import BaseObject -from pymongo.mongo_client import MongoClient -from pymongo.database import Database -from pymongo.errors import AutoReconnect - - -class MasterSlaveConnection(BaseObject): - """A master-slave connection to Mongo. - """ - - def __init__(self, master, slaves=[], document_class=dict, tz_aware=False): - """Create a new Master-Slave connection. - - The resultant connection should be interacted with using the same - mechanisms as a regular `MongoClient`. The `MongoClient` instances used - to create this `MasterSlaveConnection` can themselves make use of - connection pooling, etc. `MongoClient` instances used as slaves should - be created with the read_preference option set to - :attr:`~pymongo.read_preferences.ReadPreference.SECONDARY`. Write - concerns are inherited from `master` and can be changed in this - instance. - - Raises TypeError if `master` is not an instance of `MongoClient` or - slaves is not a list of at least one `MongoClient` instances. - - :Parameters: - - `master`: `MongoClient` instance for the writable Master - - `slaves`: list of `MongoClient` instances for the - read-only slaves - - `document_class` (optional): default class to use for - documents returned from queries on this connection - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`MasterSlaveConnection` will be timezone - aware (otherwise they will be naive) - """ - if not isinstance(master, MongoClient): - raise TypeError("master must be a MongoClient instance") - if not isinstance(slaves, list) or len(slaves) == 0: - raise TypeError("slaves must be a list of length >= 1") - - for slave in slaves: - if not isinstance(slave, MongoClient): - raise TypeError("slave %r is not an instance of MongoClient" % - slave) - - super(MasterSlaveConnection, - self).__init__(read_preference=ReadPreference.SECONDARY, - safe=master.safe, - **master.write_concern) - - self.__master = master - self.__slaves = slaves - self.__document_class = document_class - self.__tz_aware = tz_aware - self.__request_counter = thread_util.Counter(master.use_greenlets) - - @property - def master(self): - return self.__master - - @property - def slaves(self): - return self.__slaves - - @property - def is_mongos(self): - """If this MasterSlaveConnection is connected to mongos (always False) - - .. versionadded:: 2.3 - """ - return False - - @property - def use_greenlets(self): - """Whether calling :meth:`start_request` assigns greenlet-local, - rather than thread-local, sockets. - - .. versionadded:: 2.4.2 - """ - return self.master.use_greenlets - - def get_document_class(self): - return self.__document_class - - def set_document_class(self, klass): - self.__document_class = klass - - document_class = property(get_document_class, set_document_class, - doc="""Default class to use for documents - returned on this connection.""") - - @property - def tz_aware(self): - return self.__tz_aware - - @property - def max_bson_size(self): - """Return the maximum size BSON object the connected master - accepts in bytes. Defaults to 4MB in server < 1.7.4. 
- - .. versionadded:: 2.6 - """ - return self.master.max_bson_size - - @property - def max_message_size(self): - """Return the maximum message size the connected master - accepts in bytes. - - .. versionadded:: 2.6 - """ - return self.master.max_message_size - - @property - def min_wire_version(self): - """The minWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.master.min_wire_version - - @property - def max_wire_version(self): - """The maxWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.master.max_wire_version - - @property - def max_write_batch_size(self): - """The maxWriteBatchSize reported by the server. - - Returns a default value when connected to server versions prior to - MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.master.max_write_batch_size - - def disconnect(self): - """Disconnect from MongoDB. - - Disconnecting will call disconnect on all master and slave - connections. - - .. seealso:: Module :mod:`~pymongo.mongo_client` - .. versionadded:: 1.10.1 - """ - self.__master.disconnect() - for slave in self.__slaves: - slave.disconnect() - - def set_cursor_manager(self, manager_class): - """Set the cursor manager for this connection. - - Helper to set cursor manager for each individual `MongoClient` instance - that make up this `MasterSlaveConnection`. - """ - self.__master.set_cursor_manager(manager_class) - for slave in self.__slaves: - slave.set_cursor_manager(manager_class) - - def _ensure_connected(self, sync): - """Ensure the master is connected to a mongod/s. - """ - self.__master._ensure_connected(sync) - - # _connection_to_use is a hack that we need to include to make sure - # that killcursor operations can be sent to the same instance on which - # the cursor actually resides... - def _send_message(self, message, - with_last_error=False, - command=False, _connection_to_use=None): - """Say something to Mongo. - - Sends a message on the Master connection. This is used for inserts, - updates, and deletes. - - Raises ConnectionFailure if the message cannot be sent. Returns the - request id of the sent message. - - :Parameters: - - `operation`: opcode of the message - - `data`: data to send - - `safe`: perform a getLastError after sending the message - """ - if _connection_to_use is None or _connection_to_use == -1: - return self.__master._send_message(message, - with_last_error, command) - return self.__slaves[_connection_to_use]._send_message( - message, with_last_error, command, check_primary=False) - - # _connection_to_use is a hack that we need to include to make sure - # that getmore operations can be sent to the same instance on which - # the cursor actually resides... - def _send_message_with_response(self, message, _connection_to_use=None, - _must_use_master=False, **kwargs): - """Receive a message from Mongo. - - Sends the given message and returns a (connection_id, response) pair. - - :Parameters: - - `operation`: opcode of the message to send - - `data`: data to send - """ - if _connection_to_use is not None: - if _connection_to_use == -1: - member = self.__master - conn = -1 - else: - member = self.__slaves[_connection_to_use] - conn = _connection_to_use - return (conn, - member._send_message_with_response(message, **kwargs)[1]) - - # _must_use_master is set for commands, which must be sent to the - # master instance. 
any queries in a request must be sent to the - # master since that is where writes go. - if _must_use_master or self.in_request(): - return (-1, self.__master._send_message_with_response(message, - **kwargs)[1]) - - # Iterate through the slaves randomly until we have success. Raise - # reconnect if they all fail. - for connection_id in helpers.shuffled(xrange(len(self.__slaves))): - try: - slave = self.__slaves[connection_id] - return (connection_id, - slave._send_message_with_response(message, - **kwargs)[1]) - except AutoReconnect: - pass - - raise AutoReconnect("failed to connect to slaves") - - def start_request(self): - """Start a "request". - - Start a sequence of operations in which order matters. Note - that all operations performed within a request will be sent - using the Master connection. - """ - self.__request_counter.inc() - self.master.start_request() - - def in_request(self): - return bool(self.__request_counter.get()) - - def end_request(self): - """End the current "request". - - See documentation for `MongoClient.end_request`. - """ - self.__request_counter.dec() - self.master.end_request() - - def __eq__(self, other): - if isinstance(other, MasterSlaveConnection): - us = (self.__master, self.slaves) - them = (other.__master, other.__slaves) - return us == them - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "MasterSlaveConnection(%r, %r)" % (self.__master, self.__slaves) - - def __getattr__(self, name): - """Get a database by name. - - Raises InvalidName if an invalid database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return Database(self, name) - - def __getitem__(self, name): - """Get a database by name. - - Raises InvalidName if an invalid database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return self.__getattr__(name) - - def close_cursor(self, cursor_id, connection_id): - """Close a single database cursor. - - Raises TypeError if cursor_id is not an instance of (int, long). What - closing the cursor actually means depends on this connection's cursor - manager. - - :Parameters: - - `cursor_id`: cursor id to close - - `connection_id`: id of the `MongoClient` instance where the cursor - was opened - """ - if connection_id == -1: - return self.__master.close_cursor(cursor_id) - return self.__slaves[connection_id].close_cursor(cursor_id) - - def database_names(self): - """Get a list of all database names. - """ - return self.__master.database_names() - - def drop_database(self, name_or_database): - """Drop a database. 
- - :Parameters: - - `name_or_database`: the name of a database to drop or the object - itself - """ - return self.__master.drop_database(name_or_database) - - def __iter__(self): - return self - - def next(self): - raise TypeError("'MasterSlaveConnection' object is not iterable") - - def _cached(self, database_name, collection_name, index_name): - return self.__master._cached(database_name, - collection_name, index_name) - - def _cache_index(self, database_name, collection_name, - index_name, cache_for): - return self.__master._cache_index(database_name, collection_name, - index_name, cache_for) - - def _purge_index(self, database_name, - collection_name=None, index_name=None): - return self.__master._purge_index(database_name, - collection_name, - index_name) diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py new file mode 100644 index 0000000000..5f1e404720 --- /dev/null +++ b/pymongo/max_staleness_selectors.py @@ -0,0 +1,124 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Criteria to select ServerDescriptions based on maxStalenessSeconds. + +The Max Staleness Spec says: When there is a known primary P, +a secondary S's staleness is estimated with this formula: + + (S.lastUpdateTime - S.lastWriteDate) - (P.lastUpdateTime - P.lastWriteDate) + + heartbeatFrequencyMS + +When there is no known primary, a secondary S's staleness is estimated with: + + SMax.lastWriteDate - S.lastWriteDate + heartbeatFrequencyMS + +where "SMax" is the secondary with the greatest lastWriteDate. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pymongo.errors import ConfigurationError +from pymongo.server_type import SERVER_TYPE + +if TYPE_CHECKING: + from pymongo.server_selectors import Selection + + +# Constant defined in Max Staleness Spec: An idle primary writes a no-op every +# 10 seconds to refresh secondaries' lastWriteDate values. +IDLE_WRITE_PERIOD = 10 +SMALLEST_MAX_STALENESS = 90 + + +def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> None: + # We checked for max staleness -1 before this, it must be positive here. + if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: + raise ConfigurationError( + "maxStalenessSeconds must be at least heartbeatFrequencyMS +" + " %d seconds. maxStalenessSeconds is set to %d," + " heartbeatFrequencyMS is set to %d." + % (IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000) + ) + + if max_staleness < SMALLEST_MAX_STALENESS: + raise ConfigurationError( + "maxStalenessSeconds must be at least %d. " + "maxStalenessSeconds is set to %d." 
% (SMALLEST_MAX_STALENESS, max_staleness)
+        )
+
+
+def _with_primary(max_staleness: int, selection: Selection) -> Selection:
+    """Apply max_staleness, in seconds, to a Selection with a known primary."""
+    primary = selection.primary
+    assert primary
+    sds = []
+
+    for s in selection.server_descriptions:
+        if s.server_type == SERVER_TYPE.RSSecondary:
+            # See max-staleness.rst for explanation of this formula.
+            assert s.last_write_date and primary.last_write_date  # noqa: PT018
+            staleness = (
+                (s.last_update_time - s.last_write_date)
+                - (primary.last_update_time - primary.last_write_date)
+                + selection.heartbeat_frequency
+            )
+
+            if staleness <= max_staleness:
+                sds.append(s)
+        else:
+            sds.append(s)
+
+    return selection.with_server_descriptions(sds)
+
+
+def _no_primary(max_staleness: int, selection: Selection) -> Selection:
+    """Apply max_staleness, in seconds, to a Selection with no known primary."""
+    # Secondary that's replicated the most recent writes.
+    smax = selection.secondary_with_max_last_write_date()
+    if not smax:
+        # No secondaries and no primary, short-circuit out of here.
+        return selection.with_server_descriptions([])
+
+    sds = []
+
+    for s in selection.server_descriptions:
+        if s.server_type == SERVER_TYPE.RSSecondary:
+            # See max-staleness.rst for explanation of this formula.
+            assert smax.last_write_date and s.last_write_date  # noqa: PT018
+            staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency
+
+            if staleness <= max_staleness:
+                sds.append(s)
+        else:
+            sds.append(s)
+
+    return selection.with_server_descriptions(sds)
+
+
+def select(max_staleness: int, selection: Selection) -> Selection:
+    """Apply max_staleness, in seconds, to a Selection."""
+    if max_staleness == -1:
+        return selection
+
+    # Server Selection Spec: If the TopologyType is ReplicaSetWithPrimary or
+    # ReplicaSetNoPrimary, a client MUST raise an error if maxStaleness <
+    # heartbeatFrequency + IDLE_WRITE_PERIOD, or if maxStaleness < 90.
+    _validate_max_staleness(max_staleness, selection.heartbeat_frequency)
+
+    if selection.primary:
+        return _with_primary(max_staleness, selection)
+    else:
+        return _no_primary(max_staleness, selection)
diff --git a/pymongo/member.py b/pymongo/member.py
deleted file mode 100644
index dbdafdf0fa..0000000000
--- a/pymongo/member.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2013-2014 MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You
-# may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-"""Represent a mongod / mongos instance"""
-
-from pymongo import common
-from pymongo.errors import ConfigurationError
-from pymongo.read_preferences import ReadPreference
-
-# Member states
-PRIMARY = 1
-SECONDARY = 2
-ARBITER = 3
-OTHER = 4
-
-
-# TODO: rename 'Server' or 'ServerDescription'.
-class Member(object):
-    """Immutable representation of one server.
-
-    :Parameters:
-      - `host`: A (host, port) pair
-      - `connection_pool`: A Pool instance
-      - `ismaster_response`: A dict, MongoDB's ismaster response
-      - `ping_time`: A MovingAverage instance
-    """
-    # For unittesting only.
Use under no circumstances! - _host_to_ping_time = {} - - def __init__(self, host, connection_pool, ismaster_response, ping_time): - self.host = host - self.pool = connection_pool - self.ismaster_response = ismaster_response - self.ping_time = ping_time - self.is_mongos = (ismaster_response.get('msg') == 'isdbgrid') - - if ismaster_response['ismaster']: - self.state = PRIMARY - elif ismaster_response.get('secondary'): - self.state = SECONDARY - elif ismaster_response.get('arbiterOnly'): - self.state = ARBITER - else: - self.state = OTHER - - self.set_name = ismaster_response.get('setName') - self.tags = ismaster_response.get('tags', {}) - self.max_bson_size = ismaster_response.get( - 'maxBsonObjectSize', common.MAX_BSON_SIZE) - self.max_message_size = ismaster_response.get( - 'maxMessageSizeBytes', 2 * self.max_bson_size) - self.min_wire_version = ismaster_response.get( - 'minWireVersion', common.MIN_WIRE_VERSION) - self.max_wire_version = ismaster_response.get( - 'maxWireVersion', common.MAX_WIRE_VERSION) - self.max_write_batch_size = ismaster_response.get( - 'maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) - - # self.min/max_wire_version is the server's wire protocol. - # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports. - if ( - # Server too new. - common.MAX_SUPPORTED_WIRE_VERSION < self.min_wire_version - # Server too old. - or common.MIN_SUPPORTED_WIRE_VERSION > self.max_wire_version - ): - raise ConfigurationError( - "Server at %s:%d uses wire protocol versions %d through %d, " - "but PyMongo only supports %d through %d" - % (self.host[0], self.host[1], - self.min_wire_version, self.max_wire_version, - common.MIN_SUPPORTED_WIRE_VERSION, - common.MAX_SUPPORTED_WIRE_VERSION)) - - def clone_with(self, ismaster_response, ping_time_sample): - """Get a clone updated with ismaster response and a single ping time. - """ - ping_time = self.ping_time.clone_with(ping_time_sample) - return Member(self.host, self.pool, ismaster_response, ping_time) - - @property - def is_primary(self): - return self.state == PRIMARY - - @property - def is_secondary(self): - return self.state == SECONDARY - - @property - def is_arbiter(self): - return self.state == ARBITER - - def get_avg_ping_time(self): - """Get a moving average of this member's ping times. - """ - if self.host in Member._host_to_ping_time: - # Simulate ping times for unittesting - return Member._host_to_ping_time[self.host] - - return self.ping_time.get() - - def matches_mode(self, mode): - assert not self.is_mongos, \ - "Tried to match read preference mode on a mongos Member" - - if mode == ReadPreference.PRIMARY and not self.is_primary: - return False - - if mode == ReadPreference.SECONDARY and not self.is_secondary: - return False - - # If we're not primary or secondary, then we're in a state like - # RECOVERING and we don't match any mode - return self.is_primary or self.is_secondary - - def matches_tags(self, tags): - """Return True if this member's tags are a superset of the passed-in - tags. E.g., if this member is tagged {'dc': 'ny', 'rack': '1'}, - then it matches {'dc': 'ny'}. - """ - for key, value in tags.items(): - if key not in self.tags or self.tags[key] != value: - return False - - return True - - def matches_tag_sets(self, tag_sets): - """Return True if this member matches any of the tag sets, e.g. 
-        [{'dc': 'ny'}, {'dc': 'la'}, {}]
-        """
-        for tags in tag_sets:
-            if self.matches_tags(tags):
-                return True
-
-        return False
-
-    def __str__(self):
-        return '<Member %s:%d primary=%r>' % (
-            self.host[0], self.host[1], self.is_primary)
diff --git a/pymongo/message.py b/pymongo/message.py
index 17c09c0004..0f3aaaba77 100644
--- a/pymongo/message.py
+++ b/pymongo/message.py
@@ -1,10 +1,10 @@
-# Copyright 2009-2014 MongoDB, Inc.
+# Copyright 2009-present MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,299 +13,1288 @@
 # limitations under the License.
 
 """Tools for creating `messages
-<http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol>`_ to be sent to
+<https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol/>`_ to be sent to
 MongoDB.
 
 .. note:: This module is for internal use and is generally not needed by
    application developers.
-
-.. versionadded:: 1.1.2
 """
+from __future__ import annotations
+
+import datetime
 import random
 import struct
+from io import BytesIO as _BytesIO
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Iterable,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Optional,
+    Union,
+)
 
 import bson
-from bson.binary import OLD_UUID_SUBTYPE
-from bson.py3compat import b, StringIO
-from bson.son import SON
+from bson import CodecOptions, _dict_to_bson, _make_c_string
+from bson.int64 import Int64
+from bson.raw_bson import (
+    _RAW_ARRAY_BSON_OPTIONS,
+    DEFAULT_RAW_BSON_OPTIONS,
+    RawBSONDocument,
+    _inflate_bson,
+)
+from pymongo.hello import HelloCompat
+from pymongo.monitoring import _EventListeners
+
 try:
-    from pymongo import _cmessage
+    from pymongo import _cmessage  # type: ignore[attr-defined]
+
     _use_c = True
 except ImportError:
     _use_c = False
-from pymongo.errors import DocumentTooLarge, InvalidOperation, OperationFailure
+from pymongo.errors import (
+    ConfigurationError,
+    CursorNotFound,
+    DocumentTooLarge,
+    ExecutionTimeout,
+    InvalidOperation,
+    NotPrimaryError,
+    OperationFailure,
+    ProtocolError,
+)
+from pymongo.read_preferences import ReadPreference, _ServerMode
+
+if TYPE_CHECKING:
+    from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext
+    from pymongo.read_concern import ReadConcern
+    from pymongo.typings import (
+        _Address,
+        _AgnosticClientSession,
+        _AgnosticConnection,
+        _AgnosticMongoClient,
+        _DocumentOut,
+    )
 
 MAX_INT32 = 2147483647
 MIN_INT32 = -2147483648
 
+# Overhead allowed for encoded command documents.
+_COMMAND_OVERHEAD = 16382 + _INSERT = 0 _UPDATE = 1 _DELETE = 2 -_EMPTY = b('') -_BSONOBJ = b('\x03') -_ZERO_8 = b('\x00') -_ZERO_16 = b('\x00\x00') -_ZERO_32 = b('\x00\x00\x00\x00') -_ZERO_64 = b('\x00\x00\x00\x00\x00\x00\x00\x00') -_SKIPLIM = b('\x00\x00\x00\x00\xff\xff\xff\xff') +_EMPTY = b"" +_BSONOBJ = b"\x03" +_ZERO_8 = b"\x00" +_ZERO_16 = b"\x00\x00" +_ZERO_32 = b"\x00\x00\x00\x00" +_ZERO_64 = b"\x00\x00\x00\x00\x00\x00\x00\x00" +_SKIPLIM = b"\x00\x00\x00\x00\xff\xff\xff\xff" _OP_MAP = { - _INSERT: b('\x04documents\x00\x00\x00\x00\x00'), - _UPDATE: b('\x04updates\x00\x00\x00\x00\x00'), - _DELETE: b('\x04deletes\x00\x00\x00\x00\x00'), + _INSERT: b"\x04documents\x00\x00\x00\x00\x00", + _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", + _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } +_FIELD_MAP = { + "insert": "documents", + "update": "updates", + "delete": "deletes", + "bulkWrite": "ops", +} + +_UNICODE_REPLACE_CODEC_OPTIONS: CodecOptions[Mapping[str, Any]] = CodecOptions( + unicode_decode_error_handler="replace" +) + + +def _randint() -> int: + """Generate a pseudo random 32 bit integer.""" + return random.randint(MIN_INT32, MAX_INT32) # noqa: S311 -def __last_error(namespace, args): - """Data to send to do a lastError. +def _maybe_add_read_preference( + spec: MutableMapping[str, Any], read_preference: _ServerMode +) -> MutableMapping[str, Any]: + """Add $readPreference to spec when appropriate.""" + mode = read_preference.mode + document = read_preference.document + # Only add $readPreference if it's something other than primary to avoid + # problems with mongos versions that don't support read preferences. Also, + # for maximum backwards compatibility, don't add $readPreference for + # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting + # the secondaryOkay bit has the same effect). + if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): + if "$query" not in spec: + spec = {"$query": spec} + spec["$readPreference"] = document + return spec + + +def _convert_exception(exception: Exception) -> dict[str, Any]: + """Convert an Exception into a failure document for publishing.""" + return {"errmsg": str(exception), "errtype": exception.__class__.__name__} + + +def _convert_client_bulk_exception(exception: Exception) -> dict[str, Any]: + """Convert an Exception into a failure document for publishing, + for use in client-level bulk write API. """ - cmd = SON([("getlasterror", 1)]) - cmd.update(args) - splitns = namespace.split('.', 1) - return query(0, splitns[0] + '.$cmd', 0, -1, cmd) + return { + "errmsg": str(exception), + "code": exception.code, # type: ignore[attr-defined] + "errtype": exception.__class__.__name__, + } + + +def _convert_write_result( + operation: str, command: Mapping[str, Any], result: Mapping[str, Any] +) -> dict[str, Any]: + """Convert a legacy write result to write command format.""" + # Based on _merge_legacy from bulk.py + affected = result.get("n", 0) + res = {"ok": 1, "n": affected} + errmsg = result.get("errmsg", result.get("err", "")) + if errmsg: + # The write was successful on at least the primary so don't return. + if result.get("wtimeout"): + res["writeConcernError"] = {"errmsg": errmsg, "code": 64, "errInfo": {"wtimeout": True}} + else: + # The write failed. 
+ error = {"index": 0, "code": result.get("code", 8), "errmsg": errmsg} + if "errInfo" in result: + error["errInfo"] = result["errInfo"] + res["writeErrors"] = [error] + return res + if operation == "insert": + # GLE result for insert is always 0 in most MongoDB versions. + res["n"] = len(command["documents"]) + elif operation == "update": + if "upserted" in result: + res["upserted"] = [{"index": 0, "_id": result["upserted"]}] + # Versions of MongoDB before 2.6 don't return the _id for an + # upsert if _id is not an ObjectId. + elif result.get("updatedExisting") is False and affected == 1: + # If _id is in both the update document *and* the query spec + # the update document _id takes precedence. + update = command["updates"][0] + _id = update["u"].get("_id", update["q"].get("_id")) + res["upserted"] = [{"index": 0, "_id": _id}] + return res + + +_OPTIONS = { + "tailable": 2, + "oplogReplay": 8, + "noCursorTimeout": 16, + "awaitData": 32, + "allowPartialResults": 128, +} -def __pack_message(operation, data): +_MODIFIERS = { + "$query": "filter", + "$orderby": "sort", + "$hint": "hint", + "$comment": "comment", + "$maxScan": "maxScan", + "$maxTimeMS": "maxTimeMS", + "$max": "max", + "$min": "min", + "$returnKey": "returnKey", + "$showRecordId": "showRecordId", + "$showDiskLoc": "showRecordId", # <= MongoDb 3.0 + "$snapshot": "snapshot", +} + + +def _gen_find_command( + coll: str, + spec: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + skip: int, + limit: int, + batch_size: Optional[int], + options: Optional[int], + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]] = None, + session: Optional[_AgnosticClientSession] = None, + allow_disk_use: Optional[bool] = None, +) -> dict[str, Any]: + """Generate a find command document.""" + cmd: dict[str, Any] = {"find": coll} + if "$query" in spec: + cmd.update( + [ + (_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) + for key, val in spec.items() + ] + ) + if "$explain" in cmd: + cmd.pop("$explain") + if "$readPreference" in cmd: + cmd.pop("$readPreference") + else: + cmd["filter"] = spec + + if projection: + cmd["projection"] = projection + if skip: + cmd["skip"] = skip + if limit: + cmd["limit"] = abs(limit) + if limit < 0: + cmd["singleBatch"] = True + if batch_size: + # When limit and batchSize are equal we increase batchSize by 1 to + # avoid an unnecessary killCursors. 
+        if limit == batch_size:
+            batch_size += 1
+        cmd["batchSize"] = batch_size
+    if read_concern.level and not (session and session.in_transaction):
+        cmd["readConcern"] = read_concern.document
+    if collation:
+        cmd["collation"] = collation
+    if allow_disk_use is not None:
+        cmd["allowDiskUse"] = allow_disk_use
+    if options:
+        cmd.update([(opt, True) for opt, val in _OPTIONS.items() if options & val])
+
+    return cmd
+
+
+def _gen_get_more_command(
+    cursor_id: Optional[int],
+    coll: str,
+    batch_size: Optional[int],
+    max_await_time_ms: Optional[int],
+    comment: Optional[Any],
+    conn: _AgnosticConnection,
+) -> dict[str, Any]:
+    """Generate a getMore command document."""
+    cmd: dict[str, Any] = {"getMore": cursor_id, "collection": coll}
+    if batch_size:
+        cmd["batchSize"] = batch_size
+    if max_await_time_ms is not None:
+        cmd["maxTimeMS"] = max_await_time_ms
+    if comment is not None and conn.max_wire_version >= 9:
+        cmd["comment"] = comment
+    return cmd
+
+
+_pack_compression_header = struct.Struct("<iiiiiiB").pack
+_COMPRESSION_HEADER_SIZE = 25
+
+
+def _compress(
+    operation: int, data: bytes, ctx: Union[SnappyContext, ZlibContext, ZstdContext]
+) -> tuple[int, bytes]:
+    """Takes message data, compresses it, and adds an OP_COMPRESSED header."""
+    compressed = ctx.compress(data)
+    request_id = _randint()
+
+    header = _pack_compression_header(
+        _COMPRESSION_HEADER_SIZE + len(compressed),  # Total message length
+        request_id,  # Request id
+        0,  # responseTo
+        2012,  # operation id
+        operation,  # original operation id
+        len(data),  # uncompressed message length
+        ctx.compressor_id,
+    )  # compressor id
+    return request_id, header + compressed
+
+
+_pack_header = struct.Struct("<iiii").pack
+
+
+def __pack_message(operation: int, data: bytes) -> tuple[int, bytes]:
     """Takes message data and adds a message header based on the
     operation.
 
     Returns the resultant message string.
     """
-    request_id = random.randint(MIN_INT32, MAX_INT32)
-    message = struct.pack("
+def _op_msg_no_header(
+    flags: int,
+    command: Mapping[str, Any],
+    identifier: str,
+    docs: Optional[list[Mapping[str, Any]]],
+    opts: CodecOptions[Any],
+) -> tuple[bytes, int, int]:
+    """Get a OP_MSG message.
+
+    Note: this method handles multiple documents in a type one payload but
+    it does not perform batch splitting and the total message size is
+    only checked *after* generating the entire message.
+    """
-    options = 0
-    if continue_on_error:
-        options += 1
-    data = struct.pack("
+def _op_msg_compressed(
+    flags: int,
+    command: Mapping[str, Any],
+    identifier: str,
+    docs: Optional[list[Mapping[str, Any]]],
+    opts: CodecOptions[Any],
+    ctx: Union[SnappyContext, ZlibContext, ZstdContext],
+) -> tuple[int, bytes, int, int]:
+    """Internal OP_MSG message helper."""
+    msg, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts)
+    rid, msg = _compress(2013, msg, ctx)
+    return rid, msg, total_size, max_bson_size
+
+
+def _op_msg_uncompressed(
+    flags: int,
+    command: Mapping[str, Any],
+    identifier: str,
+    docs: Optional[list[Mapping[str, Any]]],
+    opts: CodecOptions[Any],
+) -> tuple[int, bytes, int, int]:
+    """Internal uncompressed OP_MSG message helper."""
+    data, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts)
+    request_id, op_message = __pack_message(2013, data)
+    return request_id, op_message, total_size, max_bson_size
+
+
 if _use_c:
-    insert = _cmessage._insert_message
+    _op_msg_uncompressed = _cmessage._op_msg
 
-def update(collection_name, upsert, multi,
-           spec, doc, safe, last_error_args, check_keys, uuid_subtype):
-    """Get an **update** message.
-    """
-    options = 0
-    if upsert:
-        options += 1
-    if multi:
-        options += 2
-
-    data = _ZERO_32
-    data += bson._make_c_string(collection_name)
-    data += struct.pack("
+def _op_msg(
+    flags: int,
+    command: MutableMapping[str, Any],
+    dbname: str,
+    read_preference: Optional[_ServerMode],
+    opts: CodecOptions[Any],
+    ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None,
+) -> tuple[int, bytes, int, int]:
+    """Get a OP_MSG message."""
+    command["$db"] = dbname
+    # getMore commands do not send $readPreference.
+    if read_preference is not None and "$readPreference" not in command:
+        # Only send $readPreference if it's not primary (the default).
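+        # ReadPreference.PRIMARY has mode 0, so only non-default modes are
+        # serialized; e.g. secondaryPreferred sends
+        # {"mode": "secondaryPreferred"} (plus tags/maxStaleness if set).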
+        if read_preference.mode:
+            command["$readPreference"] = read_preference.document
+    name = next(iter(command))
+    try:
+        identifier = _FIELD_MAP[name]
+        docs = command.pop(identifier)
+    except KeyError:
+        identifier = ""
+        docs = None
+    try:
+        if ctx:
+            return _op_msg_compressed(flags, command, identifier, docs, opts, ctx)
+        return _op_msg_uncompressed(flags, command, identifier, docs, opts)
+    finally:
+        # Add the field back to the command.
+        if identifier:
+            command[identifier] = docs
+
+
+def _query_impl(
+    options: int,
+    collection_name: str,
+    num_to_skip: int,
+    num_to_return: int,
+    query: Mapping[str, Any],
+    field_selector: Optional[Mapping[str, Any]],
+    opts: CodecOptions[Any],
+) -> tuple[bytes, int]:
+    """Get an OP_QUERY message."""
+    encoded = _dict_to_bson(query, False, opts)
+    if field_selector:
+        efs = _dict_to_bson(field_selector, False, opts)
     else:
-        (request_id, update_message) = __pack_message(2001, data)
-        return (request_id, update_message, len(encoded))
+        efs = b""
+    max_bson_size = max(len(encoded), len(efs))
+    return (
+        b"".join(
+            [
+                _pack_int(options),
+                bson._make_c_string(collection_name),
+                _pack_int(num_to_skip),
+                _pack_int(num_to_return),
+                encoded,
+                efs,
+            ]
+        ),
+        max_bson_size,
+    )
+
+
+def _query_compressed(
+    options: int,
+    collection_name: str,
+    num_to_skip: int,
+    num_to_return: int,
+    query: Mapping[str, Any],
+    field_selector: Optional[Mapping[str, Any]],
+    opts: CodecOptions[Any],
+    ctx: Union[SnappyContext, ZlibContext, ZstdContext],
+) -> tuple[int, bytes, int]:
+    """Internal compressed query message helper."""
+    op_query, max_bson_size = _query_impl(
+        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
+    )
+    rid, msg = _compress(2004, op_query, ctx)
+    return rid, msg, max_bson_size
+
+
+def _query_uncompressed(
+    options: int,
+    collection_name: str,
+    num_to_skip: int,
+    num_to_return: int,
+    query: Mapping[str, Any],
+    field_selector: Optional[Mapping[str, Any]],
+    opts: CodecOptions[Any],
+) -> tuple[int, bytes, int]:
+    """Internal query message helper."""
+    op_query, max_bson_size = _query_impl(
+        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
+    )
+    rid, msg = __pack_message(2004, op_query)
+    return rid, msg, max_bson_size
+
 
 if _use_c:
-    update = _cmessage._update_message
+    _query_uncompressed = _cmessage._query_message
+
+
+def _query(
+    options: int,
+    collection_name: str,
+    num_to_skip: int,
+    num_to_return: int,
+    query: Mapping[str, Any],
+    field_selector: Optional[Mapping[str, Any]],
+    opts: CodecOptions[Any],
+    ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None,
+) -> tuple[int, bytes, int]:
+    """Get a **query** message."""
+    if ctx:
+        return _query_compressed(
+            options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx
+        )
+    return _query_uncompressed(
+        options, collection_name, num_to_skip, num_to_return, query, field_selector, opts
+    )
+
+
+_pack_long_long = struct.Struct("<q").pack
+
+
+def _get_more_impl(collection_name: str, num_to_return: int, cursor_id: int) -> bytes:
+    """Get an OP_GET_MORE message."""
+    return b"".join(
+        [
+            _ZERO_32,
+            bson._make_c_string(collection_name),
+            _pack_int(num_to_return),
+            _pack_long_long(cursor_id),
+        ]
+    )
+
+
+def _get_more_compressed(
+    collection_name: str,
+    num_to_return: int,
+    cursor_id: int,
+    ctx: Union[SnappyContext, ZlibContext, ZstdContext],
+) -> tuple[int, bytes]:
+    """Internal compressed getMore message helper."""
+    return _compress(2005, _get_more_impl(collection_name, num_to_return, cursor_id), ctx)
+
+
+def _get_more_uncompressed(
+    collection_name:
str, num_to_return: int, cursor_id: int +) -> tuple[int, bytes]: + """Internal getMore message helper.""" + return __pack_message(2005, _get_more_impl(collection_name, num_to_return, cursor_id)) + + +if _use_c: + _get_more_uncompressed = _cmessage._get_more_message + + +def _get_more( + collection_name: str, + num_to_return: int, + cursor_id: int, + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes]: + """Get a **getMore** message.""" + if ctx: + return _get_more_compressed(collection_name, num_to_return, cursor_id, ctx) + return _get_more_uncompressed(collection_name, num_to_return, cursor_id) + + +# OP_MSG ------------------------------------------------------------- -def query(options, collection_name, num_to_skip, - num_to_return, query, field_selector=None, - uuid_subtype=OLD_UUID_SUBTYPE): - """Get a **query** message. +_OP_MSG_MAP = { + _INSERT: b"documents\x00", + _UPDATE: b"updates\x00", + _DELETE: b"deletes\x00", +} + + +class _BulkWriteContextBase: + """Private base class for wrapping around AsyncConnection to use with write splitting functions.""" + + __slots__ = ( + "db_name", + "conn", + "op_id", + "name", + "field", + "publish", + "start_time", + "listeners", + "session", + "compress", + "op_type", + "codec", + ) + + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + op_type: int, + codec: CodecOptions[Any], + ): + self.db_name = database_name + self.conn = conn + self.op_id = operation_id + self.listeners = listeners + self.publish = listeners.enabled_for_commands + self.name = cmd_name + self.field = _FIELD_MAP[self.name] + self.start_time = datetime.datetime.now() + self.session = session + self.compress = bool(conn.compression_context) + self.op_type = op_type + self.codec = codec + + @property + def max_bson_size(self) -> int: + """A proxy for SockInfo.max_bson_size.""" + return self.conn.max_bson_size + + @property + def max_message_size(self) -> int: + """A proxy for SockInfo.max_message_size.""" + if self.compress: + # Subtract 16 bytes for the message header. 
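The 16 bytes the compression branch below reserves are the standard wire-protocol MsgHeader: four little-endian int32s (messageLength, requestID, responseTo, opCode) that wrap every message, including the OP_COMPRESSED envelope. A one-line sanity check of that size:

    import struct

    assert struct.calcsize("<iiii") == 16  # the MsgHeader reserved below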
+ return self.conn.max_message_size - 16 + return self.conn.max_message_size + + @property + def max_write_batch_size(self) -> int: + """A proxy for SockInfo.max_write_batch_size.""" + return self.conn.max_write_batch_size + + @property + def max_split_size(self) -> int: + """The maximum size of a BSON command before batch splitting.""" + return self.max_bson_size + + def _succeed(self, request_id: int, reply: _DocumentOut, duration: datetime.timedelta) -> None: + """Publish a CommandSucceededEvent.""" + self.listeners.publish_command_success( + duration, + reply, + self.name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + database_name=self.db_name, + ) + + def _fail(self, request_id: int, failure: _DocumentOut, duration: datetime.timedelta) -> None: + """Publish a CommandFailedEvent.""" + self.listeners.publish_command_failure( + duration, + failure, + self.name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + database_name=self.db_name, + ) + + +class _BulkWriteContext(_BulkWriteContextBase): + """A wrapper around AsyncConnection/Connection for use with the collection-level bulk write API.""" + + __slots__ = () + + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + op_type: int, + codec: CodecOptions[Any], + ): + super().__init__( + database_name, + cmd_name, + conn, + operation_id, + listeners, + session, + op_type, + codec, + ) + + def batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" + request_id, msg, to_send = _do_batched_op_msg( + namespace, self.op_type, cmd, docs, self.codec, self + ) + if not to_send: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send + + def _start( + self, cmd: MutableMapping[str, Any], request_id: int, docs: list[Mapping[str, Any]] + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd[self.field] = docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd + + +class _EncryptedBulkWriteContext(_BulkWriteContext): + __slots__ = () + + def batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, dict[str, Any], list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" + msg, to_send = _encode_batched_write_command( + namespace, self.op_type, cmd, docs, self.codec, self + ) + if not to_send: + raise InvalidOperation("cannot do an empty bulk write") + + # Chop off the OP_QUERY header to get a properly batched write command. + cmd_start = msg.index(b"\x00", 4) + 9 + outgoing = _inflate_bson(memoryview(msg)[cmd_start:], DEFAULT_RAW_BSON_OPTIONS) + return -1, outgoing, to_send + + @property + def max_split_size(self) -> int: + """Reduce the batch splitting size.""" + return _MAX_SPLIT_SIZE_ENC + + +def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> NoReturn: + """Internal helper for raising DocumentTooLarge.""" + if operation == "insert": + raise DocumentTooLarge( + "BSON document too large (%d bytes)" + " - the connected server supports" + " BSON document sizes up to %d" + " bytes." 
            % (doc_size, max_size)
+        )
+    else:
+        # There's nothing intelligent we can say
+        # about size for update and delete
+        raise DocumentTooLarge(f"{operation!r} command document too large")
+
+
+# From the Client Side Encryption spec:
+# Because automatic encryption increases the size of commands, the driver
+# MUST split bulk writes at a reduced size limit before undergoing automatic
+# encryption. The write payload MUST be split at 2MiB (2097152).
+_MAX_SPLIT_SIZE_ENC = 2097152
+
+
+def _batched_op_msg_impl(
+    operation: int,
+    command: Mapping[str, Any],
+    docs: list[Mapping[str, Any]],
+    ack: bool,
+    opts: CodecOptions[Any],
+    ctx: _BulkWriteContext,
+    buf: _BytesIO,
+) -> tuple[list[Mapping[str, Any]], int]:
+    """Create a batched OP_MSG write."""
+    max_bson_size = ctx.max_bson_size
+    max_write_batch_size = ctx.max_write_batch_size
+    max_message_size = ctx.max_message_size
+
+    flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00"
+    # Flags
+    buf.write(flags)
+
+    # Type 0 Section
+    buf.write(b"\x00")
+    buf.write(_dict_to_bson(command, False, opts))
+
+    # Type 1 Section
+    buf.write(b"\x01")
+    size_location = buf.tell()
+    # Save space for size
+    buf.write(b"\x00\x00\x00\x00")
+    try:
+        buf.write(_OP_MSG_MAP[operation])
+    except KeyError:
+        raise InvalidOperation("Unknown command") from None
+
+    to_send = []
+    idx = 0
+    for doc in docs:
+        # Encode the current operation
+        value = _dict_to_bson(doc, False, opts)
+        doc_length = len(value)
+        new_message_size = buf.tell() + doc_length
+        # Does first document exceed max_message_size?
+        doc_too_large = idx == 0 and (new_message_size > max_message_size)
+        # When OP_MSG is used unacknowledged we have to check
+        # document size client side or applications won't be notified.
+        # Otherwise we let the server deal with documents that are too large
+        # since ordered=False causes those documents to be skipped instead of
+        # halting the bulk write operation.
+        unacked_doc_too_large = not ack and (doc_length > max_bson_size)
+        if doc_too_large or unacked_doc_too_large:
+            write_op = list(_FIELD_MAP.keys())[operation]
+            _raise_document_too_large(write_op, len(value), max_bson_size)
+        # We have enough data, return this batch.
+        if new_message_size > max_message_size:
+            break
+        buf.write(value)
+        to_send.append(doc)
+        idx += 1
+        # We have enough documents, return this batch.
+        if idx == max_write_batch_size:
+            break
+
+    # Write type 1 section size
+    length = buf.tell()
+    buf.seek(size_location)
+    buf.write(_pack_int(length - size_location))
+
+    return to_send, length
+
+
+def _encode_batched_op_msg(
+    operation: int,
+    command: Mapping[str, Any],
+    docs: list[Mapping[str, Any]],
+    ack: bool,
+    opts: CodecOptions[Any],
+    ctx: _BulkWriteContext,
+) -> tuple[bytes, list[Mapping[str, Any]]]:
+    """Encode the next batched insert, update, or delete operation
+    as OP_MSG.
     """
-    data = struct.pack("<i", options)
-    data += bson._make_c_string(collection_name)
-    data += struct.pack("<i", num_to_skip)
-    data += struct.pack("<i", num_to_return)
-    encoded = bson.BSON.encode(query, False, uuid_subtype)
-    if field_selector is None:
-        fields = EMPTY
-    else:
-        fields = bson.BSON.encode(field_selector, False, uuid_subtype)
-    data += encoded
-    data += fields
-    return __pack_message(2004, data)
+    buf = _BytesIO()
+
+    to_send, _ = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf)
+    return buf.getvalue(), to_send
+
+
+if _use_c:
+    _encode_batched_op_msg = _cmessage._encode_batched_op_msg
+
+
+def _batched_op_msg_compressed(
+    operation: int,
+    command: Mapping[str, Any],
+    docs: list[Mapping[str, Any]],
+    ack: bool,
+    opts: CodecOptions[Any],
+    ctx: _BulkWriteContext,
+) -> tuple[int, bytes, list[Mapping[str, Any]]]:
+    """Create the next batched insert, update, or delete operation
+    with OP_MSG, compressed.
""" - data = _ZERO_32 - data += bson._make_c_string(collection_name) - data += struct.pack(" tuple[int, bytes, list[Mapping[str, Any]]]: + """OP_MSG implementation entry point.""" + buf = _BytesIO() + + # Save space for message length and request id + buf.write(_ZERO_64) + # responseTo, opCode + buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") + + to_send, length = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) + + # Header - request id and message length + buf.seek(4) + request_id = _randint() + buf.write(_pack_int(request_id)) + buf.seek(0) + buf.write(_pack_int(length)) + + return request_id, buf.getvalue(), to_send + + if _use_c: - get_more = _cmessage._get_more_message + _batched_op_msg = _cmessage._batched_op_msg -def delete(collection_name, spec, safe, - last_error_args, uuid_subtype, options=0): - """Get a **delete** message. +def _do_batched_op_msg( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: + """Create the next batched insert, update, or delete operation + using OP_MSG. """ - data = _ZERO_32 - data += bson._make_c_string(collection_name) - data += struct.pack(" tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + request_id, msg, to_send_ops, to_send_ns = _client_do_batched_op_msg( + cmd, operations, namespaces, self.codec, self + ) + if not to_send_ops: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send_ops, to_send_ns + + def _start( + self, + cmd: MutableMapping[str, Any], + request_id: int, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd + + +_OP_MSG_OVERHEAD = 1000 + + +def _client_construct_op_msg( + command_encoded: bytes, + to_send_ops_encoded: list[bytes], + to_send_ns_encoded: list[bytes], + ack: bool, + buf: _BytesIO, +) -> int: + # Write flags + flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00" + buf.write(flags) + + # Type 0 Section + buf.write(b"\x00") + buf.write(command_encoded) + + # Type 1 Section for ops + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"ops\x00") + # Write all the ops documents + for op_encoded in to_send_ops_encoded: + buf.write(op_encoded) + resume_location = buf.tell() + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + buf.seek(resume_location) + + # Type 1 Section for nsInfo + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"nsInfo\x00") + # Write all the nsInfo documents + for ns_encoded in to_send_ns_encoded: + buf.write(ns_encoded) + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + return length -def _do_batched_insert(collection_name, docs, check_keys, - safe, last_error_args, continue_on_error, uuid_subtype, client): - """Insert `docs` using multiple batches. 
+ +def _client_batched_op_msg_impl( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]], int]: + """Create a batched OP_MSG write for client-level bulk write.""" + + def _check_doc_size_limits( + op_type: str, + doc_size: int, + limit: int, + ) -> None: + if doc_size > limit: + _raise_document_too_large(op_type, doc_size, limit) + + max_bson_size = ctx.max_bson_size + max_write_batch_size = ctx.max_write_batch_size + max_message_size = ctx.max_message_size + + command_encoded = _dict_to_bson(command, False, opts) + # When OP_MSG is used unacknowledged we have to check command + # document size client-side or applications won't be notified. + if not ack: + _check_doc_size_limits("bulkWrite", len(command_encoded), max_bson_size + _COMMAND_OVERHEAD) + + # Don't include bulkWrite-command-agnostic fields in batch-splitting calculations. + abridged_keys = ["bulkWrite", "errorsOnly", "ordered"] + if command.get("bypassDocumentValidation"): + abridged_keys.append("bypassDocumentValidation") + if command.get("comment"): + abridged_keys.append("comment") + if command.get("let"): + abridged_keys.append("let") + command_abridged = {key: command[key] for key in abridged_keys} + command_len_abridged = len(_dict_to_bson(command_abridged, False, opts)) + + # Maximum combined size of the ops and nsInfo document sequences. + max_doc_sequences_bytes = max_message_size - (_OP_MSG_OVERHEAD + command_len_abridged) + + ns_info = {} + to_send_ops: list[Mapping[str, Any]] = [] + to_send_ns: list[Mapping[str, str]] = [] + to_send_ops_encoded: list[bytes] = [] + to_send_ns_encoded: list[bytes] = [] + total_ops_length = 0 + total_ns_length = 0 + idx = 0 + + for (real_op_type, op_doc), namespace in zip(operations, namespaces): + op_type = real_op_type + # Check insert/replace document size if unacknowledged. + if real_op_type == "insert": + if not ack: + doc_size = len(_dict_to_bson(op_doc["document"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) + if real_op_type == "replace": + op_type = "update" + if not ack: + doc_size = len(_dict_to_bson(op_doc["updateMods"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) + + ns_doc = None + ns_length = 0 + + if namespace not in ns_info: + ns_doc = {"ns": namespace} + new_ns_index = len(to_send_ns) + ns_info[namespace] = new_ns_index + + # First entry in the operation doc has the operation type as its + # key and the index of its namespace within ns_info as its value. + op_doc[op_type] = ns_info[namespace] # type: ignore[index] + + # Encode current operation doc and, if newly added, namespace doc. + op_doc_encoded = _dict_to_bson(op_doc, False, opts) + op_length = len(op_doc_encoded) + if ns_doc: + ns_doc_encoded = _dict_to_bson(ns_doc, False, opts) + ns_length = len(ns_doc_encoded) + + # Check operation document size if unacknowledged. + if not ack: + _check_doc_size_limits(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) + + new_message_size = total_ops_length + total_ns_length + op_length + ns_length + # We have enough data, return this batch. + if new_message_size > max_doc_sequences_bytes: + if idx == 0: + _raise_document_too_large(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) + break + + # Add op and ns documents to this batch. 
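The nsInfo bookkeeping above, whose batch appends resume just below, is the heart of the client-level bulkWrite encoding: each distinct namespace is written to the nsInfo document sequence once, and every operation refers to its namespace by index rather than repeating the string. A toy restatement with made-up namespaces:

    ns_info: dict[str, int] = {}
    ops: list[dict] = []
    ns_docs: list[dict] = []
    for op_type, ns in [("insert", "db.a"), ("insert", "db.a"), ("delete", "db.b")]:
        if ns not in ns_info:
            ns_info[ns] = len(ns_docs)  # index of the namespace doc
            ns_docs.append({"ns": ns})
        ops.append({op_type: ns_info[ns]})

    assert ops == [{"insert": 0}, {"insert": 0}, {"delete": 1}]
    assert ns_docs == [{"ns": "db.a"}, {"ns": "db.b"}]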
+ to_send_ops.append(op_doc) + to_send_ops_encoded.append(op_doc_encoded) + total_ops_length += op_length + if ns_doc: + to_send_ns.append(ns_doc) + to_send_ns_encoded.append(ns_doc_encoded) + total_ns_length += ns_length + + idx += 1 + + # We have enough documents, return this batch. + if idx == max_write_batch_size: + break + + # Construct the entire OP_MSG. + length = _client_construct_op_msg( + command_encoded, to_send_ops_encoded, to_send_ns_encoded, ack, buf + ) + + return to_send_ops, to_send_ns, length + + +def _client_encode_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Encode the next batched client-level bulkWrite + operation as OP_MSG. """ - def _insert_message(insert_message, send_safe): - """Build the insert message with header and GLE. - """ - request_id, final_message = __pack_message(2002, insert_message) - if send_safe: - request_id, error_message, _ = __last_error(collection_name, - last_error_args) - final_message += error_message - return request_id, final_message - - send_safe = safe or not continue_on_error - last_error = None - data = StringIO() - data.write(struct.pack(" client.max_bson_size) - - message_length += encoded_length - if message_length < client.max_message_size and not too_large: - data.write(encoded) - has_docs = True - continue - - if has_docs: - # We have enough data, send this message. - try: - client._send_message(_insert_message(data.getvalue(), - send_safe), send_safe) - # Exception type could be OperationFailure or a subtype - # (e.g. DuplicateKeyError) - except OperationFailure, exc: - # Like it says, continue on error... - if continue_on_error: - # Store exception details to re-raise after the final batch. - last_error = exc - # With unacknowledged writes just return at the first error. - elif not safe: - return - # With acknowledged writes raise immediately. - else: - raise - - if too_large: - raise DocumentTooLarge("BSON document too large (%d bytes)" - " - the connected server supports" - " BSON document sizes up to %d" - " bytes." % - (encoded_length, client.max_bson_size)) - - message_length = begin_loc + encoded_length - data.seek(begin_loc) - data.truncate() - data.write(encoded) - - if not has_docs: - raise InvalidOperation("cannot do an empty bulk insert") - - client._send_message(_insert_message(data.getvalue(), safe), safe) - - # Re-raise any exception stored due to continue_on_error - if last_error is not None: - raise last_error -if _use_c: - _do_batched_insert = _cmessage._do_batched_insert + buf = _BytesIO() + + to_send_ops, to_send_ns, _ = _client_batched_op_msg_impl( + command, operations, namespaces, ack, opts, ctx, buf + ) + return buf.getvalue(), to_send_ops, to_send_ns -def _do_batched_write_command(namespace, operation, command, - docs, check_keys, uuid_subtype, client): - """Execute a batch of insert, update, or delete commands. +def _client_batched_op_msg_compressed( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite operation + with OP_MSG, compressed. 
""" - max_bson_size = client.max_bson_size - max_write_batch_size = client.max_write_batch_size - # Max BSON object size + 16k - 2 bytes for ending NUL bytes - # XXX: This should come from the server - SERVER-10643 - max_cmd_size = max_bson_size + 16382 + data, to_send_ops, to_send_ns = _client_encode_batched_op_msg( + command, operations, namespaces, ack, opts, ctx + ) - ordered = command.get('ordered', True) + assert ctx.conn.compression_context is not None + request_id, msg = _compress(2013, data, ctx.conn.compression_context) + return request_id, msg, to_send_ops, to_send_ns + + +def _client_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """OP_MSG implementation entry point for client-level bulkWrite.""" + buf = _BytesIO() - buf = StringIO() # Save space for message length and request id buf.write(_ZERO_64) # responseTo, opCode - buf.write(b("\x00\x00\x00\x00\xd4\x07\x00\x00")) + buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") + + to_send_ops, to_send_ns, length = _client_batched_op_msg_impl( + command, operations, namespaces, ack, opts, ctx, buf + ) + + # Header - request id and message length + buf.seek(4) + request_id = _randint() + buf.write(_pack_int(request_id)) + buf.seek(0) + buf.write(_pack_int(length)) + + return request_id, buf.getvalue(), to_send_ops, to_send_ns + + +def _client_do_batched_op_msg( + command: MutableMapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite + operation using OP_MSG. + """ + command["$db"] = "admin" + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) + else: + ack = True + if ctx.conn.compression_context: + return _client_batched_op_msg_compressed(command, operations, namespaces, ack, opts, ctx) + return _client_batched_op_msg(command, operations, namespaces, ack, opts, ctx) + + +# End OP_MSG ----------------------------------------------------- + + +def _encode_batched_write_command( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]]]: + """Encode the next batched insert, update, or delete command.""" + buf = _BytesIO() + + to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) + return buf.getvalue(), to_send + + +if _use_c: + _encode_batched_write_command = _cmessage._encode_batched_write_command + + +def _batched_write_command_impl( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], int]: + """Create a batched OP_QUERY write command.""" + max_bson_size = ctx.max_bson_size + max_write_batch_size = ctx.max_write_batch_size + # Max BSON object size + 16k - 2 bytes for ending NUL bytes. + # Server guarantees there is enough room: SERVER-10643. 
+    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD
+    max_split_size = ctx.max_split_size
+
     # No options
     buf.write(_ZERO_32)
     # Namespace as C string
-    buf.write(b(namespace))
+    buf.write(namespace.encode("utf8"))
     buf.write(_ZERO_8)
     # Skip: 0, Limit: -1
     buf.write(_SKIPLIM)
 
     # Where to write command document length
     command_start = buf.tell()
-    buf.write(bson.BSON.encode(command))
+    buf.write(bson.encode(command))
 
     # Start of payload
     buf.seek(-1, 2)
@@ -314,83 +1303,600 @@ def _do_batched_write_command(namespace, operation, command,
     try:
         buf.write(_OP_MAP[operation])
     except KeyError:
-        raise InvalidOperation('Unknown command')
-
-    if operation in (_UPDATE, _DELETE):
-        check_keys = False
+        raise InvalidOperation("Unknown command") from None
 
     # Where to write list document length
     list_start = buf.tell() - 4
-
-    def send_message():
-        """Finalize and send the current OP_QUERY message.
-        """
-        # Close list and command documents
-        buf.write(_ZERO_16)
-
-        # Write document lengths and request id
-        length = buf.tell()
-        buf.seek(list_start)
-        buf.write(struct.pack('<i', length - list_start - 1))
-        buf.seek(command_start)
-        buf.write(struct.pack('<i', length - command_start))
-        buf.seek(4)
-        request_id = random.randint(MIN_INT32, MAX_INT32)
-        buf.write(struct.pack('<i', request_id))
-        buf.seek(0)
-        buf.write(struct.pack('<i', length))
-
-        return client._send_message((request_id, buf.getvalue()),
-                                    with_last_error=True, command=True)
-
-    results = []
-    idx = 0
-    idx_offset = 0
-    has_docs = False
-    for doc in docs:
-        has_docs = True
-        key = b(str(idx))
-        value = bson.BSON.encode(doc, check_keys, uuid_subtype)
-        enough_data = (buf.tell() + len(key) + len(value) >= max_cmd_size)
-        enough_documents = (idx >= max_write_batch_size)
+    to_send = []
+    idx = 0
+    for doc in docs:
+        # Encode the current operation
+        key = str(idx).encode("utf8")
+        value = _dict_to_bson(doc, False, opts)
+        # Is there enough room to add this document? max_cmd_size accounts for
+        # the two trailing null bytes.
+        doc_too_large = len(value) > max_cmd_size
+        if doc_too_large:
+            write_op = list(_FIELD_MAP.keys())[operation]
+            _raise_document_too_large(write_op, len(value), max_bson_size)
+        enough_data = idx >= 1 and (buf.tell() + len(key) + len(value)) >= max_split_size
+        enough_documents = idx >= max_write_batch_size
         if enough_data or enough_documents:
-            if not idx:
-                if operation == _INSERT:
-                    raise DocumentTooLarge("BSON document too large (%d bytes)"
-                                           " - the connected server supports"
-                                           " BSON document sizes up to %d"
-                                           " bytes." % (len(value),
-                                                        max_bson_size))
-                # There's nothing intelligent we can say
-                # about size for update and remove
-                raise DocumentTooLarge("command document too large")
-            result = send_message()
-            results.append((idx_offset, result))
-            if ordered and "writeErrors" in result:
-                return results
-
-            # Truncate back to the start of list elements
-            buf.seek(list_start + 4)
-            buf.truncate()
-            idx_offset += idx
-            idx = 0
-            key = b('0')
+            break
         buf.write(_BSONOBJ)
         buf.write(key)
         buf.write(_ZERO_8)
         buf.write(value)
+        to_send.append(doc)
         idx += 1
 
-    if not has_docs:
-        raise InvalidOperation("cannot do an empty bulk write")
+    # Finalize the current OP_QUERY message.
     # Close list and command documents
     buf.write(_ZERO_16)
 
-    results.append((idx_offset, send_message()))
-    return results
-if _use_c:
-    _do_batched_write_command = _cmessage._do_batched_write_command
+    # Write document lengths and request id
+    length = buf.tell()
+    buf.seek(list_start)
+    buf.write(_pack_int(length - list_start - 1))
+    buf.seek(command_start)
+    buf.write(_pack_int(length - command_start))
+
+    return to_send, length
+
+
+class _OpReply:
+    """A MongoDB OP_REPLY response message."""
+
+    __slots__ = ("flags", "cursor_id", "number_returned", "documents")
+
+    UNPACK_FROM = struct.Struct("<iqii").unpack_from
+    OP_CODE = 1
+
+    def __init__(self, flags: int, cursor_id: int, number_returned: int, documents: bytes):
+        self.flags = flags
+        self.cursor_id = cursor_id
+        self.number_returned = number_returned
+        self.documents = documents
+
+    def raw_response(
+        self, cursor_id: Optional[int] = None, user_fields: Optional[Mapping[str, Any]] = None
+    ) -> list[bytes | memoryview]:
+        """Check the response header from the database, without decoding BSON.
+
+        Check the response for errors and unpack.
+
+        Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or
+        OperationFailure.
+
+        :param cursor_id: cursor_id we sent to get this response -
+            used for raising an informative exception when we get cursor id not
+            valid at server response.
+ """ + if self.flags & 1: + # Shouldn't get this response if we aren't doing a getMore + if cursor_id is None: + raise ProtocolError("No cursor id for getMore operation") + + # Fake a getMore command response. OP_GET_MORE provides no + # document. + msg = "Cursor not found, cursor id: %d" % (cursor_id,) + errobj = {"ok": 0, "errmsg": msg, "code": 43} + raise CursorNotFound(msg, 43, errobj) + elif self.flags & 2: + error_object: dict[str, Any] = bson.BSON(self.documents).decode() + # Fake the ok field if it doesn't exist. + error_object.setdefault("ok", 0) + if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): + raise NotPrimaryError(error_object["$err"], error_object) + elif error_object.get("code") == 50: + default_msg = "operation exceeded time limit" + raise ExecutionTimeout( + error_object.get("$err", default_msg), error_object.get("code"), error_object + ) + raise OperationFailure( + "database error: %s" % error_object.get("$err"), + error_object.get("code"), + error_object, + ) + if self.documents: + return [self.documents] + return [] + + def unpack_response( + self, + cursor_id: Optional[int] = None, + codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: + """Unpack a response from the database and decode the BSON document(s). + + Check the response for errors and unpack, returning a dictionary + containing the response data. + + Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or + OperationFailure. + + :param cursor_id: cursor_id we sent to get this response - + used for raising an informative exception when we get cursor id not + valid at server response + :param codec_options: an instance of + :class:`~bson.codec_options.CodecOptions` + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.raw_response(cursor_id) + if legacy_response: + return bson.decode_all(self.documents, codec_options) + return bson._decode_all_selective(self.documents, codec_options, user_fields) + + def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: + """Unpack a command response.""" + docs = self.unpack_response(codec_options=codec_options) + assert self.number_returned == 1 + return docs[0] + + def raw_command_response(self) -> NoReturn: + """Return the bytes of the command response.""" + # This should never be called on _OpReply. + raise NotImplementedError + + @property + def more_to_come(self) -> bool: + """Is the moreToCome bit set on this response?""" + return False + + @classmethod + def unpack(cls, msg: bytes | memoryview) -> _OpReply: + """Construct an _OpReply from raw bytes.""" + # PYTHON-945: ignore starting_from field. 
+        flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg)
+
+        documents = msg[20:]
+        return cls(flags, cursor_id, number_returned, documents)
+
+
+class _OpMsg:
+    """A MongoDB OP_MSG response message."""
+
+    __slots__ = ("flags", "cursor_id", "number_returned", "payload_document")
+
+    UNPACK_FROM = struct.Struct("<IBi").unpack_from
+    OP_CODE = 2013
+
+    # Flag bits.
+    CHECKSUM_PRESENT = 1
+    MORE_TO_COME = 1 << 1
+    EXHAUST_ALLOWED = 1 << 16  # Only present on requests.
+
+    def __init__(self, flags: int, payload_document: bytes):
+        self.flags = flags
+        self.payload_document = payload_document
+
+    def raw_response(
+        self,
+        cursor_id: Optional[int] = None,
+        user_fields: Optional[Mapping[str, Any]] = {},  # noqa: B006
+    ) -> list[Mapping[str, Any]]:
+        """
+        cursor_id is ignored
+        user_fields is used to determine which fields must not be decoded
+        """
+        inflated_response = bson._decode_selective(
+            RawBSONDocument(self.payload_document), user_fields, _RAW_ARRAY_BSON_OPTIONS
+        )
+        return [inflated_response]
+
+    def unpack_response(
+        self,
+        cursor_id: Optional[int] = None,
+        codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS,
+        user_fields: Optional[Mapping[str, Any]] = None,
+        legacy_response: bool = False,
+    ) -> list[dict[str, Any]]:
+        """Unpack an OP_MSG command response.
+
+        :param cursor_id: Ignored, for compatibility with _OpReply.
+        :param codec_options: an instance of
+            :class:`~bson.codec_options.CodecOptions`
+        :param user_fields: Response fields that should be decoded
+            using the TypeDecoders from codec_options, passed to
+            bson._decode_all_selective.
+        """
+        # If _OpMsg is in-use, this cannot be a legacy response.
+        assert not legacy_response
+        return bson._decode_all_selective(self.payload_document, codec_options, user_fields)
+
+    def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]:
+        """Unpack a command response."""
+        return self.unpack_response(codec_options=codec_options)[0]
+
+    def raw_command_response(self) -> bytes | memoryview:
+        """Return the bytes of the command response."""
+        return self.payload_document
+
+    @property
+    def more_to_come(self) -> bool:
+        """Is the moreToCome bit set on this response?"""
+        return bool(self.flags & self.MORE_TO_COME)
+
+    @classmethod
+    def unpack(cls, msg: bytes | memoryview) -> _OpMsg:
+        """Construct an _OpMsg from raw bytes."""
+        flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg)
+        if flags != 0:
+            if flags & cls.CHECKSUM_PRESENT:
+                raise ProtocolError(f"Unsupported OP_MSG flag checksumPresent: 0x{flags:x}")
+
+            if flags ^ cls.MORE_TO_COME:
+                raise ProtocolError(f"Unsupported OP_MSG flags: 0x{flags:x}")
+        if first_payload_type != 0:
+            raise ProtocolError(f"Unsupported OP_MSG payload type: 0x{first_payload_type:x}")
+
+        if len(msg) != first_payload_size + 5:
+            raise ProtocolError("Unsupported OP_MSG reply: >1 section")
+
+        payload_document = msg[5:]
+        return cls(flags, payload_document)
+
+
+_UNPACK_REPLY: dict[int, Callable[[bytes | memoryview], Union[_OpReply, _OpMsg]]] = {
+    _OpReply.OP_CODE: _OpReply.unpack,
+    _OpMsg.OP_CODE: _OpMsg.unpack,
+}
+
+
+class _Query:
+    """A query operation."""
+
+    __slots__ = (
+        "flags",
+        "db",
+        "coll",
+        "ntoskip",
+        "spec",
+        "fields",
+        "codec_options",
+        "read_preference",
+        "limit",
+        "batch_size",
+        "name",
+        "read_concern",
+        "collation",
+        "session",
+        "client",
+        "allow_disk_use",
+        "_as_command",
+        "exhaust",
+    )
+
+    # For compatibility with the _GetMore class.
+ conn_mgr = None + cursor_id = None + + def __init__( + self, + flags: int, + db: str, + coll: str, + ntoskip: int, + spec: Mapping[str, Any], + fields: Optional[Mapping[str, Any]], + codec_options: CodecOptions[Any], + read_preference: _ServerMode, + limit: int, + batch_size: int, + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]], + session: Optional[_AgnosticClientSession], + client: _AgnosticMongoClient, + allow_disk_use: Optional[bool], + exhaust: bool, + ): + self.flags = flags + self.db = db + self.coll = coll + self.ntoskip = ntoskip + self.spec = spec + self.fields = fields + self.codec_options = codec_options + self.read_preference = read_preference + self.read_concern = read_concern + self.limit = limit + self.batch_size = batch_size + self.collation = collation + self.session = session + self.client = client + self.allow_disk_use = allow_disk_use + self.name = "find" + self._as_command: Optional[tuple[dict[str, Any], str]] = None + self.exhaust = exhaust + + def reset(self) -> None: + self._as_command = None + + def namespace(self) -> str: + return f"{self.db}.{self.coll}" + + def use_command(self, conn: _AgnosticConnection) -> bool: + use_find_cmd = False + if not self.exhaust: + use_find_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_find_cmd = True + elif not self.read_concern.ok_for_legacy: + raise ConfigurationError( + "read concern level of %s is not valid " + "with a max wire version of %d." % (self.read_concern.level, conn.max_wire_version) + ) + + conn.validate_session(self.client, self.session) # type: ignore[arg-type] + return use_find_cmd + + def update_command(self, cmd: dict[str, Any]) -> None: + self._as_command = cmd, self.db + + def as_command( + self, conn: _AgnosticConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + """Return a find command document for this query.""" + # We use the command twice: on the wire and for command monitoring. + # Generate it once, for speed and to avoid repeating side-effects. + if self._as_command is not None: + return self._as_command + + explain = "$explain" in self.spec + cmd: dict[str, Any] = _gen_find_command( + self.coll, + self.spec, + self.fields, + self.ntoskip, + self.limit, + self.batch_size, + self.flags, + self.read_concern, + self.collation, + self.session, + self.allow_disk_use, + ) + if explain: + self.name = "explain" + cmd = {"explain": cmd} + conn.add_server_api(cmd) + if self.session: + self.session._apply_to(cmd, False, self.read_preference, conn) # type: ignore[arg-type] + # Explain does not support readConcern. + if not explain and not self.session.in_transaction: + self.session._update_read_concern(cmd, conn) # type: ignore[arg-type] + conn.send_cluster_time(cmd, self.session, self.client) # type: ignore[arg-type] + # Support CSOT + if apply_timeout: + conn.apply_timeout(self.client, cmd=cmd) # type: ignore[arg-type] + self._as_command = cmd, self.db + return self._as_command + + def get_message( + self, read_preference: _ServerMode, conn: _AgnosticConnection, use_cmd: bool = False + ) -> tuple[int, bytes, int]: + """Get a query message, possibly setting the secondaryOk bit.""" + # Use the read_preference decided by _socket_from_server. + self.read_preference = read_preference + if read_preference.mode: + # Set the secondaryOk bit. 
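The bit set just below is OP_QUERY's secondaryOk flag. Its value, 4, fills the gap between "tailable" (2) and "oplogReplay" (8) in the _OPTIONS table earlier in this file; it is absent from that table because it is derived from the read preference at send time rather than passed in by callers. A tiny illustration with an assumed constant name:

    SECONDARY_OK = 4  # bit 2 of the OP_QUERY flags field

    flags = 2 | SECONDARY_OK  # e.g. a tailable cursor routed to a secondary
    assert flags & SECONDARY_OK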
+ flags = self.flags | 4 + else: + flags = self.flags + + ns = self.namespace() + spec = self.spec + + if use_cmd: + spec = self.as_command(conn)[0] + request_id, msg, size, _ = _op_msg( + 0, + spec, + self.db, + read_preference, + self.codec_options, + ctx=conn.compression_context, + ) + return request_id, msg, size + + # OP_QUERY treats ntoreturn of -1 and 1 the same, return + # one document and close the cursor. We have to use 2 for + # batch size if 1 is specified. + ntoreturn = self.batch_size == 1 and 2 or self.batch_size + if self.limit: + if ntoreturn: + ntoreturn = min(self.limit, ntoreturn) + else: + ntoreturn = self.limit + + if conn.is_mongos: + assert isinstance(spec, MutableMapping) + spec = _maybe_add_read_preference(spec, read_preference) + + return _query( + flags, + ns, + self.ntoskip, + ntoreturn, + spec, + None if use_cmd else self.fields, + self.codec_options, + ctx=conn.compression_context, + ) + + +class _GetMore: + """A getmore operation.""" + + __slots__ = ( + "db", + "coll", + "ntoreturn", + "cursor_id", + "max_await_time_ms", + "codec_options", + "read_preference", + "session", + "client", + "conn_mgr", + "_as_command", + "exhaust", + "comment", + ) + + name = "getMore" + + def __init__( + self, + db: str, + coll: str, + ntoreturn: int, + cursor_id: int, + codec_options: CodecOptions[Any], + read_preference: _ServerMode, + session: Optional[_AgnosticClientSession], + client: _AgnosticMongoClient, + max_await_time_ms: Optional[int], + conn_mgr: Any, + exhaust: bool, + comment: Any, + ): + self.db = db + self.coll = coll + self.ntoreturn = ntoreturn + self.cursor_id = cursor_id + self.codec_options = codec_options + self.read_preference = read_preference + self.session = session + self.client = client + self.max_await_time_ms = max_await_time_ms + self.conn_mgr = conn_mgr + self._as_command: Optional[tuple[dict[str, Any], str]] = None + self.exhaust = exhaust + self.comment = comment + + def reset(self) -> None: + self._as_command = None + + def namespace(self) -> str: + return f"{self.db}.{self.coll}" + + def use_command(self, conn: _AgnosticConnection) -> bool: + use_cmd = False + if not self.exhaust: + use_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_cmd = True + + conn.validate_session(self.client, self.session) # type: ignore[arg-type] + return use_cmd + + def update_command(self, cmd: dict[str, Any]) -> None: + self._as_command = cmd, self.db + + def as_command( + self, conn: _AgnosticConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + """Return a getMore command document for this query.""" + # See _Query.as_command for an explanation of this caching. 
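Before the cached-command check resumes, one quirk from get_message above is worth restating on its own: the legacy OP_QUERY protocol treats ntoreturn values of 1 and -1 the same way (return a single document and close the cursor), so a batch size of 1 must be sent as 2 to keep the cursor alive, and any limit caps the final value. A sketch mirroring that logic, with a hypothetical helper name:

    def wire_ntoreturn(batch_size: int, limit: int) -> int:
        n = 2 if batch_size == 1 else batch_size
        if limit:
            n = min(limit, n) if n else limit
        return n

    assert wire_ntoreturn(1, 0) == 2   # keep the cursor open
    assert wire_ntoreturn(10, 4) == 4  # the limit wins when it is smaller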
+ if self._as_command is not None: + return self._as_command + + cmd: dict[str, Any] = _gen_get_more_command( + self.cursor_id, + self.coll, + self.ntoreturn, + self.max_await_time_ms, + self.comment, + conn, + ) + if self.session: + self.session._apply_to(cmd, False, self.read_preference, conn) # type: ignore[arg-type] + conn.add_server_api(cmd) + conn.send_cluster_time(cmd, self.session, self.client) # type: ignore[arg-type] + # Support CSOT + if apply_timeout: + conn.apply_timeout(self.client, cmd=None) # type: ignore[arg-type] + self._as_command = cmd, self.db + return self._as_command + + def get_message( + self, dummy0: Any, conn: _AgnosticConnection, use_cmd: bool = False + ) -> Union[tuple[int, bytes, int], tuple[int, bytes]]: + """Get a getmore message.""" + ns = self.namespace() + ctx = conn.compression_context + + if use_cmd: + spec = self.as_command(conn)[0] + if self.conn_mgr and self.exhaust: + flags = _OpMsg.EXHAUST_ALLOWED + else: + flags = 0 + request_id, msg, size, _ = _op_msg( + flags, spec, self.db, None, self.codec_options, ctx=conn.compression_context + ) + return request_id, msg, size + + return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) + + +class _RawBatchQuery(_Query): + def use_command(self, conn: _AgnosticConnection) -> bool: + # Compatibility checks. + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True + return False + + +class _RawBatchGetMore(_GetMore): + def use_command(self, conn: _AgnosticConnection) -> bool: + # Compatibility checks. + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True + return False + + +class _CursorAddress(tuple[Any, ...]): + """The server address (host, port) of a cursor, with namespace property.""" + + __namespace: Any + + def __new__(cls, address: _Address, namespace: str) -> _CursorAddress: + self = tuple.__new__(cls, address) + self.__namespace = namespace + return self + + @property + def namespace(self) -> str: + """The namespace this cursor.""" + return self.__namespace + + def __hash__(self) -> int: + # Two _CursorAddress instances with different namespaces + # must not hash the same. + return ((*self, self.__namespace)).__hash__() + + def __eq__(self, other: object) -> bool: + if isinstance(other, _CursorAddress): + return tuple(self) == tuple(other) and self.namespace == other.namespace + return NotImplemented + + def __ne__(self, other: object) -> bool: + return not self == other diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 0d15dc121f..778abe27ef 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1,1489 +1,22 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Tools for connecting to MongoDB. +"""Re-import of synchronous MongoClient API for compatibility.""" +from __future__ import annotations -.. seealso:: Module :mod:`~pymongo.master_slave_connection` for - connecting to master-slave clusters, and - :doc:`/examples/high_availability` for an example of how to connect - to a replica set, or specify a list of mongos instances for automatic - failover. +from pymongo.synchronous.mongo_client import * # noqa: F403 +from pymongo.synchronous.mongo_client import __doc__ as original_doc -To get a :class:`~pymongo.database.Database` instance from a -:class:`MongoClient` use either dictionary-style or attribute-style -access: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> c = MongoClient() - >>> c.test_database - Database(MongoClient('localhost', 27017), u'test_database') - >>> c['test-database'] - Database(MongoClient('localhost', 27017), u'test-database') -""" - -import datetime -import random -import socket -import struct -import threading -import time -import warnings - -from bson.py3compat import b -from pymongo import (auth, - common, - database, - helpers, - message, - pool, - thread_util, - uri_parser) -from pymongo.common import HAS_SSL -from pymongo.cursor_manager import CursorManager -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - DocumentTooLarge, - DuplicateKeyError, - InvalidURI, - OperationFailure) -from pymongo.member import Member -EMPTY = b("") - - -def _partition_node(node): - """Split a host:port string returned from mongod/s into - a (host, int(port)) pair needed for socket.connect(). - """ - host = node - port = 27017 - idx = node.rfind(':') - if idx != -1: - host, port = node[:idx], int(node[idx + 1:]) - if host.startswith('['): - host = host[1:-1] - return host, port - - -class MongoClient(common.BaseObject): - """Connection to MongoDB. - """ - - HOST = "localhost" - PORT = 27017 - - def __init__(self, host=None, port=None, max_pool_size=100, - document_class=dict, tz_aware=False, _connect=True, - **kwargs): - """Create a new connection to a single MongoDB instance at *host:port*. - - The resultant client object has connection-pooling built - in. It also performs auto-reconnection when necessary. If an - operation fails because of a connection error, - :class:`~pymongo.errors.ConnectionFailure` is raised. If - auto-reconnection will be performed, - :class:`~pymongo.errors.AutoReconnect` will be - raised. Application code should handle this exception - (recognizing that the operation failed) and then continue to - execute. - - Raises :class:`TypeError` if port is not an instance of - ``int``. Raises :class:`~pymongo.errors.ConnectionFailure` if - the connection cannot be made. - - The `host` parameter can be a full `mongodb URI - `_, in addition to - a simple hostname. It can also be a list of hostnames or - URIs. Any port specified in the host string(s) will override - the `port` parameter. If multiple mongodb URIs containing - database or auth information are passed, the last database, - username, and password present will be used. For username and - passwords reserved characters like ':', '/', '+' and '@' must be - escaped following RFC 2396. 
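The escaping requirement described above is usually met with the standard library's quote_plus; a sketch of the pattern the docstring alludes to (modern spelling shown — the Python 2 code of this era imported it from urllib):

    from urllib.parse import quote_plus

    user = quote_plus("reader@example.com")  # '@' must not appear raw
    password = quote_plus("pass/word:1+2")   # nor may ':', '/' or '+'
    uri = "mongodb://%s:%s@localhost:27017" % (user, password)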
- - :Parameters: - - `host` (optional): hostname or IP address of the - instance to connect to, or a mongodb URI, or a list of - hostnames / mongodb URIs. If `host` is an IPv6 literal - it must be enclosed in '[' and ']' characters following - the RFC2732 URL syntax (e.g. '[::1]' for localhost) - - `port` (optional): port number on which to connect - - `max_pool_size` (optional): The maximum number of connections - that the pool will open simultaneously. If this is set, operations - will block if there are `max_pool_size` outstanding connections - from the pool. Defaults to 100. - - `document_class` (optional): default class to use for - documents returned from queries on this client - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`MongoClient` will be timezone - aware (otherwise they will be naive) - - | **Other optional parameters can be passed as keyword arguments:** - - - `socketTimeoutMS`: (integer) How long (in milliseconds) a send or - receive on a socket can take before timing out. Defaults to ``None`` - (no timeout). - - `connectTimeoutMS`: (integer) How long (in milliseconds) a - connection can take to be opened before timing out. Defaults to - ``20000``. - - `waitQueueTimeoutMS`: (integer) How long (in milliseconds) a - thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer) Multiplied by max_pool_size to give - the number of threads allowed to wait for a socket at one time. - Defaults to ``None`` (no waiters). - - `auto_start_request`: If ``True``, each thread that accesses - this :class:`MongoClient` has a socket allocated to it for the - thread's lifetime. This ensures consistent reads, even if you - read after an unacknowledged write. Defaults to ``False`` - - `use_greenlets`: If ``True``, :meth:`start_request()` will ensure - that the current greenlet uses the same socket for all - operations until :meth:`end_request()` - - | **Write Concern options:** - - - `w`: (integer or string) If this is a replica set, write operations - will block until they have been replicated to the specified number - or tagged set of servers. `w=` always includes the replica set - primary (e.g. w=3 means write to the primary and wait until - replicated to **two** secondaries). Passing w=0 **disables write - acknowledgement** and all other write concern options. - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. - - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. 
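A usage sketch of the write concern options listed above, as they were passed in this legacy API (hypothetical values; note that j and fsync cannot be combined):

    from pymongo import MongoClient

    # Wait for replication to one secondary (w=2 includes the primary),
    # give up after five seconds, and require a journal commit.
    client = MongoClient("localhost", 27017, w=2, wtimeout=5000, j=True)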
- - | **Replica set keyword arguments for connecting with a replica set - - either directly or via a mongos:** - | (ignored by standalone mongod instances) - - - `replicaSet`: (string) The name of the replica set to connect to. - The driver will verify that the replica set it connects to matches - this name. Implies that the hosts specified are a seed list and the - driver should attempt to find all members of the set. *Ignored by - mongos*. - - `read_preference`: The read preference for this client. If - connecting to a secondary then a read preference mode *other* than - PRIMARY is required - otherwise all queries will throw - :class:`~pymongo.errors.AutoReconnect` "not master". - See :class:`~pymongo.read_preferences.ReadPreference` for all - available read preference options. - - `tag_sets`: Ignored unless connecting to a replica set via mongos. - Specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags. - - | **SSL configuration:** - - - `ssl`: If ``True``, create the connection to the server using SSL. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. If included with the ``certfile`` then - only the ``ssl_certfile`` is needed. Implies ``ssl=True``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``ssl=True``. - - `ssl_cert_reqs`: Specifies whether a certificate is required from - the other side of the connection, and whether it will be validated - if provided. It must be one of the three values ``ssl.CERT_NONE`` - (certificates ignored), ``ssl.CERT_OPTIONAL`` - (not required, but validated if provided), or ``ssl.CERT_REQUIRED`` - (required and validated). If the value of this parameter is not - ``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point - to a file of CA certificates. Implies ``ssl=True``. - - `ssl_ca_certs`: The ca_certs file contains a set of concatenated - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``ssl=True``. - - .. seealso:: :meth:`end_request` - - .. mongodoc:: connections - - .. versionchanged:: 2.5 - Added additional ssl options - .. versionadded:: 2.4 - """ - if host is None: - host = self.HOST - if isinstance(host, basestring): - host = [host] - if port is None: - port = self.PORT - if not isinstance(port, int): - raise TypeError("port must be an instance of int") - - seeds = set() - username = None - password = None - self.__default_database_name = None - opts = {} - for entity in host: - if "://" in entity: - if entity.startswith("mongodb://"): - res = uri_parser.parse_uri(entity, port) - seeds.update(res["nodelist"]) - username = res["username"] or username - password = res["password"] or password - self.__default_database_name = ( - res["database"] or self.__default_database_name) - - opts = res["options"] - else: - idx = entity.find("://") - raise InvalidURI("Invalid URI scheme: " - "%s" % (entity[:idx],)) - else: - seeds.update(uri_parser.split_hosts(entity, port)) - if not seeds: - raise ConfigurationError("need to specify at least one host") - - # Seeds are only used before first connection attempt; nodes are then - # used for any reconnects. 
Nodes are set to all replica set members - # if connecting to a replica set (besides arbiters), or to all - # available mongoses from the seed list, or to the one standalone - # mongod. - self.__seeds = frozenset(seeds) - self.__nodes = frozenset() - self.__member = None # TODO: Rename to __server. - - # _pool_class and _event_class are for deep customization of PyMongo, - # e.g. Motor. SHOULD NOT BE USED BY THIRD-PARTY DEVELOPERS. - pool_class = kwargs.pop('_pool_class', pool.Pool) - event_class = kwargs.pop('_event_class', None) - - options = {} - for option, value in kwargs.iteritems(): - option, value = common.validate(option, value) - options[option] = value - options.update(opts) - - self.__max_pool_size = common.validate_positive_integer_or_none( - 'max_pool_size', max_pool_size) - - self.__cursor_manager = CursorManager(self) - - self.__repl = options.get('replicaset') - self.__direct = len(seeds) == 1 and not self.__repl - - self.__net_timeout = options.get('sockettimeoutms') - self.__conn_timeout = options.get('connecttimeoutms') - self.__wait_queue_timeout = options.get('waitqueuetimeoutms') - self.__wait_queue_multiple = options.get('waitqueuemultiple') - - self.__use_ssl = options.get('ssl', None) - self.__ssl_keyfile = options.get('ssl_keyfile', None) - self.__ssl_certfile = options.get('ssl_certfile', None) - self.__ssl_cert_reqs = options.get('ssl_cert_reqs', None) - self.__ssl_ca_certs = options.get('ssl_ca_certs', None) - - ssl_kwarg_keys = [k for k in kwargs.keys() if k.startswith('ssl_')] - if self.__use_ssl == False and ssl_kwarg_keys: - raise ConfigurationError("ssl has not been enabled but the " - "following ssl parameters have been set: " - "%s. Please set `ssl=True` or remove." - % ', '.join(ssl_kwarg_keys)) - - if self.__ssl_cert_reqs and not self.__ssl_ca_certs: - raise ConfigurationError("If `ssl_cert_reqs` is not " - "`ssl.CERT_NONE` then you must " - "include `ssl_ca_certs` to be able " - "to validate the server.") - - if ssl_kwarg_keys and self.__use_ssl is None: - # ssl options imply ssl = True - self.__use_ssl = True - - if self.__use_ssl and not HAS_SSL: - raise ConfigurationError("The ssl module is not available. If you " - "are using a python version previous to " - "2.6 you must install the ssl package " - "from PyPI.") - - self.__use_greenlets = options.get('use_greenlets', False) - self.__pool_class = pool_class - - self.__connecting = False - if self.__use_greenlets: - # Greenlets don't need to lock around access to the Member; - # they're only interrupted when they do I/O. - self.__connecting_lock = thread_util.DummyLock() - else: - self.__connecting_lock = threading.Lock() - - if event_class: - self.__event_class = event_class - else: - # Prevent a cycle; this lambda shouldn't refer to self. - g = self.__use_greenlets - event_class = lambda: thread_util.create_event(g) - self.__event_class = event_class - - self.__future_member = None - self.__document_class = document_class - self.__tz_aware = common.validate_boolean('tz_aware', tz_aware) - self.__auto_start_request = options.get('auto_start_request', False) - - # cache of existing indexes used by ensure_index ops - self.__index_cache = {} - self.__auth_credentials = {} - - super(MongoClient, self).__init__(**options) - if self.slave_okay: - warnings.warn("slave_okay is deprecated. 
Please " - "use read_preference instead.", DeprecationWarning, - stacklevel=2) - - if _connect: - try: - self._ensure_connected(True) - except AutoReconnect, e: - # ConnectionFailure makes more sense here than AutoReconnect - raise ConnectionFailure(str(e)) - - if username: - mechanism = options.get('authmechanism', 'MONGODB-CR') - source = ( - options.get('authsource') - or self.__default_database_name - or 'admin') - - credentials = auth._build_credentials_tuple(mechanism, - source, - unicode(username), - unicode(password), - options) - try: - self._cache_credentials(source, credentials, _connect) - except OperationFailure, exc: - raise ConfigurationError(str(exc)) - - def _cached(self, dbname, coll, index): - """Test if `index` is cached. - """ - cache = self.__index_cache - now = datetime.datetime.utcnow() - return (dbname in cache and - coll in cache[dbname] and - index in cache[dbname][coll] and - now < cache[dbname][coll][index]) - - def _cache_index(self, database, collection, index, cache_for): - """Add an index to the index cache for ensure_index operations. - """ - now = datetime.datetime.utcnow() - expire = datetime.timedelta(seconds=cache_for) + now - - if database not in self.__index_cache: - self.__index_cache[database] = {} - self.__index_cache[database][collection] = {} - self.__index_cache[database][collection][index] = expire - - elif collection not in self.__index_cache[database]: - self.__index_cache[database][collection] = {} - self.__index_cache[database][collection][index] = expire - - else: - self.__index_cache[database][collection][index] = expire - - def _purge_index(self, database_name, - collection_name=None, index_name=None): - """Purge an index from the index cache. - - If `index_name` is None purge an entire collection. - - If `collection_name` is None purge an entire database. - """ - if not database_name in self.__index_cache: - return - - if collection_name is None: - del self.__index_cache[database_name] - return - - if not collection_name in self.__index_cache[database_name]: - return - - if index_name is None: - del self.__index_cache[database_name][collection_name] - return - - if index_name in self.__index_cache[database_name][collection_name]: - del self.__index_cache[database_name][collection_name][index_name] - - def _cache_credentials(self, source, credentials, connect=True): - """Add credentials to the database authentication cache - for automatic login when a socket is created. If `connect` is True, - verify the credentials on the server first. - """ - if source in self.__auth_credentials: - # Nothing to do if we already have these credentials. - if credentials == self.__auth_credentials[source]: - return - raise OperationFailure('Another user is already authenticated ' - 'to this database. You must logout first.') - - if connect: - member = self.__ensure_member() - sock_info = self.__socket(member) - try: - # Since __check_auth was called in __socket - # there is no need to call it here. - auth.authenticate(credentials, sock_info, self.__simple_command) - sock_info.authset.add(credentials) - finally: - member.pool.maybe_return_socket(sock_info) - - self.__auth_credentials[source] = credentials - - def _purge_credentials(self, source): - """Purge credentials from the database authentication cache. 
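For example, the cache is populated and cleared through the public authentication API (host and credentials illustrative):

    from pymongo import MongoClient

    client = MongoClient('localhost', 27017)
    client.test.authenticate('user', 'secret')  # caches credentials for source 'test'
    client.test.logout()                        # purges them from the cache again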
- """ - if source in self.__auth_credentials: - del self.__auth_credentials[source] - - def __create_pool(self, pair): - return self.__pool_class( - pair, - self.__max_pool_size, - self.__net_timeout, - self.__conn_timeout, - self.__use_ssl, - use_greenlets=self.__use_greenlets, - ssl_keyfile=self.__ssl_keyfile, - ssl_certfile=self.__ssl_certfile, - ssl_cert_reqs=self.__ssl_cert_reqs, - ssl_ca_certs=self.__ssl_ca_certs, - wait_queue_timeout=self.__wait_queue_timeout, - wait_queue_multiple=self.__wait_queue_multiple) - - def __check_auth(self, sock_info): - """Authenticate using cached database credentials. - """ - if self.__auth_credentials or sock_info.authset: - cached = set(self.__auth_credentials.itervalues()) - - authset = sock_info.authset.copy() - - # Logout any credentials that no longer exist in the cache. - for credentials in authset - cached: - self.__simple_command(sock_info, credentials[1], {'logout': 1}) - sock_info.authset.discard(credentials) - - for credentials in cached - authset: - auth.authenticate(credentials, - sock_info, self.__simple_command) - sock_info.authset.add(credentials) - - def __member_property(self, attr_name, default=None): - member = self.__member - if member: - return getattr(member, attr_name) - - return default - - @property - def host(self): - """Current connected host. - - .. versionchanged:: 1.3 - ``host`` is now a property rather than a method. - """ - member = self.__member - if member: - return member.host[0] - - return None - - @property - def port(self): - """Current connected port. - - .. versionchanged:: 1.3 - ``port`` is now a property rather than a method. - """ - member = self.__member - if member: - return member.host[1] - - return None - @property - def is_primary(self): - """If this instance is connected to a standalone, a replica set - primary, or the master of a master-slave set. - - .. versionadded:: 2.3 - """ - return self.__member_property('is_primary', False) - - @property - def is_mongos(self): - """If this instance is connected to mongos. - - .. versionadded:: 2.3 - """ - return self.__member_property('is_mongos', False) - - @property - def max_pool_size(self): - """The maximum number of sockets the pool will open concurrently. - - When the pool has reached `max_pool_size`, operations block waiting for - a socket to be returned to the pool. If ``waitQueueTimeoutMS`` is set, - a blocked operation will raise :exc:`~pymongo.errors.ConnectionFailure` - after a timeout. By default ``waitQueueTimeoutMS`` is not set. - - .. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, this - parameter would limit only the idle sockets the pool would hold - onto, not the number of open sockets. The default has also changed - to 100. - - .. versionchanged:: 2.6 - .. versionadded:: 1.11 - """ - return self.__max_pool_size - - @property - def use_greenlets(self): - """Whether calling :meth:`start_request` assigns greenlet-local, - rather than thread-local, sockets. - - .. versionadded:: 2.4.2 - """ - return self.__use_greenlets - - @property - def nodes(self): - """List of all known nodes. - - Nodes are either specified when this instance was created, - or discovered through the replica set discovery mechanism. - - .. versionadded:: 1.8 - """ - return self.__nodes - - @property - def auto_start_request(self): - """Is auto_start_request enabled? 
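A minimal sketch of the option this property reports (assumes a server on localhost):

    client = MongoClient('localhost', 27017, auto_start_request=True)
    # Each thread now keeps a socket reserved for its lifetime, so a read
    # that follows an unacknowledged write on the same thread will observe
    # that write.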
- """ - return self.__auto_start_request - - def get_document_class(self): - return self.__document_class - - def set_document_class(self, klass): - self.__document_class = klass - - document_class = property(get_document_class, set_document_class, - doc="""Default class to use for documents - returned from this client. - - .. versionadded:: 1.7 - """) - - @property - def tz_aware(self): - """Does this client return timezone-aware datetimes? - - .. versionadded:: 1.8 - """ - return self.__tz_aware - - @property - def max_bson_size(self): - """Return the maximum size BSON object the connected server - accepts in bytes. Defaults to 16MB if not connected to a - server. - - .. versionadded:: 1.10 - """ - return self.__member_property('max_bson_size', common.MAX_BSON_SIZE) - - @property - def max_message_size(self): - """Return the maximum message size the connected server - accepts in bytes. Defaults to 32MB if not connected to a - server. - - .. versionadded:: 2.6 - """ - return self.__member_property( - 'max_message_size', common.MAX_MESSAGE_SIZE) - - @property - def min_wire_version(self): - """The minWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.__member_property( - 'min_wire_version', common.MIN_WIRE_VERSION) - - @property - def max_wire_version(self): - """The maxWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.__member_property( - 'max_wire_version', common.MAX_WIRE_VERSION) - - @property - def max_write_batch_size(self): - """The maxWriteBatchSize reported by the server. - - Returns a default value when connected to server versions prior to - MongoDB 2.6. - - .. versionadded:: 2.7 - """ - return self.__member_property( - 'max_write_batch_size', common.MAX_WRITE_BATCH_SIZE) - - def __simple_command(self, sock_info, dbname, spec): - """Send a command to the server. - """ - rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec) - start = time.time() - try: - sock_info.sock.sendall(msg) - response = self.__receive_message_on_socket(1, rqst_id, sock_info) - except: - sock_info.close() - raise - - end = time.time() - response = helpers._unpack_response(response)['data'][0] - msg = "command %r failed: %%s" % spec - helpers._check_command_response(response, None, msg) - return response, end - start - - def __try_node(self, node): - """Try to connect to this node and see if it works for our connection - type. Returns a Member and set of hosts (including this one). Doesn't - modify state. - - :Parameters: - - `node`: The (host, port) pair to try. - """ - # Call 'ismaster' directly so we can get a response time. - connection_pool = self.__create_pool(node) - sock_info = connection_pool.get_socket() - try: - response, res_time = self.__simple_command(sock_info, - 'admin', - {'ismaster': 1}) - finally: - connection_pool.maybe_return_socket(sock_info) - - member = Member( - node, - connection_pool, - response, - res_time) - - nodes = frozenset([node]) - - # Replica Set? - if not self.__direct: - # Check that this host is part of the given replica set. 
- if self.__repl and member.set_name != self.__repl: - raise ConfigurationError("%s:%d is not a member of " - "replica set %s" - % (node[0], node[1], self.__repl)) - - if "hosts" in response: - nodes = frozenset([ - _partition_node(h) for h in response["hosts"]]) - - if member.is_primary: - return member, nodes - - elif "primary" in response: - # Shortcut: a secondary usually tells us who the primary is. - candidate = _partition_node(response["primary"]) - return self.__try_node(candidate) - - # Explain why we aren't using this connection. - raise AutoReconnect('%s:%d is not primary or master' % node) - - # Direct connection - if member.is_arbiter and not self.__direct: - raise ConfigurationError("%s:%d is an arbiter" % node) - - return member, nodes - - def __pick_nearest(self, candidates): - """Return the 'nearest' Member instance based on response time. - - Doesn't modify state. - """ - latency = self.secondary_acceptable_latency_ms - # Only used for mongos high availability, ping_time is in seconds. - fastest = min([ - member.ping_time for member in candidates]) - - near_candidates = [ - member for member in candidates - if member.ping_time - fastest < latency / 1000.0] - - return random.choice(near_candidates) - - def __ensure_member(self): - """Connect and return a Member instance, or raise AutoReconnect.""" - # If `connecting` is False, no thread is in __find_node(), - # and `future_member` is resolved. `member` may be None if the - # last __find_node() attempt failed, otherwise it is in `nodes`. - # - # If `connecting` is True, a thread is in __find_node(), - # `member` is None, and `future_member` is pending. - # - # To violate these invariants temporarily, acquire the lock. - # Note that disconnect() interacts with this method. - self.__connecting_lock.acquire() - if self.__member: - member = self.__member - self.__connecting_lock.release() - return member - - elif self.__connecting: - # A thread is in __find_node(). Wait. - future = self.__future_member - self.__connecting_lock.release() - return future.result() - - else: - self.__connecting = True - future = self.__future_member = thread_util.Future( - self.__event_class) - - self.__connecting_lock.release() - - member = None - nodes = None - exc = None - - try: - try: - member, nodes = self.__find_node() - return member - except Exception, e: - exc = e - raise - finally: - # We're either returning a Member or raising an error. - # Propagate either outcome to waiting threads. - self.__connecting_lock.acquire() - self.__member = member - self.__connecting = False - - # If we discovered a set of nodes, use them from now on; - # otherwise we're raising an error. Stick with the last - # known good set of nodes. - if nodes: - self.__nodes = nodes - - if member: - # Unblock waiting threads. - future.set_result(member) - else: - # Raise exception in waiting threads. - future.set_exception(exc) - - self.__connecting_lock.release() - - def __find_node(self): - """Find a server suitable for our connection type. - - Returns a Member and a set of nodes. Doesn't modify state. - - If only one host was supplied to __init__ see if we can connect - to it. Don't check if the host is a master/primary so we can make - a direct connection to read from a secondary or send commands to - an arbiter. - - If more than one host was supplied treat them as a seed list for - connecting to a replica set or to support high availability for - mongos. If connecting to a replica set try to find the primary, - and set `nodes` to list of all members. 
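The mongos case described next relies on the latency window implemented in __pick_nearest() above; a self-contained sketch of that filter, with hypothetical ping times in seconds and the default 15 ms threshold:

    import random

    ping_times = {'mongos1:27017': 0.002,
                  'mongos2:27017': 0.009,
                  'mongos3:27017': 0.040}
    latency_ms = 15
    fastest = min(ping_times.values())
    near = [host for host, ping in ping_times.items()
            if ping - fastest < latency_ms / 1000.0]
    chosen = random.choice(near)  # mongos3 excluded: 0.040 - 0.002 >= 0.015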
- - If a mongos seed list was provided find the "nearest" mongos and - return it, setting `nodes` to all mongoses in the seed list that - are up. - - Otherwise we iterate through the list trying to find a host we can - send write operations to. - """ - assert not self.__member, \ - "__find_node unexpectedly running with a non-null Member" - - errors = [] - mongos_candidates = [] - candidates = self.__nodes or self.__seeds - chosen_member = None - discovered_nodes = None - - for candidate in candidates: - try: - member, nodes = self.__try_node(candidate) - if member.is_mongos and not self.__direct: - mongos_candidates.append(member) - - # We intend to find all the mongoses; keep trying nodes. - continue - elif len(mongos_candidates): - raise ConfigurationError("Seed list cannot contain a mix " - "of mongod and mongos instances.") - - # We've found a suitable node. - chosen_member = member - discovered_nodes = nodes - break - except (OperationFailure, ConfigurationError, ValueError): - # The server is available but something failed, e.g. auth, - # wrong replica set name, or incompatible wire protocol. - raise - except Exception, why: - errors.append(str(why)) - - if len(mongos_candidates): - # If we have a mongos seed list, pick the "nearest" member. - chosen_member = self.__pick_nearest(mongos_candidates) - mongoses = frozenset(m.host for m in mongos_candidates) - - # The first time, __nodes is empty and mongoses becomes nodes. - return chosen_member, self.__nodes or mongoses - - if not chosen_member: - # Couldn't find a suitable host. - raise AutoReconnect(', '.join(errors)) - - return chosen_member, discovered_nodes - - def __socket(self, member): - """Get a SocketInfo. - - Calls disconnect() on error. - """ - connection_pool = member.pool - try: - if self.auto_start_request and not connection_pool.in_request(): - connection_pool.start_request() - - sock_info = connection_pool.get_socket() - except socket.error, why: - self.disconnect() - - # Check if a unix domain socket - host, port = member.host - if host.endswith('.sock'): - host_details = "%s:" % host - else: - host_details = "%s:%d:" % (host, port) - raise AutoReconnect("could not connect to " - "%s %s" % (host_details, str(why))) - try: - self.__check_auth(sock_info) - except OperationFailure: - connection_pool.maybe_return_socket(sock_info) - raise - return sock_info - - def _ensure_connected(self, sync=False): - """Ensure this client instance is connected to a mongod/s. - """ - self.__ensure_member() - - def disconnect(self): - """Disconnect from MongoDB. - - Disconnecting will close all underlying sockets in the connection - pool. If this instance is used again it will be automatically - re-opened. Care should be taken to make sure that :meth:`disconnect` - is not called in the middle of a sequence of operations in which - ordering is important. This could lead to unexpected results. - - .. seealso:: :meth:`end_request` - .. versionadded:: 1.3 - """ - self.__connecting_lock.acquire() - member, self.__member = self.__member, None - self.__connecting_lock.release() - - # Close sockets promptly. - if member: - member.pool.reset() - - def close(self): - """Alias for :meth:`disconnect` - - Disconnecting will close all underlying sockets in the connection - pool. If this instance is used again it will be automatically - re-opened. Care should be taken to make sure that :meth:`disconnect` - is not called in the middle of a sequence of operations in which - ordering is important. This could lead to unexpected results. - - .. 
seealso:: :meth:`end_request` - .. versionadded:: 2.1 - """ - self.disconnect() - - def alive(self): - """Return ``False`` if there has been an error communicating with the - server, else ``True``. - - This method attempts to check the status of the server with minimal I/O. - The current thread / greenlet retrieves a socket from the pool (its - request socket if it's in a request, or a random idle socket if it's not - in a request) and checks whether calling `select`_ on it raises an - error. If there are currently no idle sockets, :meth:`alive` will - attempt to actually connect to the server. - - A more certain way to determine server availability is:: - - client.admin.command('ping') - - .. _select: http://docs.python.org/2/library/select.html#select.select - """ - # In the common case, a socket is available and was used recently, so - # calling select() on it is a reasonable attempt to see if the OS has - # reported an error. - self.__connecting_lock.acquire() - member = self.__member - self.__connecting_lock.release() - if not member: - return False - else: - sock_info = None - try: - try: - sock_info = member.pool.get_socket() - return not pool._closed(sock_info.sock) - except (socket.error, ConnectionFailure): - return False - finally: - member.pool.maybe_return_socket(sock_info) - - def set_cursor_manager(self, manager_class): - """Set this client's cursor manager. - - Raises :class:`TypeError` if `manager_class` is not a subclass of - :class:`~pymongo.cursor_manager.CursorManager`. A cursor manager - handles closing cursors. Different managers can implement different - policies in terms of when to actually kill a cursor that has - been closed. - - :Parameters: - - `manager_class`: cursor manager to use - - .. versionchanged:: 2.1+ - Deprecated support for external cursor managers. - """ - warnings.warn("Support for external cursor managers is deprecated " - "and will be removed in PyMongo 3.0.", - DeprecationWarning, stacklevel=2) - manager = manager_class(self) - if not isinstance(manager, CursorManager): - raise TypeError("manager_class must be a subclass of " - "CursorManager") - - self.__cursor_manager = manager - - def __check_response_to_last_error(self, response, is_command): - """Check a response to a lastError message for errors. - - `response` is a byte string representing a response to the message. - If it represents an error response we raise OperationFailure. - - Return the response as a document. - """ - response = helpers._unpack_response(response) - - assert response["number_returned"] == 1 - result = response["data"][0] - - helpers._check_command_response(result, self.disconnect) - - # write commands - skip getLastError checking - if is_command: - return result - - # getLastError - error_msg = result.get("err", "") - if error_msg is None: - return result - if error_msg.startswith("not master"): - self.disconnect() - raise AutoReconnect(error_msg) - - details = result - # mongos returns the error code in an error object - # for some errors. - if "errObjects" in result: - for errobj in result["errObjects"]: - if errobj["err"] == error_msg: - details = errobj - break - - code = details.get("code") - if code in (11000, 11001, 12582): - raise DuplicateKeyError(details["err"], code, result) - raise OperationFailure(details["err"], code, result) - - def __check_bson_size(self, message): - """Make sure the message doesn't include BSON documents larger - than the connected server will accept. 
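An equivalent guard in application code might look like this sketch (document contents illustrative; BSON.encode is the standard bson encoder):

    from bson import BSON
    from pymongo import MongoClient

    client = MongoClient('localhost', 27017)
    big_doc = {'payload': 'x' * 1024}
    doc_size = len(BSON.encode(big_doc))
    if doc_size > client.max_bson_size:
        raise ValueError('document of %d bytes exceeds the %d byte limit'
                         % (doc_size, client.max_bson_size))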
- - :Parameters: - - `message`: message to check - """ - if len(message) == 3: - (request_id, data, max_doc_size) = message - if max_doc_size > self.max_bson_size: - raise DocumentTooLarge("BSON document too large (%d bytes)" - " - the connected server supports" - " BSON document sizes up to %d" - " bytes." % - (max_doc_size, self.max_bson_size)) - return (request_id, data) - else: - # get_more and kill_cursors messages - # don't include BSON documents. - return message - - def _send_message(self, message, - with_last_error=False, command=False, check_primary=True): - """Say something to Mongo. - - Raises ConnectionFailure if the message cannot be sent. Raises - OperationFailure if `with_last_error` is ``True`` and the - response to the getLastError call returns an error. Return the - response from lastError, or ``None`` if `with_last_error` - is ``False``. - - :Parameters: - - `message`: message to send - - `with_last_error`: check getLastError status after sending the - message - - `check_primary`: don't try to write to a non-primary; see - kill_cursors for an exception to this rule - """ - member = self.__ensure_member() - if check_primary and not with_last_error and not self.is_primary: - # The write won't succeed, bail as if we'd done a getLastError - raise AutoReconnect("not master") - - sock_info = self.__socket(member) - try: - try: - (request_id, data) = self.__check_bson_size(message) - sock_info.sock.sendall(data) - # Safe mode. We pack the message together with a lastError - # message and send both. We then get the response (to the - # lastError) and raise OperationFailure if it is an error - # response. - rv = None - if with_last_error: - response = self.__receive_message_on_socket(1, request_id, - sock_info) - rv = self.__check_response_to_last_error(response, command) - - return rv - except OperationFailure: - raise - except (ConnectionFailure, socket.error), e: - self.disconnect() - raise AutoReconnect(str(e)) - except: - sock_info.close() - raise - finally: - member.pool.maybe_return_socket(sock_info) - - def __receive_data_on_socket(self, length, sock_info): - """Lowest level receive operation. - - Takes length to receive and repeatedly calls recv until able to - return a buffer of that length, raising ConnectionFailure on error. - """ - message = EMPTY - while length: - chunk = sock_info.sock.recv(length) - if chunk == EMPTY: - raise ConnectionFailure("connection closed") - length -= len(chunk) - message += chunk - return message - - def __receive_message_on_socket(self, operation, rqst_id, sock_info): - """Receive a message in response to `rqst_id` on `sock`. - - Returns the response data with the header removed. - """ - header = self.__receive_data_on_socket(16, sock_info) - length = struct.unpack("<i", header[:4])[0] - - actual_op = struct.unpack("<i", header[12:16])[0] - assert actual_op == operation, ( - "wire protocol error: unknown opcode %r" % (actual_op,)) - - # No request id for exhaust cursor "getMore". - if rqst_id is not None: - resp_id = struct.unpack("<i", header[8:12])[0] - assert rqst_id == resp_id, ( - "ids don't match %r %r" % (rqst_id, resp_id)) - - return self.__receive_data_on_socket(length - 16, sock_info) - - def start_request(self): - """Ensure the current thread or greenlet always uses the same socket - until it calls :meth:`end_request`. This ensures consistent reads, - even if you read after an unacknowledged write. - - In Python 2.6 and above, or in Python 2.5 with - "from __future__ import with_statement", :meth:`start_request` can be - used as a context manager: - - >>> client = pymongo.MongoClient(auto_start_request=False) - >>> db = client.test - >>> _id = db.test_collection.insert({}) - >>> with client.start_request(): - ... for i in range(100): - ... db.test_collection.update({'_id': _id}, {'$set': {'i':i}}) - ... - ... # Definitely read the document after the final update completes - ... print db.test_collection.find({'_id': _id}) - - If a thread or greenlet calls start_request multiple times, an equal - number of calls to :meth:`end_request` is required to end the request. - - .. versionchanged:: 2.4 - Now counts the number of calls to start_request and doesn't end - request until an equal number of calls to end_request. - - .. versionadded:: 2.2 - The :class:`~pymongo.pool.Request` return value.
- :meth:`start_request` previously returned None - """ - member = self.__ensure_member() - member.pool.start_request() - return pool.Request(self) - - def in_request(self): - """True if this thread is in a request, meaning it has a socket - reserved for its exclusive use. - """ - member = self.__member # Don't try to connect if disconnected. - return member and member.pool.in_request() - - def end_request(self): - """Undo :meth:`start_request`. If :meth:`end_request` is called as many - times as :meth:`start_request`, the request is over and this thread's - connection returns to the pool. Extra calls to :meth:`end_request` have - no effect. - - Ending a request allows the :class:`~socket.socket` that has - been reserved for this thread by :meth:`start_request` to be returned to - the pool. Other threads will then be able to re-use that - :class:`~socket.socket`. If your application uses many threads, or has - long-running threads that infrequently perform MongoDB operations, then - judicious use of this method can lead to performance gains. Care should - be taken, however, to make sure that :meth:`end_request` is not called - in the middle of a sequence of operations in which ordering is - important. This could lead to unexpected results. - """ - member = self.__member # Don't try to connect if disconnected. - if member: - member.pool.end_request() - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.host == other.host and self.port == other.port - return NotImplemented - - def __ne__(self, other): - return not self == other - - def __repr__(self): - if len(self.__nodes) == 1: - return "MongoClient(%r, %r)" % (self.host, self.port) - else: - return "MongoClient(%r)" % ["%s:%d" % n for n in self.__nodes] - - def __getattr__(self, name): - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return database.Database(self, name) - - def __getitem__(self, name): - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return self.__getattr__(name) - - def close_cursor(self, cursor_id): - """Close a single database cursor. - - Raises :class:`TypeError` if `cursor_id` is not an instance of - ``(int, long)``. What closing the cursor actually means - depends on this client's cursor manager. - - :Parameters: - - `cursor_id`: id of cursor to close - """ - if not isinstance(cursor_id, (int, long)): - raise TypeError("cursor_id must be an instance of (int, long)") - - self.__cursor_manager.close(cursor_id) - - def kill_cursors(self, cursor_ids): - """Send a kill cursors message with the given ids. - - Raises :class:`TypeError` if `cursor_ids` is not an instance of - ``list``. - - :Parameters: - - `cursor_ids`: list of cursor ids to kill - """ - if not isinstance(cursor_ids, list): - raise TypeError("cursor_ids must be a list") - return self._send_message( - message.kill_cursors(cursor_ids), check_primary=False) - - def server_info(self): - """Get information about the MongoDB server we're connected to. - """ - return self.admin.command("buildinfo") - - def database_names(self): - """Get a list of the names of all databases on the connected server. - """ - return [db["name"] for db in - self.admin.command("listDatabases")["databases"]] - - def drop_database(self, name_or_database): - """Drop a database. 
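Either form works, for example (database name illustrative):

    client.drop_database('old_reports')       # by name
    client.drop_database(client.old_reports)  # by Database instance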
- - Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`basestring` (:class:`str` in python 3) or Database. - - :Parameters: - - `name_or_database`: the name of a database to drop, or a - :class:`~pymongo.database.Database` instance representing the - database to drop - """ - name = name_or_database - if isinstance(name, database.Database): - name = name.name - - if not isinstance(name, basestring): - raise TypeError("name_or_database must be an instance of " - "%s or Database" % (basestring.__name__,)) - - self._purge_index(name) - self[name].command("dropDatabase") - - def copy_database(self, from_name, to_name, - from_host=None, username=None, password=None): - """Copy a database, potentially from another host. - - Raises :class:`TypeError` if `from_name` or `to_name` is not - an instance of :class:`basestring` (:class:`str` in python 3). - Raises :class:`~pymongo.errors.InvalidName` if `to_name` is - not a valid database name. - - If `from_host` is ``None`` the current host is used as the - source. Otherwise the database is copied from `from_host`. - - If the source database requires authentication, `username` and - `password` must be specified. - - :Parameters: - - `from_name`: the name of the source database - - `to_name`: the name of the target database - - `from_host` (optional): host name to copy from - - `username` (optional): username for source database - - `password` (optional): password for source database - - .. note:: Specifying `username` and `password` requires server - version **>= 1.3.3+**. - - .. versionadded:: 1.5 - """ - if not isinstance(from_name, basestring): - raise TypeError("from_name must be an instance " - "of %s" % (basestring.__name__,)) - if not isinstance(to_name, basestring): - raise TypeError("to_name must be an instance " - "of %s" % (basestring.__name__,)) - - database._check_name(to_name) - - command = {"fromdb": from_name, "todb": to_name} - - if from_host is not None: - command["fromhost"] = from_host - - try: - self.start_request() - - if username is not None: - nonce = self.admin.command("copydbgetnonce", - fromhost=from_host)["nonce"] - command["username"] = username - command["nonce"] = nonce - command["key"] = auth._auth_key(nonce, username, password) - - return self.admin.command("copydb", **command) - finally: - self.end_request() - - def get_default_database(self): - """Get the database named in the MongoDB connection URI. - - >>> uri = 'mongodb://host/my_database' - >>> client = MongoClient(uri) - >>> db = client.get_default_database() - >>> assert db.name == 'my_database' - - Useful in scripts where you want to choose which database to use - based only on the URI in a configuration file. - """ - if self.__default_database_name is None: - raise ConfigurationError('No default database defined') - - return self[self.__default_database_name] - - @property - def is_locked(self): - """Is this server locked? While locked, all write operations - are blocked, although read operations may still be allowed. - Use :meth:`unlock` to unlock. - - .. versionadded:: 2.0 - """ - ops = self.admin.current_op() - return bool(ops.get('fsyncLock', 0)) - - def fsync(self, **kwargs): - """Flush all pending writes to datafiles. - - :Parameters: - - Optional parameters can be passed as keyword arguments: - - - `lock`: If True lock the server to disallow writes. - - `async`: If True don't block while synchronizing. - - .. warning:: `async` and `lock` can not be used together. - - .. 
warning:: MongoDB does not support the `async` option - on Windows and will raise an exception on that - platform. - - .. versionadded:: 2.0 - """ - self.admin.command("fsync", **kwargs) - - def unlock(self): - """Unlock a previously locked server. - - .. versionadded:: 2.0 - """ - self.admin['$cmd'].sys.unlock.find_one() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.disconnect() - - def __iter__(self): - return self - - def next(self): - raise TypeError("'MongoClient' object is not iterable") +__doc__ = original_doc +__all__ = ["MongoClient"] # noqa: F405 diff --git a/pymongo/mongo_replica_set_client.py b/pymongo/mongo_replica_set_client.py deleted file mode 100644 index 0f501ffd03..0000000000 --- a/pymongo/mongo_replica_set_client.py +++ /dev/null @@ -1,1932 +0,0 @@ -# Copyright 2011-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Tools for connecting to a MongoDB replica set. - -.. seealso:: :doc:`/examples/high_availability` for more examples of - how to connect to a replica set. - -To get a :class:`~pymongo.database.Database` instance from a -:class:`MongoReplicaSetClient` use either dictionary-style or -attribute-style access: - -.. doctest:: - - >>> from pymongo import MongoReplicaSetClient - >>> c = MongoReplicaSetClient('localhost:27017', replicaSet='repl0') - >>> c.test_database - Database(MongoReplicaSetClient([u'...', u'...']), u'test_database') - >>> c['test_database'] - Database(MongoReplicaSetClient([u'...', u'...']), u'test_database') -""" - -import atexit -import datetime -import socket -import struct -import threading -import time -import warnings -import weakref - -from bson.py3compat import b -from pymongo import (auth, - common, - database, - helpers, - message, - pool, - thread_util, - uri_parser) -from pymongo.member import Member -from pymongo.read_preferences import ( - ReadPreference, select_member, modes, MovingAverage) -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - DocumentTooLarge, - DuplicateKeyError, - OperationFailure, - InvalidOperation) -from pymongo.thread_util import DummyLock - -EMPTY = b("") -MAX_RETRY = 3 - -MONITORS = set() - -def register_monitor(monitor): - ref = weakref.ref(monitor, _on_monitor_deleted) - MONITORS.add(ref) - -def _on_monitor_deleted(ref): - """Remove the weakreference from the set - of active MONITORS. We no longer - care about keeping track of it - """ - MONITORS.remove(ref) - -def shutdown_monitors(): - # Keep a local copy of MONITORS as - # shutting down threads has a side effect - # of removing them from the MONITORS set() - monitors = list(MONITORS) - for ref in monitors: - monitor = ref() - if monitor: - monitor.shutdown() - monitor.join() -atexit.register(shutdown_monitors) - -def _partition_node(node): - """Split a host:port string returned from mongod/s into - a (host, int(port)) pair needed for socket.connect(). 
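For example (illustrative values):

    _partition_node('example.com:27018')  # ('example.com', 27018)
    _partition_node('example.com')        # ('example.com', 27017), default port
    _partition_node('[::1]:27017')        # ('::1', 27017), IPv6 brackets stripped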
- """ - host = node - port = 27017 - idx = node.rfind(':') - if idx != -1: - host, port = node[:idx], int(node[idx + 1:]) - if host.startswith('['): - host = host[1:-1] - return host, port - - -# Concurrency notes: A MongoReplicaSetClient keeps its view of the replica-set -# state in an RSState instance. RSStates are immutable, except for -# host-pinning. Pools, which are internally thread / greenlet safe, can be -# copied from old to new RSStates safely. The client updates its view of the -# set's state not by modifying its RSState but by replacing it with an updated -# copy. - -# In __init__, MongoReplicaSetClient gets a list of potential members called -# 'seeds' from its initial parameters, and calls refresh(). refresh() iterates -# over the the seeds in arbitrary order looking for a member it can connect to. -# Once it finds one, it calls 'ismaster' and sets self.__hosts to the list of -# members in the response, and connects to the rest of the members. refresh() -# sets the MongoReplicaSetClient's RSState. Finally, __init__ launches the -# replica-set monitor. - -# The monitor calls refresh() every 30 seconds, or whenever the client has -# encountered an error that prompts it to wake the monitor. - -# Every method that accesses the RSState multiple times within the method makes -# a local reference first and uses that throughout, so it's isolated from a -# concurrent method replacing the RSState with an updated copy. This technique -# avoids the need to lock around accesses to the RSState. - - -class RSState(object): - def __init__( - self, threadlocal, hosts=None, host_to_member=None, arbiters=None, - writer=None, error_message='No primary available', exc=None, - initial=False): - """An immutable snapshot of the client's view of the replica set state. - - Stores Member instances for all members we're connected to, and a - list of (host, port) pairs for all the hosts and arbiters listed - in the most recent ismaster response. - - :Parameters: - - `threadlocal`: Thread- or greenlet-local storage - - `hosts`: Sequence of (host, port) pairs - - `host_to_member`: Optional dict: (host, port) -> Member instance - - `arbiters`: Optional sequence of arbiters as (host, port) - - `writer`: Optional (host, port) of primary - - `error_message`: Optional error if `writer` is None - - `exc`: Optional error if state is unusable - - `initial`: Whether this is the initial client state - """ - self._threadlocal = threadlocal # threading.local or gevent local - self._arbiters = frozenset(arbiters or []) # set of (host, port) - self._writer = writer # (host, port) of the primary, or None - self._error_message = error_message - self._host_to_member = host_to_member or {} - self._hosts = frozenset(hosts or []) - self._members = frozenset(self._host_to_member.values()) - self._exc = exc - self._initial = initial - self._primary_member = self.get(writer) - - def clone_with_host_down(self, host, error_message): - """Get a clone, marking as "down" the member with the given (host, port) - """ - members = self._host_to_member.copy() - members.pop(host, None) - - if host == self.writer: - # The primary went down; record the error message. - return RSState( - self._threadlocal, - self._hosts, - members, - self._arbiters, - None, - error_message, - self._exc) - else: - # Some other host went down. Keep our current primary or, if it's - # already down, keep our current error message. 
- return RSState( - self._threadlocal, - self._hosts, - members, - self._arbiters, - self._writer, - self._error_message, - self._exc) - - def clone_without_writer(self, threadlocal): - """Get a clone without a primary. Unpins all threads. - - :Parameters: - - `threadlocal`: Thread- or greenlet-local storage - """ - return RSState( - threadlocal, - self._hosts, - self._host_to_member, - self._arbiters) - - def clone_with_error(self, exc): - return RSState( - self._threadlocal, - self._hosts, - self._host_to_member.copy(), - self._arbiters, - self._writer, - self._error_message, - exc) - - @property - def arbiters(self): - """(host, port) pairs from the last ismaster response's arbiter list. - """ - return self._arbiters - - @property - def writer(self): - """(host, port) of primary, or None.""" - return self._writer - - @property - def primary_member(self): - return self._primary_member - - @property - def hosts(self): - """(host, port) pairs from the last ismaster response's host list.""" - return self._hosts - - @property - def members(self): - """Set of Member instances.""" - return self._members - - @property - def error_message(self): - """The error, if any, raised when trying to connect to the primary""" - return self._error_message - - @property - def secondaries(self): - """Set of (host, port) pairs, secondaries we're connected to.""" - # Unlike the other properties, this isn't cached because it isn't used - # in regular operations. - return set([ - host for host, member in self._host_to_member.items() - if member.is_secondary]) - - @property - def exc(self): - """Reason RSState is unusable, or None.""" - return self._exc - - @property - def initial(self): - """Whether this is the initial client state.""" - return self._initial - - def get(self, host): - """Return a Member instance or None for the given (host, port).""" - return self._host_to_member.get(host) - - def pin_host(self, host, mode, tag_sets, latency): - """Pin this thread / greenlet to a member. - - `host` is a (host, port) pair. The remaining parameters are a read - preference. - """ - # Fun fact: Unlike in thread_util.ThreadIdent, we needn't lock around - # assignment here. Assignment to a threadlocal is only unsafe if it - # can cause other Python code to run implicitly. - self._threadlocal.host = host - self._threadlocal.read_preference = (mode, tag_sets, latency) - - def keep_pinned_host(self, mode, tag_sets, latency): - """Does a read pref match the last used by this thread / greenlet?""" - return self._threadlocal.read_preference == (mode, tag_sets, latency) - - @property - def pinned_host(self): - """The (host, port) last used by this thread / greenlet, or None.""" - return getattr(self._threadlocal, 'host', None) - - def unpin_host(self): - """Forget this thread / greenlet's last used member.""" - self._threadlocal.host = self._threadlocal.read_preference = None - - @property - def threadlocal(self): - return self._threadlocal - - def __str__(self): - return '<RSState [%s] writer="%s">' % ( - ', '.join(str(member) for member in self._host_to_member.itervalues()), - self.writer and '%s:%s' % self.writer or None) - - -class Monitor(object): - """Base class for replica set monitors. - """ - _refresh_interval = 30 - - def __init__(self, rsc, event_class): - self.rsc = weakref.proxy(rsc, self.shutdown) - self.timer = event_class() - self.refreshed = event_class() - self.started_event = event_class() - self.stopped = False - - def start_sync(self): - """Start the Monitor and block until it's really started.
- """ - # start() can return before the thread is fully bootstrapped, - # so a fork can leave the thread thinking it's alive in a child - # process when it's really dead: - # http://bugs.python.org/issue18418. - self.start() # Implemented in subclasses. - self.started_event.wait(5) - - def shutdown(self, dummy=None): - """Signal the monitor to shutdown. - """ - self.stopped = True - self.timer.set() - - def schedule_refresh(self): - """Refresh immediately - """ - if not self.isAlive(): - # Checks in RS client should prevent this. - raise AssertionError("schedule_refresh called with dead monitor") - self.refreshed.clear() - self.timer.set() - - def wait_for_refresh(self, timeout_seconds): - """Block until a scheduled refresh completes - """ - self.refreshed.wait(timeout_seconds) - - def monitor(self): - """Run until the RSC is collected or an - unexpected error occurs. - """ - self.started_event.set() - while True: - self.timer.wait(Monitor._refresh_interval) - if self.stopped: - break - self.timer.clear() - - try: - try: - self.rsc.refresh() - finally: - self.refreshed.set() - except AutoReconnect: - pass - - # RSC has been collected or there - # was an unexpected error. - except: - break - - def isAlive(self): - raise NotImplementedError() - - -class MonitorThread(threading.Thread, Monitor): - """Thread based replica set monitor. - """ - def __init__(self, rsc): - Monitor.__init__(self, rsc, threading.Event) - threading.Thread.__init__(self) - self.setName("ReplicaSetMonitorThread") - self.setDaemon(True) - - def run(self): - """Override Thread's run method. - """ - self.monitor() - - -have_gevent = False -try: - from gevent import Greenlet - from gevent.event import Event - - # Used by ReplicaSetConnection - from gevent.local import local as gevent_local - have_gevent = True - - class MonitorGreenlet(Monitor, Greenlet): - """Greenlet based replica set monitor. - """ - def __init__(self, rsc): - self.monitor_greenlet_alive = False - Monitor.__init__(self, rsc, Event) - Greenlet.__init__(self) - - def start_sync(self): - self.monitor_greenlet_alive = True - - # Call superclass. - Monitor.start_sync(self) - - # Don't override `run` in a Greenlet. Add _run instead. - # Refer to gevent's Greenlet docs and source for more - # information. - def _run(self): - """Define Greenlet's _run method. - """ - self.monitor() - - def isAlive(self): - # bool(self) isn't immediately True after someone calls start(), - # but isAlive() is. Thus it's safe for greenlets to do: - # "if not monitor.isAlive(): monitor.start()" - # ... and be guaranteed only one greenlet starts the monitor. - return self.monitor_greenlet_alive - -except ImportError: - pass - - -class MongoReplicaSetClient(common.BaseObject): - """Connection to a MongoDB replica set. - """ - - # For tests. - _refresh_timeout_sec = 5 - - def __init__(self, hosts_or_uri=None, max_pool_size=100, - document_class=dict, tz_aware=False, _connect=True, **kwargs): - """Create a new connection to a MongoDB replica set. - - The resultant client object has connection-pooling built - in. It also performs auto-reconnection when necessary. If an - operation fails because of a connection error, - :class:`~pymongo.errors.ConnectionFailure` is raised. If - auto-reconnection will be performed, - :class:`~pymongo.errors.AutoReconnect` will be - raised. Application code should handle this exception - (recognizing that the operation failed) and then continue to - execute. - - Raises :class:`~pymongo.errors.ConnectionFailure` if - the connection cannot be made. 
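A minimal construction sketch (host names and set name illustrative):

    from pymongo import MongoReplicaSetClient, ReadPreference

    client = MongoReplicaSetClient('h1:27017,h2:27017,h3:27017',
                                   replicaSet='rs0')
    client.read_preference = ReadPreference.SECONDARY_PREFERRED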
- - The `hosts_or_uri` parameter can be a full `mongodb URI - <http://dochub.mongodb.org/core/connections>`_, in addition to - a string of `host:port` pairs (e.g. 'host1:port1,host2:port2'). - If `hosts_or_uri` is None 'localhost:27017' will be used. - - .. note:: Instances of :class:`MongoReplicaSetClient` start a - background task to monitor the state of the replica set. This allows - it to quickly respond to changes in replica set configuration. - Before discarding an instance of :class:`MongoReplicaSetClient` make - sure you call :meth:`~close` to ensure that the monitor task is - cleanly shut down. - - :Parameters: - - `hosts_or_uri` (optional): A MongoDB URI or string of `host:port` - pairs. If a host is an IPv6 literal it must be enclosed in '[' and - ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for - localhost) - - `max_pool_size` (optional): The maximum number of connections - each pool will open simultaneously. If this is set, operations - will block if there are `max_pool_size` outstanding connections - from the pool. Defaults to 100. - - `document_class` (optional): default class to use for - documents returned from queries on this client - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`MongoReplicaSetClient` will be timezone - aware (otherwise they will be naive) - - `replicaSet`: (required) The name of the replica set to connect to. - The driver will verify that each host it connects to is a member of - this replica set. Can be passed as a keyword argument or as a - MongoDB URI option. - - | **Other optional parameters can be passed as keyword arguments:** - - - `host`: For compatibility with :class:`~mongo_client.MongoClient`. - If both `host` and `hosts_or_uri` are specified `host` takes - precedence. - - `port`: For compatibility with :class:`~mongo_client.MongoClient`. - The default port number to use for hosts. - - `socketTimeoutMS`: (integer) How long (in milliseconds) a send or - receive on a socket can take before timing out. Defaults to ``None`` - (no timeout). - - `connectTimeoutMS`: (integer) How long (in milliseconds) a - connection can take to be opened before timing out. Defaults to - ``20000``. - - `waitQueueTimeoutMS`: (integer) How long (in milliseconds) a - thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer) Multiplied by max_pool_size to give - the number of threads allowed to wait for a socket at one time. - Defaults to ``None`` (no waiters). - - `auto_start_request`: If ``True``, each thread that accesses - this :class:`MongoReplicaSetClient` has a socket allocated to it - for the thread's lifetime, for each member of the set. For - :class:`~pymongo.read_preferences.ReadPreference` PRIMARY, - auto_start_request=True ensures consistent reads, even if you read - after an unacknowledged write. For read preferences other than - PRIMARY, there are no consistency guarantees. Default to ``False``. - - `use_greenlets`: If ``True``, use a background Greenlet instead of - a background thread to monitor state of replica set. Additionally, - :meth:`start_request()` assigns a greenlet-local, rather than - thread-local, socket. - `use_greenlets` with :class:`MongoReplicaSetClient` requires - `Gevent <http://gevent.org/>`_ to be installed. - - | **Write Concern options:** - - - `w`: (integer or string) Write operations will block until they have - been replicated to the specified number or tagged set of servers.
- `w=` always includes the replica set primary (e.g. w=3 means - write to the primary and wait until replicated to **two** - secondaries). Passing w=0 **disables write acknowledgement** and all - other write concern options. - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. - - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. - - | **Read preference options:** - - - `read_preference`: The read preference for this client. - See :class:`~pymongo.read_preferences.ReadPreference` for available - options. - - `tag_sets`: Read from replica-set members with these tags. - To specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags." :class:`MongoReplicaSetClient` tries each set of - tags in turn until it finds a set of tags with at least one matching - member. - - `secondary_acceptable_latency_ms`: (integer) Any replica-set member - whose ping time is within secondary_acceptable_latency_ms of the - nearest member may accept reads. Default 15 milliseconds. - **Ignored by mongos** and must be configured on the command line. - See the localThreshold_ option for more information. - - | **SSL configuration:** - - - `ssl`: If ``True``, create the connection to the servers using SSL. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. If included with the ``certfile`` then - only the ``ssl_certfile`` is needed. Implies ``ssl=True``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``ssl=True``. - - `ssl_cert_reqs`: Specifies whether a certificate is required from - the other side of the connection, and whether it will be validated - if provided. It must be one of the three values ``ssl.CERT_NONE`` - (certificates ignored), ``ssl.CERT_OPTIONAL`` - (not required, but validated if provided), or ``ssl.CERT_REQUIRED`` - (required and validated). If the value of this parameter is not - ``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point - to a file of CA certificates. Implies ``ssl=True``. - - `ssl_ca_certs`: The ca_certs file contains a set of concatenated - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``ssl=True``. - - .. versionchanged:: 2.5 - Added additional ssl options - .. versionadded:: 2.4 - - .. 
_localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - self.__opts = {} - self.__seeds = set() - self.__index_cache = {} - self.__auth_credentials = {} - - self.__max_pool_size = common.validate_positive_integer_or_none( - 'max_pool_size', max_pool_size) - self.__tz_aware = common.validate_boolean('tz_aware', tz_aware) - self.__document_class = document_class - self.__monitor = None - self.__closed = False - - # Compatibility with mongo_client.MongoClient - host = kwargs.pop('host', hosts_or_uri) - - port = kwargs.pop('port', 27017) - if not isinstance(port, int): - raise TypeError("port must be an instance of int") - - username = None - password = None - self.__default_database_name = None - options = {} - if host is None: - self.__seeds.add(('localhost', port)) - elif '://' in host: - res = uri_parser.parse_uri(host, port) - self.__seeds.update(res['nodelist']) - username = res['username'] - password = res['password'] - self.__default_database_name = res['database'] - options = res['options'] - else: - self.__seeds.update(uri_parser.split_hosts(host, port)) - - # _pool_class and _monitor_class are for deep customization of PyMongo, - # e.g. Motor. SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. - self.pool_class = kwargs.pop('_pool_class', pool.Pool) - self.__monitor_class = kwargs.pop('_monitor_class', None) - - for option, value in kwargs.iteritems(): - option, value = common.validate(option, value) - self.__opts[option] = value - self.__opts.update(options) - - self.__use_greenlets = self.__opts.get('use_greenlets', False) - if self.__use_greenlets and not have_gevent: - raise ConfigurationError( - "The gevent module is not available. " - "Install the gevent package from PyPI.") - - self.__rs_state = RSState(self.__make_threadlocal(), initial=True) - - self.__request_counter = thread_util.Counter(self.__use_greenlets) - - self.__auto_start_request = self.__opts.get('auto_start_request', False) - if self.__auto_start_request: - self.start_request() - - self.__name = self.__opts.get('replicaset') - if not self.__name: - raise ConfigurationError("the replicaSet " - "keyword parameter is required.") - - self.__net_timeout = self.__opts.get('sockettimeoutms') - self.__conn_timeout = self.__opts.get('connecttimeoutms') - self.__wait_queue_timeout = self.__opts.get('waitqueuetimeoutms') - self.__wait_queue_multiple = self.__opts.get('waitqueuemultiple') - self.__use_ssl = self.__opts.get('ssl', None) - self.__ssl_keyfile = self.__opts.get('ssl_keyfile', None) - self.__ssl_certfile = self.__opts.get('ssl_certfile', None) - self.__ssl_cert_reqs = self.__opts.get('ssl_cert_reqs', None) - self.__ssl_ca_certs = self.__opts.get('ssl_ca_certs', None) - - ssl_kwarg_keys = [k for k in kwargs.keys() if k.startswith('ssl_')] - if self.__use_ssl is False and ssl_kwarg_keys: - raise ConfigurationError("ssl has not been enabled but the " - "following ssl parameters have been set: " - "%s. Please set `ssl=True` or remove." - % ', '.join(ssl_kwarg_keys)) - - if self.__ssl_cert_reqs and not self.__ssl_ca_certs: - raise ConfigurationError("If `ssl_cert_reqs` is not " - "`ssl.CERT_NONE` then you must " - "include `ssl_ca_certs` to be able " - "to validate the server.") - - if ssl_kwarg_keys and self.__use_ssl is None: - # ssl options imply ssl = True - self.__use_ssl = True - - if self.__use_ssl and not common.HAS_SSL: - raise ConfigurationError("The ssl module is not available. 
If you " - "are using a python version previous to " - "2.6 you must install the ssl package " - "from PyPI.") - - super(MongoReplicaSetClient, self).__init__(**self.__opts) - if self.slave_okay: - warnings.warn("slave_okay is deprecated. Please " - "use read_preference instead.", DeprecationWarning, - stacklevel=2) - - if _connect: - try: - self.refresh(initial=True) - except AutoReconnect, e: - # ConnectionFailure makes more sense here than AutoReconnect - raise ConnectionFailure(str(e)) - - if username: - mechanism = options.get('authmechanism', 'MONGODB-CR') - source = ( - options.get('authsource') - or self.__default_database_name - or 'admin') - - credentials = auth._build_credentials_tuple(mechanism, - source, - unicode(username), - unicode(password), - options) - try: - self._cache_credentials(source, credentials, _connect) - except OperationFailure, exc: - raise ConfigurationError(str(exc)) - - # Start the monitor after we know the configuration is correct. - if not self.__monitor_class: - if self.__use_greenlets: - self.__monitor_class = MonitorGreenlet - else: - # Common case: monitor RS with a background thread. - self.__monitor_class = MonitorThread - - if self.__use_greenlets: - # Greenlets don't need to lock around access to the monitor. - # A Greenlet can safely do: - # "if not self.__monitor: self.__monitor = monitor_class()" - # because it won't be interrupted between the check and the - # assignment. - self.__monitor_lock = DummyLock() - else: - self.__monitor_lock = threading.Lock() - - if _connect: - self.__ensure_monitor() - - def _cached(self, dbname, coll, index): - """Test if `index` is cached. - """ - cache = self.__index_cache - now = datetime.datetime.utcnow() - return (dbname in cache and - coll in cache[dbname] and - index in cache[dbname][coll] and - now < cache[dbname][coll][index]) - - def _cache_index(self, dbase, collection, index, cache_for): - """Add an index to the index cache for ensure_index operations. - """ - now = datetime.datetime.utcnow() - expire = datetime.timedelta(seconds=cache_for) + now - - if dbase not in self.__index_cache: - self.__index_cache[dbase] = {} - self.__index_cache[dbase][collection] = {} - self.__index_cache[dbase][collection][index] = expire - - elif collection not in self.__index_cache[dbase]: - self.__index_cache[dbase][collection] = {} - self.__index_cache[dbase][collection][index] = expire - - else: - self.__index_cache[dbase][collection][index] = expire - - def _purge_index(self, database_name, - collection_name=None, index_name=None): - """Purge an index from the index cache. - - If `index_name` is None purge an entire collection. - - If `collection_name` is None purge an entire database. - """ - if not database_name in self.__index_cache: - return - - if collection_name is None: - del self.__index_cache[database_name] - return - - if not collection_name in self.__index_cache[database_name]: - return - - if index_name is None: - del self.__index_cache[database_name][collection_name] - return - - if index_name in self.__index_cache[database_name][collection_name]: - del self.__index_cache[database_name][collection_name][index_name] - - def _cache_credentials(self, source, credentials, connect=True): - """Add credentials to the database authentication cache - for automatic login when a socket is created. If `connect` is True, - verify the credentials on the server first. - - Raises OperationFailure if other credentials are already stored for - this source. 
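For example, continuing the construction sketch above (credentials illustrative):

    client.test.authenticate('alice', 'pw1')
    client.test.authenticate('bob', 'pw2')  # raises OperationFailure
    client.test.logout()                    # purges the cached credentials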
- """ - if source in self.__auth_credentials: - # Nothing to do if we already have these credentials. - if credentials == self.__auth_credentials[source]: - return - raise OperationFailure('Another user is already authenticated ' - 'to this database. You must logout first.') - - if connect: - # Try to authenticate even during failover. - member = select_member( - self.__rs_state.members, ReadPreference.PRIMARY_PREFERRED) - - if not member: - raise AutoReconnect( - "No replica set members available for authentication") - - sock_info = self.__socket(member) - try: - # Since __check_auth was called in __socket - # there is no need to call it here. - auth.authenticate(credentials, sock_info, self.__simple_command) - sock_info.authset.add(credentials) - finally: - member.pool.maybe_return_socket(sock_info) - - self.__auth_credentials[source] = credentials - - def _purge_credentials(self, source): - """Purge credentials from the database authentication cache. - """ - if source in self.__auth_credentials: - del self.__auth_credentials[source] - - def __check_auth(self, sock_info): - """Authenticate using cached database credentials. - """ - if self.__auth_credentials or sock_info.authset: - cached = set(self.__auth_credentials.itervalues()) - - authset = sock_info.authset.copy() - - # Logout any credentials that no longer exist in the cache. - for credentials in authset - cached: - self.__simple_command(sock_info, credentials[1], {'logout': 1}) - sock_info.authset.discard(credentials) - - for credentials in cached - authset: - auth.authenticate(credentials, - sock_info, self.__simple_command) - sock_info.authset.add(credentials) - - @property - def seeds(self): - """The seed list used to connect to this replica set. - - A sequence of (host, port) pairs. - """ - return self.__seeds - - @property - def hosts(self): - """All active and passive (priority 0) replica set - members known to this client. This does not include - hidden or slaveDelay members, or arbiters. - - A sequence of (host, port) pairs. - """ - return self.__rs_state.hosts - - @property - def primary(self): - """The (host, port) of the current primary of the replica set. - - Returns None if there is no primary. - """ - return self.__rs_state.writer - - @property - def secondaries(self): - """The secondary members known to this client. - - A sequence of (host, port) pairs. - """ - return self.__rs_state.secondaries - - @property - def arbiters(self): - """The arbiters known to this client. - - A sequence of (host, port) pairs. - """ - return self.__rs_state.arbiters - - @property - def is_mongos(self): - """If this instance is connected to mongos (always False). - - .. versionadded:: 2.3 - """ - return False - - @property - def max_pool_size(self): - """The maximum number of sockets the pool will open concurrently. - - When the pool has reached `max_pool_size`, operations block waiting for - a socket to be returned to the pool. If ``waitQueueTimeoutMS`` is set, - a blocked operation will raise :exc:`~pymongo.errors.ConnectionFailure` - after a timeout. By default ``waitQueueTimeoutMS`` is not set. - - .. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, this - parameter would limit only the idle sockets the pool would hold - onto, not the number of open sockets. The default has also changed - to 100. - - .. versionchanged:: 2.6 - """ - return self.__max_pool_size - - @property - def use_greenlets(self): - """Whether calling :meth:`start_request` assigns greenlet-local, - rather than thread-local, sockets. - - .. 
versionadded:: 2.4.2 - """ - return self.__use_greenlets - - def get_document_class(self): - """document_class getter""" - return self.__document_class - - def set_document_class(self, klass): - """document_class setter""" - self.__document_class = klass - - document_class = property(get_document_class, set_document_class, - doc="""Default class to use for documents - returned from this client. - """) - - @property - def tz_aware(self): - """Does this client return timezone-aware datetimes? - """ - return self.__tz_aware - - @property - def max_bson_size(self): - """Returns the maximum size BSON object the connected primary - accepts in bytes. Defaults to 16MB if not connected to a - primary. - """ - rs_state = self.__rs_state - if rs_state.primary_member: - return rs_state.primary_member.max_bson_size - return common.MAX_BSON_SIZE - - @property - def max_message_size(self): - """Returns the maximum message size the connected primary - accepts in bytes. Defaults to 32MB if not connected to a - primary. - """ - rs_state = self.__rs_state - if rs_state.primary_member: - return rs_state.primary_member.max_message_size - return common.MAX_MESSAGE_SIZE - - @property - def min_wire_version(self): - """The minWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - rs_state = self.__rs_state - if rs_state.primary_member: - return rs_state.primary_member.min_wire_version - return common.MIN_WIRE_VERSION - - @property - def max_wire_version(self): - """The maxWireVersion reported by the server. - - Returns ``0`` when connected to server versions prior to MongoDB 2.6. - - .. versionadded:: 2.7 - """ - rs_state = self.__rs_state - if rs_state.primary_member: - return rs_state.primary_member.max_wire_version - return common.MAX_WIRE_VERSION - - @property - def max_write_batch_size(self): - """The maxWriteBatchSize reported by the server. - - Returns a default value when connected to server versions prior to - MongoDB 2.6. - - .. versionadded:: 2.7 - """ - rs_state = self.__rs_state - if rs_state.primary_member: - return rs_state.primary_member.max_write_batch_size - return common.MAX_WRITE_BATCH_SIZE - - @property - def auto_start_request(self): - """Is auto_start_request enabled? - """ - return self.__auto_start_request - - def __simple_command(self, sock_info, dbname, spec): - """Send a command to the server. - Returns (response, ping_time in seconds). - """ - rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec) - start = time.time() - try: - sock_info.sock.sendall(msg) - response = self.__recv_msg(1, rqst_id, sock_info) - except: - sock_info.close() - raise - - end = time.time() - response = helpers._unpack_response(response)['data'][0] - msg = "command %r failed: %%s" % spec - helpers._check_command_response(response, None, msg) - return response, end - start - - def __is_master(self, host): - """Directly call ismaster. - Returns (response, connection_pool, ping_time in seconds). 
- """ - connection_pool = self.pool_class( - host, - self.__max_pool_size, - self.__net_timeout, - self.__conn_timeout, - self.__use_ssl, - wait_queue_timeout=self.__wait_queue_timeout, - wait_queue_multiple=self.__wait_queue_multiple, - use_greenlets=self.__use_greenlets, - ssl_keyfile=self.__ssl_keyfile, - ssl_certfile=self.__ssl_certfile, - ssl_cert_reqs=self.__ssl_cert_reqs, - ssl_ca_certs=self.__ssl_ca_certs) - - if self.in_request(): - connection_pool.start_request() - - sock_info = connection_pool.get_socket() - try: - response, ping_time = self.__simple_command( - sock_info, 'admin', {'ismaster': 1} - ) - - connection_pool.maybe_return_socket(sock_info) - return response, connection_pool, ping_time - except (ConnectionFailure, socket.error): - connection_pool.discard_socket(sock_info) - raise - - def __schedule_refresh(self, sync=False): - """Awake the monitor to update our view of the replica set's state. - - If `sync` is True, block until the refresh completes. - - If multiple application threads call __schedule_refresh while refresh - is in progress, the work of refreshing the state is only performed - once. - """ - if self.__closed: - raise InvalidOperation('MongoReplicaSetClient has been closed') - - monitor = self.__ensure_monitor() - monitor.schedule_refresh() - if sync: - monitor.wait_for_refresh(timeout_seconds=self._refresh_timeout_sec) - - def __ensure_monitor(self): - """Ensure the monitor is started, and return it.""" - self.__monitor_lock.acquire() - try: - # Another thread can start the monitor while we wait for the lock. - if self.__monitor is not None and self.__monitor.isAlive(): - return self.__monitor - - monitor = self.__monitor = self.__monitor_class(self) - register_monitor(monitor) - monitor.start_sync() - return monitor - finally: - self.__monitor_lock.release() - - def __make_threadlocal(self): - if self.__use_greenlets: - return gevent_local() - else: - return threading.local() - - def refresh(self, initial=False): - """Iterate through the existing host list, or possibly the - seed list, to update the list of hosts and arbiters in this - replica set. - """ - # Only one thread / greenlet calls refresh() at a time: the one - # running __init__() or the monitor. We won't modify the state, only - # replace it. - rs_state = self.__rs_state - try: - self.__rs_state = self.__create_rs_state(rs_state, initial) - except ConfigurationError, e: - self.__rs_state = rs_state.clone_with_error(e) - raise - - def __create_rs_state(self, rs_state, initial): - errors = [] - if rs_state.hosts: - # Try first those hosts we think are up, then the down ones. - nodes = sorted( - rs_state.hosts, - key=lambda host: bool(rs_state.get(host)), - reverse=True) - else: - nodes = self.__seeds - - hosts = set() - - # This will become the new RSState. - members = {} - arbiters = set() - writer = None - - # Look for first member from which we can get a list of all members. - for node in nodes: - member, sock_info = rs_state.get(node), None - try: - if member: - sock_info = self.__socket(member, force=True) - response, ping_time = self.__simple_command( - sock_info, 'admin', {'ismaster': 1}) - member.pool.maybe_return_socket(sock_info) - new_member = member.clone_with(response, ping_time) - else: - response, pool, ping_time = self.__is_master(node) - new_member = Member( - node, pool, response, MovingAverage([ping_time])) - - # Check that this host is part of the given replica set. - # Fail fast if we find a bad seed during __init__. - # Regular refreshes keep searching for valid nodes. 
- if response.get('setName') != self.__name: - if initial: - host, port = node - raise ConfigurationError("%s:%d is not a member of " - "replica set %s" - % (host, port, self.__name)) - else: - continue - - if "arbiters" in response: - arbiters = set([ - _partition_node(h) for h in response["arbiters"]]) - if "hosts" in response: - hosts.update([_partition_node(h) - for h in response["hosts"]]) - if "passives" in response: - hosts.update([_partition_node(h) - for h in response["passives"]]) - - # Start off the new 'members' dict with this member - # but don't add seed list members. - if node in hosts: - members[node] = new_member - if response['ismaster']: - writer = node - - except (ConnectionFailure, socket.error), why: - if member: - member.pool.discard_socket(sock_info) - errors.append("%s:%d: %s" % (node[0], node[1], str(why))) - if hosts: - break - else: - # We've changed nothing. On the next refresh, we'll try the same - # list of hosts: rs_state.hosts or self.__seeds. - if errors: - raise AutoReconnect(', '.join(errors)) - raise ConfigurationError('No suitable hosts found') - - # Ensure we have a pool for each member, and find the primary. - for host in hosts: - if host in members: - # This member was the first we connected to, in the loop above. - continue - - member, sock_info = rs_state.get(host), None - try: - if member: - sock_info = self.__socket(member, force=True) - res, ping_time = self.__simple_command( - sock_info, 'admin', {'ismaster': 1}) - - if res.get('setName') != self.__name: - # Not a member of this set. - continue - - member.pool.maybe_return_socket(sock_info) - new_member = member.clone_with(res, ping_time) - else: - res, connection_pool, ping_time = self.__is_master(host) - if res.get('setName') != self.__name: - # Not a member of this set. - continue - - new_member = Member( - host, connection_pool, res, MovingAverage([ping_time])) - - members[host] = new_member - - except (ConnectionFailure, socket.error): - if member: - member.pool.discard_socket(sock_info) - continue - - if res['ismaster']: - writer = host - - if not members: - # In the first loop, we connected to a member in the seed list - # and got a host list, but couldn't reach any members in that - # list. - raise AutoReconnect( - "Couldn't reach any hosts in %s. Replica set is" - " configured with internal hostnames or IPs?" - % list(hosts)) - - if writer == rs_state.writer: - threadlocal = self.__rs_state.threadlocal - else: - # We unpin threads from members if the primary has changed, since - # no monotonic consistency can be promised now anyway. - threadlocal = self.__make_threadlocal() - - # Get list of hosts in the RS config, including unreachable ones. - # Prefer the primary's list, otherwise any member's list. - if writer: - response = members[writer].ismaster_response - elif members: - response = members.values()[0].ismaster_response - else: - response = {} - - final_host_list = ( - response.get('hosts', []) - + response.get('passives', [])) - - # Replace old state with new. - return RSState( - threadlocal, - [_partition_node(h) for h in final_host_list], - members, - arbiters, - writer) - - def __get_rs_state(self): - rs_state = self.__rs_state - if rs_state.exc: - raise rs_state.exc - - return rs_state - - def __find_primary(self): - """Returns a connection to the primary of this replica set, - if one exists, or raises AutoReconnect. - """ - rs_state = self.__get_rs_state() - primary = rs_state.primary_member - if primary: - return primary - - # We had a failover. 
- self.__schedule_refresh(sync=True) - - # Try again. This time copy the RSState reference so we're guaranteed - # primary_member and error_message are from the same state. - rs_state = self.__get_rs_state() - if rs_state.primary_member: - return rs_state.primary_member - - # Couldn't find the primary. - raise AutoReconnect(rs_state.error_message) - - def __socket(self, member, force=False): - """Get a SocketInfo from the pool. - """ - if self.auto_start_request and not self.in_request(): - self.start_request() - - sock_info = member.pool.get_socket(force=force) - - try: - self.__check_auth(sock_info) - except OperationFailure: - member.pool.maybe_return_socket(sock_info) - raise - return sock_info - - def _ensure_connected(self, sync=False): - """Ensure this client instance is connected to a primary. - """ - # This may be the first time we're connecting to the set. - self.__ensure_monitor() - - if sync: - rs_state = self.__rs_state - if rs_state.exc or not rs_state.primary_member: - self.__schedule_refresh(sync) - - def disconnect(self): - """Disconnect from the replica set primary, unpin all members, and - refresh our view of the replica set. - """ - rs_state = self.__rs_state - if rs_state.primary_member: - rs_state.primary_member.pool.reset() - - threadlocal = self.__make_threadlocal() - self.__rs_state = rs_state.clone_without_writer(threadlocal) - self.__schedule_refresh() - - def close(self): - """Close this client instance. - - This method first terminates the replica set monitor, then disconnects - from all members of the replica set. No further operations are - permitted on this client. - - .. warning:: This method stops the replica set monitor task. The - replica set monitor is required to properly handle replica set - configuration changes, including a failure of the primary. - Once :meth:`~close` is called this client instance must not be - reused. - - .. versionchanged:: 2.2.1 - The :meth:`close` method now terminates the replica set monitor. - """ - self.__closed = True - self.__rs_state = RSState(self.__make_threadlocal()) - - monitor, self.__monitor = self.__monitor, None - if monitor: - monitor.shutdown() - # Use a reasonable timeout. - monitor.join(1.0) - - def alive(self): - """Return ``False`` if there has been an error communicating with the - primary, else ``True``. - - This method attempts to check the status of the primary with minimal - I/O. The current thread / greenlet retrieves a socket (its request - socket if it's in a request, or a random idle socket if it's not in a - request) from the primary's connection pool and checks whether calling - select_ on it raises an error. If there are currently no idle sockets, - :meth:`alive` attempts to connect a new socket. - - A more certain way to determine primary availability is to ping it:: - - client.admin.command('ping') - - .. _select: http://docs.python.org/2/library/select.html#select.select - """ - # In the common case, a socket is available and was used recently, so - # calling select() on it is a reasonable attempt to see if the OS has - # reported an error. 
-        primary, sock_info = None, None
-        try:
-            try:
-                rs_state = self.__get_rs_state()
-                primary = rs_state.primary_member
-                if not primary:
-                    return False
-                else:
-                    sock_info = self.__socket(primary)
-                    return not pool._closed(sock_info.sock)
-            except (socket.error, ConnectionFailure):
-                return False
-        finally:
-            if primary:
-                primary.pool.maybe_return_socket(sock_info)
-
-    def __check_response_to_last_error(self, response, is_command):
-        """Check a response to a lastError message for errors.
-
-        `response` is a byte string representing a response to the message.
-        If it represents an error response we raise OperationFailure.
-
-        Return the response as a document.
-        """
-        response = helpers._unpack_response(response)
-
-        assert response["number_returned"] == 1
-        result = response["data"][0]
-
-        helpers._check_command_response(result, self.disconnect)
-
-        # write commands - skip getLastError checking
-        if is_command:
-            return result
-
-        # getLastError
-        error_msg = result.get("err", "")
-        if error_msg is None:
-            return result
-        if error_msg.startswith("not master"):
-            self.disconnect()
-            raise AutoReconnect(error_msg)
-
-        code = result.get("code")
-        if code in (11000, 11001, 12582):
-            raise DuplicateKeyError(result["err"], code, result)
-        raise OperationFailure(result["err"], code, result)
-
-    def __recv_data(self, length, sock_info):
-        """Lowest level receive operation.
-
-        Takes length to receive and repeatedly calls recv until able to
-        return a buffer of that length, raising ConnectionFailure on error.
-        """
-        message = EMPTY
-        while length:
-            chunk = sock_info.sock.recv(length)
-            if chunk == EMPTY:
-                raise ConnectionFailure("connection closed")
-            length -= len(chunk)
-            message += chunk
-        return message
-
-    def __recv_msg(self, operation, rqst_id, sock):
-        """Receive a message in response to `rqst_id` on `sock`.
-
-        Returns the response data with the header removed.
-        """
-        header = self.__recv_data(16, sock)
-        length = struct.unpack("<i", header[:4])[0]
-
-        actual_op = struct.unpack("<i", header[12:16])[0]
-        assert actual_op == operation, (
-            "wire protocol error: unknown opcode %r" % (actual_op,))
-
-        if rqst_id is not None:
-            resp_id = struct.unpack("<i", header[8:12])[0]
-            assert rqst_id == resp_id, (
-                "ids don't match %r %r" % (rqst_id, resp_id))
-
-        return self.__recv_data(length - 16, sock)
-
-    def __check_bson_size(self, msg, max_size):
-        """Make sure the message doesn't include BSON documents larger
-        than the connected server will accept.
-
-        :Parameters:
-          - `msg`: message to check
-        """
-        if len(msg) == 3:
-            request_id, data, max_doc_size = msg
-            if max_doc_size > max_size:
-                raise DocumentTooLarge("BSON document too large (%d bytes)"
-                                       " - the connected server supports"
-                                       " BSON document sizes up to %d"
-                                       " bytes." %
-                                       (max_doc_size, max_size))
-            return (request_id, data)
-        # get_more and kill_cursors messages
-        # don't include BSON documents.
-        return msg
-
-    def _send_message(self, msg, with_last_error=False,
-                      command=False, _connection_to_use=None):
-        """Say something to Mongo.
-
-        Raises ConnectionFailure if the message cannot be sent. Raises
-        OperationFailure if `with_last_error` is ``True`` and the
-        response to the getLastError call returns an error. Return the
-        response from lastError, or ``None`` if `with_last_error` is
-        ``False``.
-
-        :Parameters:
-          - `msg`: message to send
-          - `with_last_error`: check getLastError status after sending the
-            message
-        """
-        self._ensure_connected()
-
-        if _connection_to_use in (None, -1):
-            member = self.__find_primary()
-        else:
-            member = self.__get_rs_state().get(_connection_to_use)
-
-        sock_info = None
-        try:
-            try:
-                sock_info = self.__socket(member)
-                rqst_id, data = self.__check_bson_size(
-                    msg, member.max_bson_size)
-
-                sock_info.sock.sendall(data)
-                # Safe mode. We pack the message together with a lastError
-                # message and send both. We then get the response (to the
-                # lastError) and raise OperationFailure if it is an error
-                # response.
- rv = None - if with_last_error: - response = self.__recv_msg(1, rqst_id, sock_info) - rv = self.__check_response_to_last_error(response, command) - return rv - except OperationFailure: - raise - except(ConnectionFailure, socket.error), why: - member.pool.discard_socket(sock_info) - if _connection_to_use in (None, -1): - self.disconnect() - raise AutoReconnect(str(why)) - except: - sock_info.close() - raise - finally: - member.pool.maybe_return_socket(sock_info) - - def __send_and_receive(self, member, msg, **kwargs): - """Send a message on the given socket and return the response data. - - Can raise socket.error. - """ - sock_info = None - exhaust = kwargs.get('exhaust') - rqst_id, data = self.__check_bson_size(msg, member.max_bson_size) - try: - sock_info = self.__socket(member) - - if not exhaust and "network_timeout" in kwargs: - sock_info.sock.settimeout(kwargs['network_timeout']) - - sock_info.sock.sendall(data) - response = self.__recv_msg(1, rqst_id, sock_info) - - if not exhaust: - if "network_timeout" in kwargs: - sock_info.sock.settimeout(self.__net_timeout) - member.pool.maybe_return_socket(sock_info) - - return response, sock_info, member.pool - except: - if sock_info is not None: - sock_info.close() - member.pool.maybe_return_socket(sock_info) - raise - - def __try_read(self, member, msg, **kwargs): - """Attempt a read from a member; on failure mark the member "down" and - wake up the monitor thread to refresh as soon as possible. - """ - try: - return self.__send_and_receive(member, msg, **kwargs) - except socket.timeout, e: - # Could be one slow query, don't refresh. - host, port = member.host - raise AutoReconnect("%s:%d: %s" % (host, port, e)) - except (socket.error, ConnectionFailure), why: - # Try to replace our RSState with a clone where this member is - # marked "down", to reduce exceptions on other threads, or repeated - # exceptions on this thread. We accept that there's a race - # condition (another thread could be replacing our state with a - # different version concurrently) but this approach is simple and - # lock-free. - self.__rs_state = self.__rs_state.clone_with_host_down( - member.host, str(why)) - - self.__schedule_refresh() - host, port = member.host - raise AutoReconnect("%s:%d: %s" % (host, port, why)) - - def _send_message_with_response(self, msg, _connection_to_use=None, - _must_use_master=False, **kwargs): - """Send a message to Mongo and return the response. - - Sends the given message and returns (host used, response). - - :Parameters: - - `msg`: (request_id, data) pair making up the message to send - - `_connection_to_use`: Optional (host, port) of member for message, - used by Cursor for getMore and killCursors messages. - - `_must_use_master`: If True, send to primary. - """ - self._ensure_connected() - - rs_state = self.__get_rs_state() - tag_sets = kwargs.get('tag_sets', [{}]) - mode = kwargs.get('read_preference', ReadPreference.PRIMARY) - if _must_use_master: - mode = ReadPreference.PRIMARY - tag_sets = [{}] - - if not rs_state.primary_member: - # If we were initialized with _connect=False then connect now. - # Otherwise, the primary was down last we checked. Start a refresh - # if one is not already in progress. If caller requested the - # primary, wait to see if it's up, otherwise continue with - # known-good members. 
- sync = (rs_state.initial or mode == ReadPreference.PRIMARY) - self.__schedule_refresh(sync=sync) - rs_state = self.__rs_state - - latency = kwargs.get( - 'secondary_acceptable_latency_ms', - self.secondary_acceptable_latency_ms) - - try: - if _connection_to_use is not None: - if _connection_to_use == -1: - member = rs_state.primary_member - error_message = rs_state.error_message - else: - member = rs_state.get(_connection_to_use) - error_message = '%s:%s not available' % _connection_to_use - - if not member: - raise AutoReconnect(error_message) - - return member.pool.pair, self.__try_read( - member, msg, **kwargs) - except AutoReconnect: - if _connection_to_use in (-1, rs_state.writer): - # Primary's down. Refresh. - self.disconnect() - raise - - # To provide some monotonic consistency, we use the same member as - # long as this thread is in a request and all reads use the same - # mode, tags, and latency. The member gets unpinned if pref changes, - # if member changes state, if we detect a failover, or if this thread - # calls end_request(). - errors = [] - - pinned_host = rs_state.pinned_host - pinned_member = rs_state.get(pinned_host) - if (pinned_member - and pinned_member.matches_mode(mode) - and pinned_member.matches_tag_sets(tag_sets) # TODO: REMOVE? - and rs_state.keep_pinned_host(mode, tag_sets, latency)): - try: - return ( - pinned_member.host, - self.__try_read(pinned_member, msg, **kwargs)) - except AutoReconnect, why: - if _must_use_master or mode == ReadPreference.PRIMARY: - self.disconnect() - raise - else: - errors.append(str(why)) - - # No pinned member, or pinned member down or doesn't match read pref - rs_state.unpin_host() - - members = list(rs_state.members) - while len(errors) < MAX_RETRY: - member = select_member( - members=members, - mode=mode, - tag_sets=tag_sets, - latency=latency) - - if not member: - # Ran out of members to try - break - - try: - # Removes member on failure, so select_member won't retry it. - response = self.__try_read(member, msg, **kwargs) - - # Success - if self.in_request(): - # Keep reading from this member in this thread / greenlet - # unless read preference changes - rs_state.pin_host(member.host, mode, tag_sets, latency) - return member.host, response - except AutoReconnect, why: - if mode == ReadPreference.PRIMARY: - raise - - errors.append(str(why)) - members.remove(member) - - # Ran out of tries - if mode == ReadPreference.PRIMARY: - msg = "No replica set primary available for query" - elif mode == ReadPreference.SECONDARY: - msg = "No replica set secondary available for query" - else: - msg = "No replica set members available for query" - - msg += " with ReadPreference %s" % modes[mode] - - if tag_sets != [{}]: - msg += " and tags " + repr(tag_sets) - - # Format a message like: - # 'No replica set secondary available for query with ReadPreference - # SECONDARY. host:27018: timed out, host:27019: timed out'. - if errors: - msg += ". " + ', '.join(errors) - - raise AutoReconnect(msg, errors) - - def _exhaust_next(self, sock_info): - """Used with exhaust cursors to get the next batch off the socket. - """ - return self.__recv_msg(1, None, sock_info) - - def start_request(self): - """Ensure the current thread or greenlet always uses the same socket - until it calls :meth:`end_request`. For - :class:`~pymongo.read_preferences.ReadPreference` PRIMARY, - auto_start_request=True ensures consistent reads, even if you read - after an unacknowledged write. For read preferences other than PRIMARY, - there are no consistency guarantees. 
- - In Python 2.6 and above, or in Python 2.5 with - "from __future__ import with_statement", :meth:`start_request` can be - used as a context manager: - - >>> client = pymongo.MongoReplicaSetClient() - >>> db = client.test - >>> _id = db.test_collection.insert({}) - >>> with client.start_request(): - ... for i in range(100): - ... db.test_collection.update({'_id': _id}, {'$set': {'i':i}}) - ... - ... # Definitely read the document after the final update completes - ... print db.test_collection.find({'_id': _id}) - - .. versionadded:: 2.2 - The :class:`~pymongo.pool.Request` return value. - :meth:`start_request` previously returned None - """ - # We increment our request counter's thread- or greenlet-local value - # for every call to start_request; however, we only call each pool's - # start_request once to start a request, and call each pool's - # end_request once to end it. We don't let pools' request counters - # exceed 1. This keeps things sane when we create and delete pools - # within a request. - if 1 == self.__request_counter.inc(): - for member in self.__rs_state.members: - member.pool.start_request() - - return pool.Request(self) - - def in_request(self): - """True if :meth:`start_request` has been called, but not - :meth:`end_request`, or if `auto_start_request` is True and - :meth:`end_request` has not been called in this thread or greenlet. - """ - return bool(self.__request_counter.get()) - - def end_request(self): - """Undo :meth:`start_request` and allow this thread's connections to - replica set members to return to the pool. - - Calling :meth:`end_request` allows the :class:`~socket.socket` that has - been reserved for this thread by :meth:`start_request` to be returned - to the pool. Other threads will then be able to re-use that - :class:`~socket.socket`. If your application uses many threads, or has - long-running threads that infrequently perform MongoDB operations, then - judicious use of this method can lead to performance gains. Care should - be taken, however, to make sure that :meth:`end_request` is not called - in the middle of a sequence of operations in which ordering is - important. This could lead to unexpected results. - """ - rs_state = self.__rs_state - if 0 == self.__request_counter.dec(): - for member in rs_state.members: - # No effect if not in a request - member.pool.end_request() - - rs_state.unpin_host() - - def __eq__(self, other): - # XXX: Implement this? - return NotImplemented - - def __ne__(self, other): - return NotImplemented - - def __repr__(self): - return "MongoReplicaSetClient(%r)" % (["%s:%d" % n - for n in self.hosts],) - - def __getattr__(self, name): - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return database.Database(self, name) - - def __getitem__(self, name): - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return self.__getattr__(name) - - def close_cursor(self, cursor_id, _conn_id): - """Close a single database cursor. - - Raises :class:`TypeError` if `cursor_id` is not an instance of - ``(int, long)``. What closing the cursor actually means - depends on this client's cursor manager. 
- - :Parameters: - - `cursor_id`: id of cursor to close - """ - if not isinstance(cursor_id, (int, long)): - raise TypeError("cursor_id must be an instance of (int, long)") - - self._send_message(message.kill_cursors([cursor_id]), - _connection_to_use=_conn_id) - - def server_info(self): - """Get information about the MongoDB primary we're connected to. - """ - return self.admin.command("buildinfo") - - def database_names(self): - """Get a list of the names of all databases on the connected server. - """ - return [db["name"] for db in - self.admin.command("listDatabases")["databases"]] - - def drop_database(self, name_or_database): - """Drop a database. - - Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`basestring` (:class:`str` in python 3) or Database - - :Parameters: - - `name_or_database`: the name of a database to drop, or a - :class:`~pymongo.database.Database` instance representing the - database to drop - """ - name = name_or_database - if isinstance(name, database.Database): - name = name.name - - if not isinstance(name, basestring): - raise TypeError("name_or_database must be an instance of " - "%s or Database" % (basestring.__name__,)) - - self._purge_index(name) - self[name].command("dropDatabase") - - def copy_database(self, from_name, to_name, - from_host=None, username=None, password=None): - """Copy a database, potentially from another host. - - Raises :class:`TypeError` if `from_name` or `to_name` is not - an instance of :class:`basestring` (:class:`str` in python 3). - Raises :class:`~pymongo.errors.InvalidName` if `to_name` is - not a valid database name. - - If `from_host` is ``None`` the current host is used as the - source. Otherwise the database is copied from `from_host`. - - If the source database requires authentication, `username` and - `password` must be specified. - - :Parameters: - - `from_name`: the name of the source database - - `to_name`: the name of the target database - - `from_host` (optional): host name to copy from - - `username` (optional): username for source database - - `password` (optional): password for source database - - .. note:: Specifying `username` and `password` requires server - version **>= 1.3.3+**. - """ - if not isinstance(from_name, basestring): - raise TypeError("from_name must be an instance " - "of %s" % (basestring.__name__,)) - if not isinstance(to_name, basestring): - raise TypeError("to_name must be an instance " - "of %s" % (basestring.__name__,)) - - database._check_name(to_name) - - command = {"fromdb": from_name, "todb": to_name} - - if from_host is not None: - command["fromhost"] = from_host - - try: - self.start_request() - - if username is not None: - nonce = self.admin.command("copydbgetnonce", - fromhost=from_host)["nonce"] - command["username"] = username - command["nonce"] = nonce - command["key"] = auth._auth_key(nonce, username, password) - - return self.admin.command("copydb", **command) - finally: - self.end_request() - - def get_default_database(self): - """Get the database named in the MongoDB connection URI. - - >>> uri = 'mongodb://host/my_database' - >>> client = MongoReplicaSetClient(uri) - >>> db = client.get_default_database() - >>> assert db.name == 'my_database' - - Useful in scripts where you want to choose which database to use - based only on the URI in a configuration file. 
- """ - if self.__default_database_name is None: - raise ConfigurationError('No default database defined') - - return self[self.__default_database_name] diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py new file mode 100644 index 0000000000..46a78aea0b --- /dev/null +++ b/pymongo/monitoring.py @@ -0,0 +1,1909 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools to monitor driver events. + +.. versionadded:: 3.1 + +.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below + are included in the PyMongo distribution under the + :mod:`~pymongo.event_loggers` submodule. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. + + +Use :func:`register` to register global listeners for specific events. +Listeners must inherit from one of the abstract classes below and implement +the correct functions for that class. + +For example, a simple command logger might be implemented like this:: + + import logging + + from pymongo import monitoring + + class CommandLogger(monitoring.CommandListener): + + def started(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event)) + + monitoring.register(CommandLogger()) + +Server discovery and monitoring events are also available. For example:: + + class ServerLogger(monitoring.ServerListener): + + def opened(self, event): + logging.info("Server {0.server_address} added to topology " + "{0.topology_id}".format(event)) + + def description_changed(self, event): + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + "Server {0.server_address} changed type from " + "{0.previous_description.server_type_name} to " + "{0.new_description.server_type_name}".format(event)) + + def closed(self, event): + logging.warning("Server {0.server_address} removed from topology " + "{0.topology_id}".format(event)) + + + class HeartbeatLogger(monitoring.ServerHeartbeatListener): + + def started(self, event): + logging.info("Heartbeat sent to server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + # The reply.document attribute was added in PyMongo 3.4. 
+ logging.info("Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event)) + + def failed(self, event): + logging.warning("Heartbeat to server {0.connection_id} " + "failed with error {0.reply}".format(event)) + + class TopologyLogger(monitoring.TopologyListener): + + def opened(self, event): + logging.info("Topology with id {0.topology_id} " + "opened".format(event)) + + def description_changed(self, event): + logging.info("Topology description updated for " + "topology id {0.topology_id}".format(event)) + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + "Topology {0.topology_id} changed type from " + "{0.previous_description.topology_type_name} to " + "{0.new_description.topology_type_name}".format(event)) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event): + logging.info("Topology with id {0.topology_id} " + "closed".format(event)) + +Connection monitoring and pooling events are also available. For example:: + + class ConnectionPoolLogger(ConnectionPoolListener): + + def pool_created(self, event): + logging.info("[pool {0.address}] pool created".format(event)) + + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + + def pool_cleared(self, event): + logging.info("[pool {0.address}] pool cleared".format(event)) + + def pool_closed(self, event): + logging.info("[pool {0.address}] pool closed".format(event)) + + def connection_created(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection created".format(event)) + + def connection_ready(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection setup succeeded".format(event)) + + def connection_closed(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event)) + + def connection_check_out_started(self, event): + logging.info("[pool {0.address}] connection check out " + "started".format(event)) + + def connection_check_out_failed(self, event): + logging.info("[pool {0.address}] connection check out " + "failed, reason: {0.reason}".format(event)) + + def connection_checked_out(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked out of pool".format(event)) + + def connection_checked_in(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked into pool".format(event)) + + +Event listeners can also be registered per instance of +:class:`~pymongo.mongo_client.MongoClient`:: + + client = MongoClient(event_listeners=[CommandLogger()]) + +Note that previously registered global listeners are automatically included +when configuring per client event listeners. Registering a new global listener +will not add that listener to existing client instances. + +.. note:: Events are delivered **synchronously**. Application threads block + waiting for event handlers (e.g. :meth:`~CommandListener.started`) to + return. 
Care must be taken to ensure that your event handlers are efficient + enough to not adversely affect overall application performance. + +.. warning:: The command documents published through this API are *not* copies. + If you intend to modify them in any way you must copy them in your event + handler first. +""" + +from __future__ import annotations + +import datetime +from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from bson.objectid import ObjectId +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _SENSITIVE_COMMANDS, _handle_exception +from pymongo.typings import _Address, _DocumentOut + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) + +_LISTENERS = _Listeners([], [], [], [], []) + + +class _EventListener: + """Abstract base class for all event listeners.""" + + +class CommandListener(_EventListener): + """Abstract base class for command listeners. + + Handles `CommandStartedEvent`, `CommandSucceededEvent`, + and `CommandFailedEvent`. + """ + + def started(self, event: CommandStartedEvent) -> None: + """Abstract method to handle a `CommandStartedEvent`. + + :param event: An instance of :class:`CommandStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: CommandSucceededEvent) -> None: + """Abstract method to handle a `CommandSucceededEvent`. + + :param event: An instance of :class:`CommandSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: CommandFailedEvent) -> None: + """Abstract method to handle a `CommandFailedEvent`. + + :param event: An instance of :class:`CommandFailedEvent`. + """ + raise NotImplementedError + + +class ConnectionPoolListener(_EventListener): + """Abstract base class for connection pool listeners. + + Handles all of the connection pool events defined in the Connection + Monitoring and Pooling Specification: + :class:`PoolCreatedEvent`, :class:`PoolClearedEvent`, + :class:`PoolClosedEvent`, :class:`ConnectionCreatedEvent`, + :class:`ConnectionReadyEvent`, :class:`ConnectionClosedEvent`, + :class:`ConnectionCheckOutStartedEvent`, + :class:`ConnectionCheckOutFailedEvent`, + :class:`ConnectionCheckedOutEvent`, + and :class:`ConnectionCheckedInEvent`. + + .. versionadded:: 3.9 + """ + + def pool_created(self, event: PoolCreatedEvent) -> None: + """Abstract method to handle a :class:`PoolCreatedEvent`. + + Emitted when a connection Pool is created. + + :param event: An instance of :class:`PoolCreatedEvent`. + """ + raise NotImplementedError + + def pool_ready(self, event: PoolReadyEvent) -> None: + """Abstract method to handle a :class:`PoolReadyEvent`. + + Emitted when a connection Pool is marked ready. + + :param event: An instance of :class:`PoolReadyEvent`. + + .. versionadded:: 4.0 + """ + raise NotImplementedError + + def pool_cleared(self, event: PoolClearedEvent) -> None: + """Abstract method to handle a `PoolClearedEvent`. + + Emitted when a connection Pool is cleared. + + :param event: An instance of :class:`PoolClearedEvent`. + """ + raise NotImplementedError + + def pool_closed(self, event: PoolClosedEvent) -> None: + """Abstract method to handle a `PoolClosedEvent`. 
+ + Emitted when a connection Pool is closed. + + :param event: An instance of :class:`PoolClosedEvent`. + """ + raise NotImplementedError + + def connection_created(self, event: ConnectionCreatedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCreatedEvent`. + + Emitted when a connection Pool creates a Connection object. + + :param event: An instance of :class:`ConnectionCreatedEvent`. + """ + raise NotImplementedError + + def connection_ready(self, event: ConnectionReadyEvent) -> None: + """Abstract method to handle a :class:`ConnectionReadyEvent`. + + Emitted when a connection has finished its setup, and is now ready to + use. + + :param event: An instance of :class:`ConnectionReadyEvent`. + """ + raise NotImplementedError + + def connection_closed(self, event: ConnectionClosedEvent) -> None: + """Abstract method to handle a :class:`ConnectionClosedEvent`. + + Emitted when a connection Pool closes a connection. + + :param event: An instance of :class:`ConnectionClosedEvent`. + """ + raise NotImplementedError + + def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. + + Emitted when the driver starts attempting to check out a connection. + + :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. + """ + raise NotImplementedError + + def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. + + Emitted when the driver's attempt to check out a connection fails. + + :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. + """ + raise NotImplementedError + + def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. + + Emitted when the driver successfully checks out a connection. + + :param event: An instance of :class:`ConnectionCheckedOutEvent`. + """ + raise NotImplementedError + + def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedInEvent`. + + Emitted when the driver checks in a connection back to the connection + Pool. + + :param event: An instance of :class:`ConnectionCheckedInEvent`. + """ + raise NotImplementedError + + +class ServerHeartbeatListener(_EventListener): + """Abstract base class for server heartbeat listeners. + + Handles `ServerHeartbeatStartedEvent`, `ServerHeartbeatSucceededEvent`, + and `ServerHeartbeatFailedEvent`. + + .. versionadded:: 3.3 + """ + + def started(self, event: ServerHeartbeatStartedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatStartedEvent`. + + :param event: An instance of :class:`ServerHeartbeatStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: + """Abstract method to handle a `ServerHeartbeatSucceededEvent`. + + :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: ServerHeartbeatFailedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatFailedEvent`. + + :param event: An instance of :class:`ServerHeartbeatFailedEvent`. + """ + raise NotImplementedError + + +class TopologyListener(_EventListener): + """Abstract base class for topology monitoring listeners. + Handles `TopologyOpenedEvent`, `TopologyDescriptionChangedEvent`, and + `TopologyClosedEvent`. + + .. 
versionadded:: 3.3
+    """
+
+    def opened(self, event: TopologyOpenedEvent) -> None:
+        """Abstract method to handle a `TopologyOpenedEvent`.
+
+        :param event: An instance of :class:`TopologyOpenedEvent`.
+        """
+        raise NotImplementedError
+
+    def description_changed(self, event: TopologyDescriptionChangedEvent) -> None:
+        """Abstract method to handle a `TopologyDescriptionChangedEvent`.
+
+        :param event: An instance of :class:`TopologyDescriptionChangedEvent`.
+        """
+        raise NotImplementedError
+
+    def closed(self, event: TopologyClosedEvent) -> None:
+        """Abstract method to handle a `TopologyClosedEvent`.
+
+        :param event: An instance of :class:`TopologyClosedEvent`.
+        """
+        raise NotImplementedError
+
+
+class ServerListener(_EventListener):
+    """Abstract base class for server listeners.
+    Handles `ServerOpeningEvent`, `ServerDescriptionChangedEvent`, and
+    `ServerClosedEvent`.
+
+    .. versionadded:: 3.3
+    """
+
+    def opened(self, event: ServerOpeningEvent) -> None:
+        """Abstract method to handle a `ServerOpeningEvent`.
+
+        :param event: An instance of :class:`ServerOpeningEvent`.
+        """
+        raise NotImplementedError
+
+    def description_changed(self, event: ServerDescriptionChangedEvent) -> None:
+        """Abstract method to handle a `ServerDescriptionChangedEvent`.
+
+        :param event: An instance of :class:`ServerDescriptionChangedEvent`.
+        """
+        raise NotImplementedError
+
+    def closed(self, event: ServerClosedEvent) -> None:
+        """Abstract method to handle a `ServerClosedEvent`.
+
+        :param event: An instance of :class:`ServerClosedEvent`.
+        """
+        raise NotImplementedError
+
+
+def _to_micros(dur: timedelta) -> int:
+    """Convert duration 'dur' to microseconds."""
+    return int(dur.total_seconds() * 10e5)
+
+
+def _validate_event_listeners(
+    option: str, listeners: Sequence[_EventListener]
+) -> Sequence[_EventListener]:
+    """Validate event listeners."""
+    if not isinstance(listeners, abc.Sequence):
+        raise TypeError(f"{option} must be a list or tuple, not {type(listeners)}")
+    for listener in listeners:
+        if not isinstance(listener, _EventListener):
+            raise TypeError(
+                f"Listeners for {option} must be either a "
+                "CommandListener, ServerHeartbeatListener, "
+                "ServerListener, TopologyListener, or "
+                "ConnectionPoolListener, "
+                f"not {type(listener)}"
+            )
+    return listeners
+
+
+def register(listener: _EventListener) -> None:
+    """Register a global event listener.
+
+    :param listener: A subclass of :class:`CommandListener`,
+        :class:`ServerHeartbeatListener`, :class:`ServerListener`,
+        :class:`TopologyListener`, or :class:`ConnectionPoolListener`.
+    """
+    if not isinstance(listener, _EventListener):
+        raise TypeError(
+            f"Listeners for {listener} must be either a "
+            "CommandListener, ServerHeartbeatListener, "
+            "ServerListener, TopologyListener, or "
+            "ConnectionPoolListener, "
+            f"not {type(listener)}"
+        )
+    if isinstance(listener, CommandListener):
+        _LISTENERS.command_listeners.append(listener)
+    if isinstance(listener, ServerHeartbeatListener):
+        _LISTENERS.server_heartbeat_listeners.append(listener)
+    if isinstance(listener, ServerListener):
+        _LISTENERS.server_listeners.append(listener)
+    if isinstance(listener, TopologyListener):
+        _LISTENERS.topology_listeners.append(listener)
+    if isinstance(listener, ConnectionPoolListener):
+        _LISTENERS.cmap_listeners.append(listener)
+
+
+# The "hello" command is also deemed sensitive when attempting speculative
+# authentication.
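+# For example, a handshake command shaped like
+# {"hello": 1, "speculativeAuthenticate": {"saslStart": 1, ...}} carries
+# credential material, so its document is redacted from command monitoring
+# events just as the "authenticate" command itself is. (The document above is
+# illustrative, not a literal server handshake.)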
+def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): + return True + return False + + +class _CommandEvent: + """Base class for command events.""" + + __slots__ = ( + "__cmd_name", + "__rqst_id", + "__conn_id", + "__op_id", + "__service_id", + "__db", + "__server_conn_id", + ) + + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + self.__cmd_name = command_name + self.__rqst_id = request_id + self.__conn_id = connection_id + self.__op_id = operation_id + self.__service_id = service_id + self.__db = database_name + self.__server_conn_id = server_connection_id + + @property + def command_name(self) -> str: + """The command name.""" + return self.__cmd_name + + @property + def request_id(self) -> int: + """The request id for this operation.""" + return self.__rqst_id + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this command was sent to.""" + return self.__conn_id + + @property + def service_id(self) -> Optional[ObjectId]: + """The service_id this command was sent to, or ``None``. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def operation_id(self) -> Optional[int]: + """An id for this series of events or None.""" + return self.__op_id + + @property + def database_name(self) -> str: + """The database_name this command was sent to, or ``""``. + + .. versionadded:: 4.6 + """ + return self.__db + + @property + def server_connection_id(self) -> Optional[int]: + """The server-side connection id for the connection this command was sent on, or ``None``. + + .. versionadded:: 4.7 + """ + return self.__server_conn_id + + +class CommandStartedEvent(_CommandEvent): + """Event published when a command starts. + + :param command: The command document. + :param database_name: The name of the database this command was run against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + """ + + __slots__ = ("__cmd",) + + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + server_connection_id: Optional[int] = None, + ) -> None: + if not command: + raise ValueError(f"{command!r} is not a valid command") + # Command name must be first key. 
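+        # (Both Python dicts and bson.son.SON preserve insertion order, so for
+        # a command document such as {"find": "coll", "filter": {}} the first
+        # key, "find", is the command name. The sample document here is
+        # illustrative.)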
+        command_name = next(iter(command))
+        super().__init__(
+            command_name,
+            request_id,
+            connection_id,
+            operation_id,
+            service_id=service_id,
+            database_name=database_name,
+            server_connection_id=server_connection_id,
+        )
+        cmd_name = command_name.lower()
+        if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command):
+            self.__cmd: _DocumentOut = {}
+        else:
+            self.__cmd = command
+
+    @property
+    def command(self) -> _DocumentOut:
+        """The command document."""
+        return self.__cmd
+
+    @property
+    def database_name(self) -> str:
+        """The name of the database this command was run against."""
+        return super().database_name
+
+    def __repr__(self) -> str:
+        return (
+            "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>"
+        ).format(
+            self.__class__.__name__,
+            self.connection_id,
+            self.database_name,
+            self.command_name,
+            self.operation_id,
+            self.service_id,
+            self.server_connection_id,
+        )
+
+
+class CommandSucceededEvent(_CommandEvent):
+    """Event published when a command succeeds.
+
+    :param duration: The command duration as a datetime.timedelta.
+    :param reply: The server reply document.
+    :param command_name: The command name.
+    :param request_id: The request id for this operation.
+    :param connection_id: The address (host, port) of the server this command
+        was sent to.
+    :param operation_id: An optional identifier for a series of related events.
+    :param service_id: The service_id this command was sent to, or ``None``.
+    :param database_name: The database this command was sent to, or ``""``.
+    """
+
+    __slots__ = ("__duration_micros", "__reply")
+
+    def __init__(
+        self,
+        duration: datetime.timedelta,
+        reply: _DocumentOut,
+        command_name: str,
+        request_id: int,
+        connection_id: _Address,
+        operation_id: Optional[int],
+        service_id: Optional[ObjectId] = None,
+        database_name: str = "",
+        server_connection_id: Optional[int] = None,
+    ) -> None:
+        super().__init__(
+            command_name,
+            request_id,
+            connection_id,
+            operation_id,
+            service_id=service_id,
+            database_name=database_name,
+            server_connection_id=server_connection_id,
+        )
+        self.__duration_micros = _to_micros(duration)
+        cmd_name = command_name.lower()
+        if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply):
+            self.__reply: _DocumentOut = {}
+        else:
+            self.__reply = reply
+
+    @property
+    def duration_micros(self) -> int:
+        """The duration of this operation in microseconds."""
+        return self.__duration_micros
+
+    @property
+    def reply(self) -> _DocumentOut:
+        """The server reply document for this operation."""
+        return self.__reply
+
+    def __repr__(self) -> str:
+        return (
+            "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>"
+        ).format(
+            self.__class__.__name__,
+            self.connection_id,
+            self.database_name,
+            self.command_name,
+            self.operation_id,
+            self.duration_micros,
+            self.service_id,
+            self.server_connection_id,
+        )
+
+
+class CommandFailedEvent(_CommandEvent):
+    """Event published when a command fails.
+
+    :param duration: The command duration as a datetime.timedelta.
+    :param failure: The server reply document.
+    :param command_name: The command name.
+    :param request_id: The request id for this operation.
+    :param connection_id: The address (host, port) of the server this command
+        was sent to.
+    :param operation_id: An optional identifier for a series of related events.
+    :param service_id: The service_id this command was sent to, or ``None``.
+ :param database_name: The database this command was sent to, or ``""``. + """ + + __slots__ = ("__duration_micros", "__failure") + + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + self.__duration_micros = _to_micros(duration) + self.__failure = failure + + @property + def duration_micros(self) -> int: + """The duration of this operation in microseconds.""" + return self.__duration_micros + + @property + def failure(self) -> _DocumentOut: + """The server failure document for this operation.""" + return self.__failure + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + self.server_connection_id, + ) + + +class _PoolEvent: + """Base class for pool events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server the pool is attempting + to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class PoolCreatedEvent(_PoolEvent): + """Published when a Connection Pool is created. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__options",) + + def __init__(self, address: _Address, options: dict[str, Any]) -> None: + super().__init__(address) + self.__options = options + + @property + def options(self) -> dict[str, Any]: + """Any non-default pool options that were set on this Connection Pool.""" + return self.__options + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" + + +class PoolReadyEvent(_PoolEvent): + """Published when a Connection Pool is marked ready. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 4.0 + """ + + __slots__ = () + + +class PoolClearedEvent(_PoolEvent): + """Published when a Connection Pool is cleared. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + :param service_id: The service_id this command was sent to, or ``None``. + :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__service_id", "__interrupt_connections") + + def __init__( + self, + address: _Address, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + super().__init__(address) + self.__service_id = service_id + self.__interrupt_connections = interrupt_connections + + @property + def service_id(self) -> Optional[ObjectId]: + """Connections with this service_id are cleared. 
+ + When service_id is ``None``, all connections in the pool are cleared. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def interrupt_connections(self) -> bool: + """If True, active connections are interrupted during clearing. + + .. versionadded:: 4.7 + """ + return self.__interrupt_connections + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" + + +class PoolClosedEvent(_PoolEvent): + """Published when a Connection Pool is closed. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionClosedEvent`. + + .. versionadded:: 3.9 + """ + + STALE = "stale" + """The pool was cleared, making the connection no longer valid.""" + + IDLE = "idle" + """The connection became stale by being idle for too long (maxIdleTimeMS). + """ + + ERROR = "error" + """The connection experienced an error, making it no longer valid.""" + + POOL_CLOSED = "poolClosed" + """The pool was closed, making the connection no longer valid.""" + + +class ConnectionCheckOutFailedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionCheckOutFailedEvent`. + + .. versionadded:: 3.9 + """ + + TIMEOUT = "timeout" + """The connection check out attempt exceeded the specified timeout.""" + + POOL_CLOSED = "poolClosed" + """The pool was previously closed, and cannot provide new connections.""" + + CONN_ERROR = "connectionError" + """The connection check out attempt experienced an error while setting up + a new connection. + """ + + +class _ConnectionEvent: + """Private base class for connection events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server this connection is + attempting to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + + @property + def connection_id(self) -> int: + """The ID of the connection.""" + return self.__connection_id + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" + + +class _ConnectionDurationEvent(_ConnectionIdEvent): + """Private base class for connection events with a duration.""" + + __slots__ = ("__duration",) + + def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: + super().__init__(address, connection_id) + self.__duration = duration + + @property + def duration(self) -> Optional[float]: + """The duration of the connection event. + + .. versionadded:: 4.7 + """ + return self.__duration + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" + + +class ConnectionCreatedEvent(_ConnectionIdEvent): + """Published when a Connection Pool creates a Connection object. + + NOTE: This connection is not ready for use until the + :class:`ConnectionReadyEvent` is published. 
+ + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionReadyEvent(_ConnectionDurationEvent): + """Published when a Connection has finished its setup, and is ready to use. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedEvent(_ConnectionIdEvent): + """Published when a Connection is closed. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + :param reason: A reason explaining why this connection was closed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, connection_id: int, reason: str): + super().__init__(address, connection_id) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why this connection was closed. + + The reason must be one of the strings from the + :class:`ConnectionClosedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r})".format( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) + + +class ConnectionCheckOutStartedEvent(_ConnectionEvent): + """Published when the driver starts attempting to check out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): + """Published when the driver's attempt to check out a connection fails. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param reason: A reason explaining why connection check out failed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: + super().__init__(address=address, connection_id=0, duration=duration) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why connection check out failed. + + The reason must be one of the strings from the + :class:`ConnectionCheckOutFailedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" + + +class ConnectionCheckedOutEvent(_ConnectionDurationEvent): + """Published when the driver successfully checks out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckedInEvent(_ConnectionIdEvent): + """Published when the driver checks in a Connection into the Pool. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. 
versionadded:: 3.9 + """ + + __slots__ = () + + +class _ServerEvent: + """Base class for server events.""" + + __slots__ = ("__server_address", "__topology_id") + + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: + self.__server_address = server_address + self.__topology_id = topology_id + + @property + def server_address(self) -> _Address: + """The address (host, port) pair of the server""" + return self.__server_address + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" + + +class ServerDescriptionChangedEvent(_ServerEvent): + """Published when server description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> ServerDescription: + """The previous + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> ServerDescription: + """The new + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) + + +class ServerOpeningEvent(_ServerEvent): + """Published when server is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerClosedEvent(_ServerEvent): + """Published when server is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyEvent: + """Base class for topology description events.""" + + __slots__ = ("__topology_id",) + + def __init__(self, topology_id: ObjectId) -> None: + self.__topology_id = topology_id + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" + + +class TopologyDescriptionChangedEvent(TopologyEvent): + """Published when the topology description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> TopologyDescription: + """The previous + :class:`~pymongo.topology_description.TopologyDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> TopologyDescription: + """The new + :class:`~pymongo.topology_description.TopologyDescription`. 
+ """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} topology_id: {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) + + +class TopologyOpenedEvent(TopologyEvent): + """Published when the topology is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyClosedEvent(TopologyEvent): + """Published when the topology is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class _ServerHeartbeatEvent: + """Base class for server heartbeat events.""" + + __slots__ = ("__connection_id", "__awaited") + + def __init__(self, connection_id: _Address, awaited: bool = False) -> None: + self.__connection_id = connection_id + self.__awaited = awaited + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this heartbeat was sent + to. + """ + return self.__connection_id + + @property + def awaited(self) -> bool: + """Whether the heartbeat was issued as an awaitable hello command. + + .. versionadded:: 4.6 + """ + return self.__awaited + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" + + +class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): + """Published when a heartbeat is started. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat succeeds. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, + duration: float, + reply: Hello[dict[str, Any]], + connection_id: _Address, + awaited: bool = False, + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Hello[dict[str, Any]]: + """An instance of :class:`~pymongo.hello.Hello`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat fails, either with an "ok: 0" + or a socket exception. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Exception: + """A subclass of :exc:`Exception`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. 
versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class _EventListeners: + """Configure event listeners for a client instance. + + Any event listeners registered globally are included by default. + + :param listeners: A list of event listeners. + """ + + def __init__(self, listeners: Optional[Sequence[_EventListener]]): + self.__command_listeners = _LISTENERS.command_listeners[:] + self.__server_listeners = _LISTENERS.server_listeners[:] + lst = _LISTENERS.server_heartbeat_listeners + self.__server_heartbeat_listeners = lst[:] + self.__topology_listeners = _LISTENERS.topology_listeners[:] + self.__cmap_listeners = _LISTENERS.cmap_listeners[:] + if listeners is not None: + for lst in listeners: + if isinstance(lst, CommandListener): + self.__command_listeners.append(lst) + if isinstance(lst, ServerListener): + self.__server_listeners.append(lst) + if isinstance(lst, ServerHeartbeatListener): + self.__server_heartbeat_listeners.append(lst) + if isinstance(lst, TopologyListener): + self.__topology_listeners.append(lst) + if isinstance(lst, ConnectionPoolListener): + self.__cmap_listeners.append(lst) + self.__enabled_for_commands = bool(self.__command_listeners) + self.__enabled_for_server = bool(self.__server_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) + self.__enabled_for_topology = bool(self.__topology_listeners) + self.__enabled_for_cmap = bool(self.__cmap_listeners) + + @property + def enabled_for_commands(self) -> bool: + """Are any CommandListener instances registered?""" + return self.__enabled_for_commands + + @property + def enabled_for_server(self) -> bool: + """Are any ServerListener instances registered?""" + return self.__enabled_for_server + + @property + def enabled_for_server_heartbeat(self) -> bool: + """Are any ServerHeartbeatListener instances registered?""" + return self.__enabled_for_server_heartbeat + + @property + def enabled_for_topology(self) -> bool: + """Are any TopologyListener instances registered?""" + return self.__enabled_for_topology + + @property + def enabled_for_cmap(self) -> bool: + """Are any ConnectionPoolListener instances registered?""" + return self.__enabled_for_cmap + + def event_listeners(self) -> list[_EventListeners]: + """List of registered event listeners.""" + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + ) -> None: + """Publish a CommandStartedEvent to all command listeners. + + :param command: The command document. + :param database_name: The name of the database this command was run + against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. 
+ """ + if op_id is None: + op_id = request_id + event = CommandStartedEvent( + command, + database_name, + request_id, + connection_id, + op_id, + service_id=service_id, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_command_success( + self, + duration: timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + speculative_hello: bool = False, + database_name: str = "", + ) -> None: + """Publish a CommandSucceededEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param speculative_hello: Was the command sent with speculative auth? + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + if speculative_hello: + # Redact entire response when the command started contained + # speculativeAuthenticate. + reply = {} + event = CommandSucceededEvent( + duration, + reply, + command_name, + request_id, + connection_id, + op_id, + service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_command_failure( + self, + duration: timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: + """Publish a CommandFailedEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document or failure description + document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + event = CommandFailedEvent( + duration, + failure, + command_name, + request_id, + connection_id, + op_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: + """Publish a ServerHeartbeatStartedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param awaited: True if this heartbeat is part of an awaitable hello command. 
+ """ + event = ServerHeartbeatStartedEvent(connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_succeeded( + self, connection_id: _Address, duration: float, reply: Hello[dict[str, Any]], awaited: bool + ) -> None: + """Publish a ServerHeartbeatSucceededEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_failed( + self, connection_id: _Address, duration: float, reply: Exception, awaited: bool + ) -> None: + """Publish a ServerHeartbeatFailedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerOpeningEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerOpeningEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerClosedEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerClosedEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_server_description_changed( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + server_address: _Address, + topology_id: ObjectId, + ) -> None: + """Publish a ServerDescriptionChangedEvent to all server listeners. + + :param previous_description: The previous server description. + :param server_address: The address (host, port) pair of the server. + :param new_description: The new server description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) + for subscriber in self.__server_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_topology_opened(self, topology_id: ObjectId) -> None: + """Publish a TopologyOpenedEvent to all topology listeners. 
+ + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyOpenedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_topology_closed(self, topology_id: ObjectId) -> None: + """Publish a TopologyClosedEvent to all topology listeners. + + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyClosedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_topology_description_changed( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + topology_id: ObjectId, + ) -> None: + """Publish a TopologyDescriptionChangedEvent to all topology listeners. + + :param previous_description: The previous topology description. + :param new_description: The new topology description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" + event = PoolCreatedEvent(address, options) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_created(event) + except Exception: + _handle_exception() + + def publish_pool_ready(self, address: _Address) -> None: + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" + event = PoolReadyEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_ready(event) + except Exception: + _handle_exception() + + def publish_pool_cleared( + self, + address: _Address, + service_id: Optional[ObjectId], + interrupt_connections: bool = False, + ) -> None: + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" + event = PoolClearedEvent(address, service_id, interrupt_connections) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_cleared(event) + except Exception: + _handle_exception() + + def publish_pool_closed(self, address: _Address) -> None: + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" + event = PoolClosedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_closed(event) + except Exception: + _handle_exception() + + def publish_connection_created(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCreatedEvent` to all connection + listeners. 
+ """ + event = ConnectionCreatedEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_created(event) + except Exception: + _handle_exception() + + def publish_connection_ready( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" + event = ConnectionReadyEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_ready(event) + except Exception: + _handle_exception() + + def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: + """Publish a :class:`ConnectionClosedEvent` to all connection + listeners. + """ + event = ConnectionClosedEvent(address, connection_id, reason) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_closed(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_started(self, address: _Address) -> None: + """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutStartedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_started(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_failed( + self, address: _Address, reason: str, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutFailedEvent(address, reason, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_failed(event) + except Exception: + _handle_exception() + + def publish_connection_checked_out( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckedOutEvent` to all connection + listeners. + """ + event = ConnectionCheckedOutEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_out(event) + except Exception: + _handle_exception() + + def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCheckedInEvent` to all connection + listeners. + """ + event = ConnectionCheckedInEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_in(event) + except Exception: + _handle_exception() diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py new file mode 100644 index 0000000000..7c62a251f8 --- /dev/null +++ b/pymongo/network_layer.py @@ -0,0 +1,786 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Internal network layer helper methods.""" +from __future__ import annotations + +import asyncio +import collections +import errno +import socket +import struct +import sys +import time +from asyncio import AbstractEventLoop, BaseTransport, BufferedProtocol, Future, Transport +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from pymongo import _csot, ssl_support +from pymongo._asyncio_task import create_task +from pymongo.common import MAX_MESSAGE_SIZE +from pymongo.compression_support import decompress +from pymongo.errors import ProtocolError, _OperationCancelled +from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.socket_checker import _errno_from_exception + +try: + from ssl import SSLError, SSLSocket + + _HAVE_SSL = True +except ImportError: + _HAVE_SSL = False + +try: + from pymongo.pyopenssl_context import _sslConn + + _HAVE_PYOPENSSL = True +except ImportError: + _HAVE_PYOPENSSL = False + _sslConn = SSLSocket # type: ignore[assignment, misc] + +from pymongo.ssl_support import ( + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, +) + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.synchronous.pool import Connection + +_UNPACK_HEADER = struct.Struct(" None: + timeout = sock.gettimeout() + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + await asyncio.wait_for(_async_socket_sendall_ssl(sock, buf, loop), timeout=timeout) + else: + await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + finally: + sock.settimeout(timeout) + + +if sys.platform != "win32": + + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop + ) -> None: + view = memoryview(buf) + sent = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while sent < len(buf): + try: + sent += sock.send(view[sent:]) + except BLOCKING_IO_ERRORS as exc: + fd = sock.fileno() + # Check for closed socket. 
+ if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + total_read += read + except BLOCKING_IO_ERRORS as exc: + fd = conn.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + return mv + +else: + # The default Windows asyncio event loop does not support loop.add_reader/add_writer: + # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support + # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop + ) -> None: + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 + while total_sent < total_length: + try: + sent = sock.send(view[total_sent:]) + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + sent = 0 + if sent > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_sent += sent + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. 
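+        # (Editor's note) Concretely: consecutive empty reads sleep 0.001, 0.002, 0.004,
+        # ... capped at 0.512 seconds, and any successful read halves the delay again,
+        # trading a little latency under load for low idle CPU in this polling fallback.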
+ backoff = 0.001 + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + read = 0 + if read > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_read += read + return mv + + +def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: + sock.sendall(buf) + + +async def _poll_cancellation(conn: AsyncConnection) -> None: + while True: + if conn.cancel_context.cancelled: + return + + await asyncio.sleep(_POLL_TIMEOUT) + + +async def async_receive_data_socket( + sock: Union[socket.socket, _sslConn], length: int +) -> memoryview: + sock_timeout = sock.gettimeout() + timeout = sock_timeout + + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + return await asyncio.wait_for( + _async_socket_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] + timeout=timeout, + ) + else: + return await asyncio.wait_for( + _async_socket_receive(sock, length, loop), # type: ignore[arg-type] + timeout=timeout, + ) + except asyncio.TimeoutError as err: + raise socket.timeout("timed out") from err + finally: + sock.settimeout(sock_timeout) + + +async def _async_socket_receive( + conn: socket.socket, length: int, loop: AbstractEventLoop +) -> memoryview: + mv = memoryview(bytearray(length)) + bytes_read = 0 + while bytes_read < length: + chunk_length = await loop.sock_recv_into(conn, mv[bytes_read:]) + if chunk_length == 0: + raise OSError("connection closed") + bytes_read += chunk_length + return mv + + +_PYPY = "PyPy" in sys.version +_WINDOWS = sys.platform == "win32" + + +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + sock = conn.conn.sock + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) + else: + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + + +def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + # To support cancelling a network read, we shorten the socket timeout and + # check for the cancellation signal after each timeout. Alternatively we + # could close the socket but that does not reliably cancel recv() calls + # on all OSes. + # When the timeout has expired we perform one final non-blocking recv. 
+ # This helps avoid spurious timeouts when the response is actually already + # buffered on the client. + orig_timeout = conn.conn.gettimeout() + try: + while bytes_read < length: + try: + # Use the legacy wait_for_read cancellation approach on PyPy due to PYTHON-5011. + # also use it on Windows due to PYTHON-5405 + if _PYPY or _WINDOWS: + wait_for_read(conn, deadline) + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + else: + if deadline is not None: + short_timeout = min(max(deadline - time.monotonic(), 0), _POLL_TIMEOUT) + else: + short_timeout = _POLL_TIMEOUT + conn.set_conn_timeout(short_timeout) + + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + # We reached the true deadline. + raise socket.timeout("timed out") from None + except socket.timeout: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + if ( + _PYPY + or _WINDOWS + or not conn.is_sdam + and deadline is not None + and deadline - time.monotonic() < 0 + ): + # We reached the true deadline. + raise + continue + except OSError as exc: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + finally: + conn.set_conn_timeout(orig_timeout) + + return mv + + +class NetworkingInterfaceBase: + def __init__(self, conn: Any): + self.conn = conn + + @property + def gettimeout(self) -> Any: + raise NotImplementedError + + def settimeout(self, timeout: float | None) -> None: + raise NotImplementedError + + def close(self) -> Any: + raise NotImplementedError + + def is_closing(self) -> bool: + raise NotImplementedError + + @property + def get_conn(self) -> Any: + raise NotImplementedError + + @property + def sock(self) -> Any: + raise NotImplementedError + + +class AsyncNetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: tuple[Transport, PyMongoProtocol]): + super().__init__(conn) + + @property + def gettimeout(self) -> float | None: + return self.conn[1].gettimeout + + def settimeout(self, timeout: float | None) -> None: + self.conn[1].settimeout(timeout) + + async def close(self) -> None: + self.conn[1].close() + await self.conn[1].wait_closed() + + def is_closing(self) -> bool: + return self.conn[0].is_closing() + + @property + def get_conn(self) -> PyMongoProtocol: + return self.conn[1] + + @property + def sock(self) -> socket.socket: + return self.conn[0].get_extra_info("socket") + + +class NetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: Union[socket.socket, _sslConn]): + super().__init__(conn) + + def gettimeout(self) -> float | None: + return self.conn.gettimeout() + + def settimeout(self, timeout: float | None) -> None: + self.conn.settimeout(timeout) + + def close(self) -> None: + self.conn.close() + + def is_closing(self) -> bool: + return self.conn.is_closing() + + @property + def get_conn(self) -> Union[socket.socket, _sslConn]: + return self.conn + + @property + def sock(self) -> Union[socket.socket, _sslConn]: + return self.conn + + def fileno(self) -> int: + return self.conn.fileno() + + def recv_into(self, buffer: bytes | memoryview) -> int: + return self.conn.recv_into(buffer) + + +class PyMongoProtocol(BufferedProtocol): + def 
__init__(self, timeout: Optional[float] = None):
+        self.transport: Transport = None  # type: ignore[assignment]
+        # Each message is read in 2-3 parts: header, compression header, and message body.
+        # The message buffer is allocated after the header is read.
+        self._header = memoryview(bytearray(16))
+        self._header_index = 0
+        self._compression_header = memoryview(bytearray(9))
+        self._compression_index = 0
+        self._message: Optional[memoryview] = None
+        self._message_index = 0
+        # State. TODO: replace booleans with an enum?
+        self._expecting_header = True
+        self._expecting_compression = False
+        self._message_size = 0
+        self._op_code = 0
+        self._connection_lost = False
+        self._read_waiter: Optional[Future[Any]] = None
+        self._timeout = timeout
+        self._is_compressed = False
+        self._compressor_id: Optional[int] = None
+        self._max_message_size = MAX_MESSAGE_SIZE
+        self._response_to: Optional[int] = None
+        self._closed = asyncio.get_running_loop().create_future()
+        self._pending_messages: collections.deque[Future[Any]] = collections.deque()
+        self._done_messages: collections.deque[Future[Any]] = collections.deque()
+
+    def settimeout(self, timeout: float | None) -> None:
+        self._timeout = timeout
+
+    @property
+    def gettimeout(self) -> float | None:
+        """The configured timeout for the socket that underlies our protocol pair."""
+        return self._timeout
+
+    def connection_made(self, transport: BaseTransport) -> None:
+        """Called exactly once when a connection is made.
+        The transport argument is the transport representing the write side of the connection.
+        """
+        self.transport = transport  # type: ignore[assignment]
+        self.transport.set_write_buffer_limits(MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE)
+
+    async def write(self, message: bytes) -> None:
+        """Write a message to this connection's transport."""
+        if self.transport.is_closing():
+            raise OSError("Connection is closed")
+        self.transport.write(message)
+        self.transport.resume_reading()
+
+    async def read(self, request_id: Optional[int], max_message_size: int) -> tuple[bytes, int]:
+        """Read a single MongoDB Wire Protocol message from this connection."""
+        if self.transport:
+            try:
+                self.transport.resume_reading()
+            # Known bug in SSL Protocols, fixed in Python 3.11: https://github.com/python/cpython/issues/89322
+            except AttributeError:
+                raise OSError("connection is already closed") from None
+        self._max_message_size = max_message_size
+        if self._done_messages:
+            message = await self._done_messages.popleft()
+        else:
+            if self.transport and self.transport.is_closing():
+                raise OSError("connection is already closed")
+            read_waiter = asyncio.get_running_loop().create_future()
+            self._pending_messages.append(read_waiter)
+            try:
+                message = await read_waiter
+            finally:
+                if read_waiter in self._done_messages:
+                    self._done_messages.remove(read_waiter)
+        if message:
+            op_code, compressor_id, response_to, data = message
+            # No request_id for exhaust cursor "getMore".
+            if request_id is not None:
+                if request_id != response_to:
+                    raise ProtocolError(
+                        f"Got response id {response_to!r} but expected {request_id!r}"
+                    )
+            if compressor_id is not None:
+                data = decompress(data, compressor_id)
+            return data, op_code
+        raise OSError("connection closed")
+
+    def get_buffer(self, sizehint: int) -> memoryview:
+        """Called to allocate a new receive buffer.
+        The asyncio loop calls this method expecting to receive a non-empty buffer to fill with data.
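+        (Editor's note: the view handed back below is a slice of the header buffer, the
+        compression-header buffer, or the message buffer, depending on parsing state.)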
+ If any data does not fit into the returned buffer, this method will be called again until + either no data remains or an empty buffer is returned. + """ + # Due to a bug, Python <=3.11 will call get_buffer() even after we raise + # ProtocolError in buffer_updated() and call connection_lost(). We allocate + # a temp buffer to drain the waiting data. + if self._connection_lost: + if not self._message: + self._message = memoryview(bytearray(2**14)) + return self._message + # TODO: optimize this by caching pointers to the buffers. + # return self._buffer[self._index:] + if self._expecting_header: + return self._header[self._header_index :] + if self._expecting_compression: + return self._compression_header[self._compression_index :] + return self._message[self._message_index :] # type: ignore[index] + + def buffer_updated(self, nbytes: int) -> None: + """Called when the buffer was updated with the received data""" + # Wrote 0 bytes into a non-empty buffer, signal connection closed + if nbytes == 0: + self.close(OSError("connection closed")) + return + if self._connection_lost: + return + if self._expecting_header: + self._header_index += nbytes + if self._header_index >= 16: + self._expecting_header = False + try: + ( + self._message_size, + self._op_code, + self._response_to, + self._expecting_compression, + ) = self.process_header() + except ProtocolError as exc: + self.close(exc) + return + self._message = memoryview(bytearray(self._message_size)) + return + if self._expecting_compression: + self._compression_index += nbytes + if self._compression_index >= 9: + self._expecting_compression = False + self._op_code, self._compressor_id = self.process_compression_header() + return + + self._message_index += nbytes + if self._message_index >= self._message_size: + self._expecting_header = True + # Pause reading to avoid storing an arbitrary number of messages in memory. 
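+            # (Editor's note) This is backpressure: reading stays paused until a consumer
+            # calls read(), which invokes transport.resume_reading(), so at most one fully
+            # decoded reply is parked in _done_messages per pending reader.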
+ self.transport.pause_reading() + if self._pending_messages: + result = self._pending_messages.popleft() + else: + result = asyncio.get_running_loop().create_future() + # Future has been cancelled, close this connection + if result.done(): + self.close(None) + return + # Necessary values to reconstruct and verify message + result.set_result( + (self._op_code, self._compressor_id, self._response_to, self._message) + ) + self._done_messages.append(result) + # Reset internal state to expect a new message + self._header_index = 0 + self._compression_index = 0 + self._message_index = 0 + self._message_size = 0 + self._message = None + self._op_code = 0 + self._compressor_id = None + self._response_to = None + + def process_header(self) -> tuple[int, int, int, bool]: + """Unpack a MongoDB Wire Protocol header.""" + length, _, response_to, op_code = _UNPACK_HEADER(self._header) + expecting_compression = False + if op_code == 2012: # OP_COMPRESSED + if length <= 25: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard OP_COMPRESSED message header size (25)" + ) + expecting_compression = True + length -= 9 + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > self._max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({self._max_message_size!r})" + ) + + return length - 16, op_code, response_to, expecting_compression + + def process_compression_header(self) -> tuple[int, int]: + """Unpack a MongoDB Wire Protocol compression header.""" + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(self._compression_header) + return op_code, compressor_id + + def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: + pending = list(self._pending_messages) + for msg in pending: + if not msg.done(): + if exc is None: + msg.set_result(None) + else: + msg.set_exception(exc) + self._done_messages.append(msg) + + def close(self, exc: Optional[Exception] = None) -> None: + self.transport.abort() + self._resolve_pending_messages(exc) + self._connection_lost = True + + def connection_lost(self, exc: Optional[Exception] = None) -> None: + self._resolve_pending_messages(exc) + if not self._closed.done(): + self._closed.set_result(None) + + async def wait_closed(self) -> None: + await self._closed + + +async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: + try: + await asyncio.wait_for(conn.write(buf), timeout=conn.gettimeout) + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + + +async def async_receive_message( + conn: AsyncConnection, + request_id: Optional[int], + max_message_size: int = MAX_MESSAGE_SIZE, +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + timeout: Optional[Union[float, int]] + timeout = conn.conn.gettimeout + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + if deadline: + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. 
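+        # (Editor's note) The max(..., 0) below clamps an already-expired deadline to a
+        # zero-second wait rather than a negative one. The read then races a cancellation
+        # poller via asyncio.wait(return_when=FIRST_COMPLETED): whichever task finishes
+        # first wins, the loser is cancelled, so cancellation is observed within
+        # _POLL_TIMEOUT seconds even while blocked on the network.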
+ timeout = max(deadline - time.monotonic(), 0) + + cancellation_task = create_task(_poll_cancellation(conn)) + read_task = create_task(conn.conn.get_conn.read(request_id, max_message_size)) + tasks = [read_task, cancellation_task] + try: + done, pending = await asyncio.wait( + tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED + ) + for task in pending: + task.cancel() + if pending: + await asyncio.wait(pending) + if len(done) == 0: + raise socket.timeout("timed out") + if read_task in done: + data, op_code = read_task.result() + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) + raise _OperationCancelled("operation cancelled") + except asyncio.CancelledError: + for task in tasks: + task.cancel() + await asyncio.wait(tasks) + raise + + +def receive_message( + conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + timeout = conn.conn.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + # Ignore the response's request id. + length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline)) + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({max_message_size!r})" + ) + data: memoryview | bytes + if op_code == 2012: + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) + data = decompress(receive_data(conn, length - 25, deadline), compressor_id) + else: + data = receive_data(conn, length - 16, deadline) + + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py new file mode 100644 index 0000000000..2df232848f --- /dev/null +++ b/pymongo/ocsp_cache.py @@ -0,0 +1,131 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
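+# (Editor's sketch, not part of the upstream file) Intended use of the cache below,
+# with request/response objects from the cryptography library:
+#
+#     cache = _OCSPCache()
+#     cache[request] = response        # kept only while this_update <= now < next_update
+#     try:
+#         response = cache[request]    # raises KeyError once the response expires
+#     except KeyError:
+#         response = fetch_ocsp_response_again()  # hypothetical refresh helper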
+ +"""Utilities for caching OCSP responses.""" + +from __future__ import annotations + +from collections import namedtuple +from datetime import datetime as _datetime +from datetime import timezone +from typing import TYPE_CHECKING, Any, Optional + +from pymongo.lock import _create_lock + +if TYPE_CHECKING: + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + + +def _next_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's next_update_utc.""" + # Added in cryptography 43.0.0. + if hasattr(value, "next_update_utc"): + return value.next_update_utc + return value.next_update + + +def _this_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's this_update_utc.""" + # Added in cryptography 43.0.0. + if hasattr(value, "this_update_utc"): + return value.this_update_utc + return value.this_update + + +class _OCSPCache: + """A cache for OCSP responses.""" + + CACHE_KEY_TYPE = namedtuple( # type: ignore + "OcspResponseCacheKey", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) + + def __init__(self) -> None: + self._data: dict[Any, OCSPResponse] = {} + # Hold this lock when accessing _data. + self._lock = _create_lock() + + def _get_cache_key(self, ocsp_request: OCSPRequest) -> CACHE_KEY_TYPE: + return self.CACHE_KEY_TYPE( + hash_algorithm=ocsp_request.hash_algorithm.name.lower(), + issuer_name_hash=ocsp_request.issuer_name_hash, + issuer_key_hash=ocsp_request.issuer_key_hash, + serial_number=ocsp_request.serial_number, + ) + + def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: + """Add/update a cache entry. + + 'key' is of type cryptography.x509.ocsp.OCSPRequest + 'value' is of type cryptography.x509.ocsp.OCSPResponse + + Validity of the OCSP response must be checked by caller. + """ + with self._lock: + cache_key = self._get_cache_key(key) + + # As per the OCSP protocol, if the response's nextUpdate field is + # not set, the responder is indicating that newer revocation + # information is available all the time. + next_update = _next_update(value) + if next_update is None: + self._data.pop(cache_key, None) + return + + this_update = _this_update(value) + if this_update is None: + return + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) + # Do nothing if the response is invalid. + if not (this_update <= now < next_update): + return + + # Cache new response OR update cached response if new response + # has longer validity. + cached_value = self._data.get(cache_key, None) + if cached_value is None: + self._data[cache_key] = value + return + cached_next_update = _next_update(cached_value) + if cached_next_update is not None and cached_next_update < next_update: + self._data[cache_key] = value + + def __getitem__(self, item: OCSPRequest) -> OCSPResponse: + """Get a cache entry if it exists. + + 'item' is of type cryptography.x509.ocsp.OCSPRequest + + Raises KeyError if the item is not in the cache. + """ + with self._lock: + cache_key = self._get_cache_key(item) + value = self._data[cache_key] + + # Return cached response if it is still valid. + this_update = _this_update(value) + next_update = _next_update(value) + assert this_update is not None + assert next_update is not None + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. 
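+                # Without the *_utc accessors (cryptography < 43.0.0) these
+                # values are naive UTC datetimes; comparing naive and aware
+                # datetimes raises TypeError, so strip tzinfo from now too.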
+ now = now.replace(tzinfo=None) + if this_update <= now < next_update: + return value + + self._data.pop(cache_key, None) + raise KeyError(cache_key) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py new file mode 100644 index 0000000000..8322f821fb --- /dev/null +++ b/pymongo/ocsp_support.py @@ -0,0 +1,438 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for requesting and verifying OCSP responses.""" +from __future__ import annotations + +import logging as _logging +import re as _re +from datetime import datetime as _datetime +from datetime import timezone +from typing import TYPE_CHECKING, Iterable, Optional, Type, Union + +from cryptography.exceptions import InvalidSignature as _InvalidSignature +from cryptography.hazmat.backends import default_backend as _default_backend +from cryptography.hazmat.primitives.asymmetric.dsa import DSAPublicKey as _DSAPublicKey +from cryptography.hazmat.primitives.asymmetric.ec import ECDSA as _ECDSA +from cryptography.hazmat.primitives.asymmetric.ec import ( + EllipticCurvePublicKey as _EllipticCurvePublicKey, +) +from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 as _PKCS1v15 +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey as _RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.x448 import ( + X448PublicKey as _X448PublicKey, +) +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PublicKey as _X25519PublicKey, +) +from cryptography.hazmat.primitives.hashes import SHA1 as _SHA1 +from cryptography.hazmat.primitives.hashes import Hash as _Hash +from cryptography.hazmat.primitives.serialization import Encoding as _Encoding +from cryptography.hazmat.primitives.serialization import PublicFormat as _PublicFormat +from cryptography.x509 import AuthorityInformationAccess as _AuthorityInformationAccess +from cryptography.x509 import ExtendedKeyUsage as _ExtendedKeyUsage +from cryptography.x509 import ExtensionNotFound as _ExtensionNotFound +from cryptography.x509 import TLSFeature as _TLSFeature +from cryptography.x509 import TLSFeatureType as _TLSFeatureType +from cryptography.x509 import load_pem_x509_certificate as _load_pem_x509_certificate +from cryptography.x509.ocsp import OCSPCertStatus as _OCSPCertStatus +from cryptography.x509.ocsp import OCSPRequestBuilder as _OCSPRequestBuilder +from cryptography.x509.ocsp import OCSPResponseStatus as _OCSPResponseStatus +from cryptography.x509.ocsp import load_der_ocsp_response as _load_der_ocsp_response +from cryptography.x509.oid import ( + AuthorityInformationAccessOID as _AuthorityInformationAccessOID, +) +from cryptography.x509.oid import ExtendedKeyUsageOID as _ExtendedKeyUsageOID +from requests import post as _post +from requests.exceptions import RequestException as _RequestException + +from pymongo import _csot +from pymongo.ocsp_cache import _next_update, _this_update + +if TYPE_CHECKING: + from cryptography.hazmat.primitives.asymmetric import ( + dsa, + ec, + ed448, + 
ed25519, + rsa, + x448, + x25519, + ) + from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + from cryptography.hazmat.primitives.hashes import HashAlgorithm + from cryptography.x509 import Certificate, Name + from cryptography.x509.extensions import Extension, ExtensionTypeVar + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + from OpenSSL.SSL import Connection + + from pymongo.ocsp_cache import _OCSPCache + from pymongo.pyopenssl_context import _CallbackData + + CertificateIssuerPublicKeyTypes = Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + x25519.X25519PublicKey, + x448.X448PublicKey, + ] + +# Note: the functions in this module generally return 1 or 0. The reason +# is simple. The entry point, ocsp_callback, is registered as a callback +# with OpenSSL through PyOpenSSL. The callback must return 1 (success) or +# 0 (failure). + +_LOGGER = _logging.getLogger(__name__) + +_CERT_REGEX = _re.compile( + b"-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+", _re.DOTALL +) + + +def _load_trusted_ca_certs(cafile: str) -> list[Certificate]: + """Parse the tlsCAFile into a list of certificates.""" + with open(cafile, "rb") as f: + data = f.read() + + # Load all the certs in the file. + trusted_ca_certs = [] + backend = _default_backend() + for cert_data in _re.findall(_CERT_REGEX, data): + trusted_ca_certs.append(_load_pem_x509_certificate(cert_data, backend)) + return trusted_ca_certs + + +def _get_issuer_cert( + cert: Certificate, chain: Iterable[Certificate], trusted_ca_certs: Optional[list[Certificate]] +) -> Optional[Certificate]: + issuer_name = cert.issuer + for candidate in chain: + if candidate.subject == issuer_name: + return candidate + + # Depending on the server's TLS library, the peer's cert chain may not + # include the self signed root CA. In this case we check the user + # provided tlsCAFile for the issuer. + # Remove once we use the verified peer cert chain in PYTHON-2147. + if trusted_ca_certs: + for candidate in trusted_ca_certs: + if candidate.subject == issuer_name: + return candidate + return None + + +def _verify_signature( + key: CertificateIssuerPublicKeyTypes, + signature: bytes, + algorithm: Union[Prehashed, HashAlgorithm, None], + data: bytes, +) -> int: + # See cryptography.x509.Certificate.public_key + # for the public key types. 
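+    # Returns 1 on success and 0 on failure, following the OpenSSL callback
+    # convention described at the top of this module.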
+ try: + if isinstance(key, _RSAPublicKey): + key.verify(signature, data, _PKCS1v15(), algorithm) # type: ignore[arg-type] + elif isinstance(key, _DSAPublicKey): + key.verify(signature, data, algorithm) # type: ignore[arg-type] + elif isinstance(key, _EllipticCurvePublicKey): + key.verify(signature, data, _ECDSA(algorithm)) # type: ignore[arg-type] + elif isinstance( + key, (_X25519PublicKey, _X448PublicKey) + ): # Curve25519 and Curve448 keys do not require verification + return 1 + else: + key.verify(signature, data) + except _InvalidSignature: + return 0 + return 1 + + +def _get_extension( + cert: Certificate, klass: Type[ExtensionTypeVar] +) -> Optional[Extension[ExtensionTypeVar]]: + try: + return cert.extensions.get_extension_for_class(klass) + except _ExtensionNotFound: + return None + + +def _public_key_hash(cert: Certificate) -> bytes: + public_key = cert.public_key() + # https://tools.ietf.org/html/rfc2560#section-4.2.1 + # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key + # (excluding the tag and length fields)" + # https://stackoverflow.com/a/46309453/600498 + if isinstance(public_key, _RSAPublicKey): + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.PKCS1) + elif isinstance(public_key, _EllipticCurvePublicKey): + pbytes = public_key.public_bytes(_Encoding.X962, _PublicFormat.UncompressedPoint) + else: + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) + digest = _Hash(_SHA1(), backend=_default_backend()) # noqa: S303 + digest.update(pbytes) + return digest.finalize() + + +def _get_certs_by_key_hash( + certificates: Iterable[Certificate], issuer: Certificate, responder_key_hash: Optional[bytes] +) -> list[Certificate]: + return [ + cert + for cert in certificates + if _public_key_hash(cert) == responder_key_hash and cert.issuer == issuer.subject + ] + + +def _get_certs_by_name( + certificates: Iterable[Certificate], issuer: Certificate, responder_name: Optional[Name] +) -> list[Certificate]: + return [ + cert + for cert in certificates + if cert.subject == responder_name and cert.issuer == issuer.subject + ] + + +def _verify_response_signature(issuer: Certificate, response: OCSPResponse) -> int: + # Response object will have a responder_name or responder_key_hash + # not both. + name = response.responder_name + rkey_hash = response.responder_key_hash + ikey_hash = response.issuer_key_hash + if name is not None and name == issuer.subject or rkey_hash == ikey_hash: + _LOGGER.debug("Responder is issuer") + # Responder is the issuer + responder_cert = issuer + else: + _LOGGER.debug("Responder is a delegate") + # Responder is a delegate + # https://tools.ietf.org/html/rfc6960#section-2.6 + # RFC6960, Section 3.2, Number 3 + certs = response.certificates + if response.responder_name is not None: + responder_certs = _get_certs_by_name(certs, issuer, name) + _LOGGER.debug("Using responder name") + else: + responder_certs = _get_certs_by_key_hash(certs, issuer, rkey_hash) + _LOGGER.debug("Using key hash") + if not responder_certs: + _LOGGER.debug("No matching or valid responder certs.") + return 0 + # XXX: Can there be more than one? If so, should we try each one + # until we find one that passes signature verification? 
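+        # For now, use the first matching certificate.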
+ responder_cert = responder_certs[0] + + # RFC6960, Section 3.2, Number 4 + ext = _get_extension(responder_cert, _ExtendedKeyUsage) + if not ext or _ExtendedKeyUsageOID.OCSP_SIGNING not in ext.value: + _LOGGER.debug("Delegate not authorized for OCSP signing") + return 0 + if not _verify_signature( + issuer.public_key(), + responder_cert.signature, + responder_cert.signature_hash_algorithm, + responder_cert.tbs_certificate_bytes, + ): + _LOGGER.debug("Delegate signature verification failed") + return 0 + # RFC6960, Section 3.2, Number 2 + ret = _verify_signature( + responder_cert.public_key(), + response.signature, + response.signature_hash_algorithm, + response.tbs_response_bytes, + ) + if not ret: + _LOGGER.debug("Response signature verification failed") + return ret + + +def _build_ocsp_request(cert: Certificate, issuer: Certificate) -> OCSPRequest: + # https://cryptography.io/en/latest/x509/ocsp/#creating-requests + builder = _OCSPRequestBuilder() + builder = builder.add_certificate(cert, issuer, _SHA1()) # noqa: S303 + return builder.build() + + +def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: + _LOGGER.debug("Verifying response") + # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. + res = _verify_response_signature(issuer, response) + if not res: + return 0 + + # Note that we are not using a "tolerance period" as discussed in + # https://tools.ietf.org/rfc/rfc5019.txt? + this_update = _this_update(response) + now = _datetime.now(tz=timezone.utc) + if this_update and this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) + # RFC6960, Section 3.2, Number 5 + if this_update and this_update > now: + _LOGGER.debug("thisUpdate is in the future") + return 0 + # RFC6960, Section 3.2, Number 6 + next_update = _next_update(response) + if next_update and next_update < now: + _LOGGER.debug("nextUpdate is in the past") + return 0 + return 1 + + +def _get_ocsp_response( + cert: Certificate, issuer: Certificate, uri: Union[str, bytes], ocsp_response_cache: _OCSPCache +) -> Optional[OCSPResponse]: + ocsp_request = _build_ocsp_request(cert, issuer) + try: + ocsp_response = ocsp_response_cache[ocsp_request] + _LOGGER.debug("Using cached OCSP response.") + except KeyError: + # CSOT: use the configured timeout or 5 seconds, whichever is smaller. + # Note that request's timeout works differently and does not imply an absolute + # deadline: https://requests.readthedocs.io/en/stable/user/quickstart/#timeouts + timeout = max(_csot.clamp_remaining(5), 0.001) + try: + response = _post( + uri, + data=ocsp_request.public_bytes(_Encoding.DER), + headers={"Content-Type": "application/ocsp-request"}, + timeout=timeout, + ) + except _RequestException as exc: + _LOGGER.debug("HTTP request failed: %s", exc) + return None + if response.status_code != 200: + _LOGGER.debug("HTTP request returned %d", response.status_code) + return None + ocsp_response = _load_der_ocsp_response(response.content) + _LOGGER.debug("OCSP response status: %r", ocsp_response.response_status) + if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return None + # RFC6960, Section 3.2, Number 1. Only relevant if we need to + # talk to the responder directly. + # Accessing response.serial_number raises if response status is not + # SUCCESSFUL. 
+ if ocsp_response.serial_number != ocsp_request.serial_number: + _LOGGER.debug("Response serial number does not match request") + return None + if not _verify_response(issuer, ocsp_response): + # The response failed verification. + return None + _LOGGER.debug("Caching OCSP response.") + ocsp_response_cache[ocsp_request] = ocsp_response + + return ocsp_response + + +def _ocsp_callback(conn: Connection, ocsp_bytes: bytes, user_data: Optional[_CallbackData]) -> bool: + """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" + # always pass in user_data but OpenSSL requires it be optional + assert user_data + pycert = conn.get_peer_certificate() + if pycert is None: + _LOGGER.debug("No peer cert?") + return False + cert = pycert.to_cryptography() + # Use the verified chain when available (pyopenssl>=20.0). + if hasattr(conn, "get_verified_chain"): + pychain = conn.get_verified_chain() + trusted_ca_certs = None + else: + pychain = conn.get_peer_cert_chain() + trusted_ca_certs = user_data.trusted_ca_certs + if not pychain: + _LOGGER.debug("No peer cert chain?") + return False + chain = [cer.to_cryptography() for cer in pychain] + issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) + must_staple = False + # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 + ext_tls = _get_extension(cert, _TLSFeature) + if ext_tls is not None: + for feature in ext_tls.value: + if feature == _TLSFeatureType.status_request: + _LOGGER.debug("Peer presented a must-staple cert") + must_staple = True + break + ocsp_response_cache = user_data.ocsp_response_cache + + # No stapled OCSP response + if ocsp_bytes == b"": + _LOGGER.debug("Peer did not staple an OCSP response") + if must_staple: + _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") + return False + if not user_data.check_ocsp_endpoint: + _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") + # No stapled OCSP response, checking responder URI disabled, soft fail. + return True + # https://tools.ietf.org/html/rfc6960#section-3.1 + ext_aia = _get_extension(cert, _AuthorityInformationAccess) + if ext_aia is None: + _LOGGER.debug("No authority access information, soft fail") + # No stapled OCSP response, no responder URI, soft fail. + return True + uris = [ + desc.access_location.value + for desc in ext_aia.value + if desc.access_method == _AuthorityInformationAccessOID.OCSP + ] + if not uris: + _LOGGER.debug("No OCSP URI, soft fail") + # No responder URI, soft fail. + return True + if issuer is None: + _LOGGER.debug("No issuer cert?") + return False + _LOGGER.debug("Requesting OCSP data") + # When requesting data from an OCSP endpoint we only fail on + # successful, valid responses with a certificate status of REVOKED. + for uri in uris: + _LOGGER.debug("Trying %s", uri) + response = _get_ocsp_response(cert, issuer, uri, ocsp_response_cache) + if response is None: + # The endpoint didn't respond in time, or the response was + # unsuccessful or didn't match the request, or the response + # failed verification. + continue + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.GOOD: + return True + if response.certificate_status == _OCSPCertStatus.REVOKED: + return False + # Soft fail if we couldn't get a definitive status. 
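+        # (e.g. every responder returned UNKNOWN, was unreachable, or failed
+        # verification).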
+ _LOGGER.debug("No definitive OCSP cert status, soft fail") + return True + + _LOGGER.debug("Peer stapled an OCSP response") + if issuer is None: + _LOGGER.debug("No issuer cert?") + return False + response = _load_der_ocsp_response(ocsp_bytes) + _LOGGER.debug("OCSP response status: %r", response.response_status) + # This happens in _request_ocsp when there is no stapled response so + # we know if we can compare serial numbers for the request and response. + if response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return False + if not _verify_response(issuer, response): + return False + # Cache the verified, stapled response. + ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.REVOKED: + return False + return True diff --git a/pymongo/operations.py b/pymongo/operations.py new file mode 100644 index 0000000000..73fb8b5f36 --- /dev/null +++ b/pymongo/operations.py @@ -0,0 +1,853 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Operation class definitions. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +import enum +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +from bson.raw_bson import RawBSONDocument +from pymongo import helpers_shared +from pymongo.collation import validate_collation_or_none +from pymongo.common import validate_is_mapping, validate_list +from pymongo.errors import InvalidOperation +from pymongo.helpers_shared import _gen_index_name, _index_document, _index_list +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from pymongo.typings import _AgnosticBulk, _AgnosticClientBulk + + +# Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary +_IndexList = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_IndexKeyHint = Union[str, _IndexList] + + +class _Op(str, enum.Enum): + ABORT = "abortTransaction" + AGGREGATE = "aggregate" + BULK_WRITE = "bulkWrite" + COMMIT = "commitTransaction" + COUNT = "count" + CREATE = "create" + CREATE_INDEXES = "createIndexes" + CREATE_SEARCH_INDEXES = "createSearchIndexes" + DELETE = "delete" + DISTINCT = "distinct" + DROP = "drop" + DROP_DATABASE = "dropDatabase" + DROP_INDEXES = "dropIndexes" + DROP_SEARCH_INDEXES = "dropSearchIndexes" + END_SESSIONS = "endSessions" + FIND_AND_MODIFY = "findAndModify" + FIND = "find" + INSERT = "insert" + LIST_COLLECTIONS = "listCollections" + LIST_INDEXES = "listIndexes" + LIST_SEARCH_INDEX = "listSearchIndexes" + LIST_DATABASES = "listDatabases" + UPDATE = "update" + UPDATE_INDEX = "updateIndex" + UPDATE_SEARCH_INDEX = "updateSearchIndex" + RENAME = 
"rename" + GETMORE = "getMore" + KILL_CURSORS = "killCursors" + TEST = "testOperation" + + +class InsertOne(Generic[_DocumentType]): + """Represents an insert_one operation.""" + + __slots__ = ( + "_doc", + "_namespace", + ) + + def __init__(self, document: _DocumentType, namespace: Optional[str] = None) -> None: + """Create an InsertOne instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param document: The document to insert. If the document is missing an + _id field one will be added. + :param namespace: (optional) The namespace in which to insert a document. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + """ + self._doc = document + self._namespace = namespace + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_insert(self._doc) # type: ignore[arg-type] + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_insert( + self._namespace, + self._doc, # type: ignore[arg-type] + ) + + def __repr__(self) -> str: + if self._namespace: + return f"{self.__class__.__name__}({self._doc!r}, {self._namespace!r})" + return f"{self.__class__.__name__}({self._doc!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return other._doc == self._doc and other._namespace == self._namespace + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class _DeleteOp: + """Private base class for delete operations.""" + + __slots__ = ( + "_filter", + "_collation", + "_hint", + "_namespace", + ) + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + + self._filter = filter + self._collation = collation + self._namespace = namespace + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return ( + other._filter, + other._collation, + other._hint, + other._namespace, + ) == ( + self._filter, + self._collation, + self._hint, + self._namespace, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._collation, + self._hint, + self._namespace, + ) + return f"{self.__class__.__name__}({self._filter!r}, {self._collation!r}, {self._hint!r})" + + +class DeleteOne(_DeleteOp): + """Represents a delete_one operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + """Create a DeleteOne instance. 
+ + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete a document. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + super().__init__(filter, collation, hint, namespace) + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_delete( + self._namespace, + self._filter, + multi=False, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + +class DeleteMany(_DeleteOp): + """Represents a delete_many operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + """Create a DeleteMany instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete documents. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. 
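+
+        An illustrative example (``"db.coll"`` is a placeholder namespace;
+        ``namespace`` is only required for ``MongoClient.bulk_write``)::
+
+            DeleteMany({"status": "archived"}, namespace="db.coll")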
+ """ + super().__init__(filter, collation, hint, namespace) + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_delete( + self._namespace, + self._filter, + multi=True, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + +class ReplaceOne(Generic[_DocumentType]): + """Represents a replace_one operation.""" + + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_hint", + "_namespace", + "_sort", + ) + + def __init__( + self, + filter: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], + upsert: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a ReplaceOne instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + :param namespace: (optional) The namespace in which to replace a document. + + .. versionchanged:: 4.10 + Added ``sort`` option. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the ``collation`` option. 
+ """ + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + + self._sort = sort + self._filter = filter + self._doc = replacement + self._upsert = upsert + self._collation = collation + self._namespace = namespace + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_replace( + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + sort=self._sort, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_replace( + self._namespace, + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + sort=self._sort, + ) + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._hint, + other._namespace, + other._sort, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + self._namespace, + self._sort, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + self._namespace, + self._sort, + ) + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + self._sort, + ) + + +class _UpdateOp: + """Private base class for update operations.""" + + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_array_filters", + "_hint", + "_namespace", + "_sort", + ) + + def __init__( + self, + filter: Mapping[str, Any], + doc: Union[Mapping[str, Any], _Pipeline], + upsert: Optional[bool], + collation: Optional[_CollationIn], + array_filters: Optional[list[Mapping[str, Any]]], + hint: Optional[_IndexKeyHint], + namespace: Optional[str], + sort: Optional[Mapping[str, Any]], + ): + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if array_filters is not None: + validate_list("array_filters", array_filters) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._doc = doc + self._upsert = upsert + self._collation = collation + self._array_filters = array_filters + self._namespace = namespace + self._sort = sort + + def __eq__(self, other: object) -> bool: + if isinstance(other, type(self)): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + other._namespace, + other._sort, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + self._namespace, + 
self._sort, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + self._namespace, + self._sort, + ) + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + self._sort, + ) + + +class UpdateOne(_UpdateOp): + """Represents an update_one operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Represents an update_one operation. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param namespace: The namespace in which to update a document. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + + .. versionchanged:: 4.10 + Added ``sort`` option. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. 
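+
+        An illustrative example (the filter and update fields are
+        placeholders)::
+
+            UpdateOne({"counter": {"$lt": 10}}, {"$inc": {"counter": 1}})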
+ """ + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, sort) + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + False, + bool(self._upsert), + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + sort=self._sort, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, + self._filter, + self._doc, + False, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + sort=self._sort, + ) + + +class UpdateMany(_UpdateOp): + """Represents an update_many operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + """Create an UpdateMany instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param namespace: (optional) The namespace in which to update documents. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. 
+ """ + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, None) + + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class IndexModel: + """Represents an index to create.""" + + __slots__ = ("__document",) + + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: + """Create an Index instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_indexes` and :meth:`~pymongo.collection.Collection.create_indexes`. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str`, and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation`: An instance of :class:`~pymongo.collation.Collation` + that specifies the collation to use. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the { "$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. + + See the MongoDB documentation for a full list of supported options by + server version. + + :param keys: a single key or a list containing (key, direction) pairs + or keys specifying the index to create. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. 
versionchanged:: 3.2
+           Added the ``partialFilterExpression`` option to support partial
+           indexes.
+
+        .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/
+        """
+        keys = _index_list(keys)
+        if kwargs.get("name") is None:
+            kwargs["name"] = _gen_index_name(keys)
+        kwargs["key"] = _index_document(keys)
+        collation = validate_collation_or_none(kwargs.pop("collation", None))
+        self.__document = kwargs
+        if collation is not None:
+            self.__document["collation"] = collation
+
+    @property
+    def document(self) -> dict[str, Any]:
+        """An index document suitable for passing to the createIndexes
+        command.
+        """
+        return self.__document
+
+    def __repr__(self) -> str:
+        return "{}({}{})".format(
+            self.__class__.__name__,
+            self.document["key"],
+            "".join([f", {key}={value!r}" for key, value in self.document.items() if key != "key"]),
+        )
+
+
+class SearchIndexModel:
+    """Represents a search index to create."""
+
+    __slots__ = ("__document",)
+
+    def __init__(
+        self,
+        definition: Mapping[str, Any],
+        name: Optional[str] = None,
+        type: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Create a Search Index instance.
+
+        For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_search_index`, :meth:`~pymongo.collection.Collection.create_search_index`, :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_search_indexes` and :meth:`~pymongo.collection.Collection.create_search_indexes`.
+
+        :param definition: The definition for this index.
+        :param name: The name for this index, if present.
+        :param type: The type for this index, which defaults to "search". Alternative values include "vectorSearch".
+        :param kwargs: Keyword arguments supplying any additional options.
+
+        .. note:: Search indexes require a MongoDB 7.0+ Atlas cluster.
+        .. versionadded:: 4.5
+        .. versionchanged:: 4.7
+           Added the ``type`` and ``kwargs`` arguments.
+        """
+        self.__document: dict[str, Any] = {}
+        if name is not None:
+            self.__document["name"] = name
+        self.__document["definition"] = definition
+        if type is not None:
+            self.__document["type"] = type
+        self.__document.update(kwargs)
+
+    @property
+    def document(self) -> Mapping[str, Any]:
+        """The document for this index."""
+        return self.__document
+
+    def __repr__(self) -> str:
+        return "{}({})".format(
+            self.__class__.__name__,
+            ", ".join([f"{key}={value!r}" for key, value in self.document.items()]),
+        )
diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py
new file mode 100644
index 0000000000..82f506f039
--- /dev/null
+++ b/pymongo/periodic_executor.py
@@ -0,0 +1,296 @@
+# Copyright 2014-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License. 
+
+"""Run a target function on a background thread."""
+
+from __future__ import annotations
+
+import asyncio
+import sys
+import threading
+import time
+import weakref
+from typing import Any, Optional
+
+from pymongo import _csot
+from pymongo._asyncio_task import create_task
+from pymongo.lock import _create_lock
+
+_IS_SYNC = False
+
+
+class AsyncPeriodicExecutor:
+    def __init__(
+        self,
+        interval: float,
+        min_interval: float,
+        target: Any,
+        name: Optional[str] = None,
+    ):
+        """Run a target function periodically on a background task.
+
+        If the target's return value is false, the executor stops.
+
+        :param interval: Seconds between calls to `target`.
+        :param min_interval: Minimum seconds between calls if `wake` is
+            called very often.
+        :param target: A function.
+        :param name: A name to give the underlying task.
+        """
+        self._event = False
+        self._interval = interval
+        self._min_interval = min_interval
+        self._target = target
+        self._stopped = False
+        self._task: Optional[asyncio.Task[Any]] = None
+        self._name = name
+        self._skip_sleep = False
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>"
+
+    def open(self) -> None:
+        """Start. Multiple calls have no effect."""
+        self._stopped = False
+
+        if self._task is None or (
+            self._task.done() and not self._task.cancelled() and not self._task.cancelling()  # type: ignore[unused-ignore, attr-defined]
+        ):
+            self._task = create_task(self._run(), name=self._name)
+
+    def close(self, dummy: Any = None) -> None:
+        """Stop. To restart, call open().
+
+        The dummy parameter allows an executor's close method to be a weakref
+        callback; see monitor.py.
+        """
+        self._stopped = True
+        if self._task is not None:
+            self._task.cancel()
+
+    async def join(self, timeout: Optional[int] = None) -> None:
+        if self._task is not None:
+            await asyncio.wait([self._task], timeout=timeout)  # type: ignore[arg-type]
+
+    def wake(self) -> None:
+        """Execute the target function soon."""
+        self._event = True
+
+    def update_interval(self, new_interval: int) -> None:
+        self._interval = new_interval
+
+    def skip_sleep(self) -> None:
+        self._skip_sleep = True
+
+    async def _run(self) -> None:
+        # The CSOT contextvars must be cleared inside the executor task before
+        # execution begins.
+        _csot.reset_all()
+        while not self._stopped:
+            if self._task and self._task.cancelling():  # type: ignore[unused-ignore, attr-defined]
+                raise asyncio.CancelledError
+            try:
+                if not await self._target():
+                    self._stopped = True
+                    break
+            # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+            except BaseException:
+                self._stopped = True
+                raise
+
+            if self._skip_sleep:
+                self._skip_sleep = False
+            else:
+                deadline = time.monotonic() + self._interval
+                while not self._stopped and time.monotonic() < deadline:
+                    await asyncio.sleep(self._min_interval)
+                    if self._event:
+                        break  # Early wake.
+
+            self._event = False
+
+
+class PeriodicExecutor:
+    def __init__(
+        self,
+        interval: float,
+        min_interval: float,
+        target: Any,
+        name: Optional[str] = None,
+    ):
+        """Run a target function periodically on a background thread.
+
+        If the target's return value is false, the executor stops.
+
+        :param interval: Seconds between calls to `target`.
+        :param min_interval: Minimum seconds between calls if `wake` is
+            called very often.
+        :param target: A function.
+        :param name: A name to give the underlying thread.
+        """
+        # threading.Event and its internal condition variable are expensive
+        # in Python 2, see PYTHON-983. 
Use a boolean to know when to wake. + # The executor's design is constrained by several Python issues, see + # "periodic_executor.rst" in this repository. + self._event = False + self._interval = interval + self._min_interval = min_interval + self._target = target + self._stopped = False + self._thread: Optional[threading.Thread] = None + self._name = name + self._skip_sleep = False + self._thread_will_exit = False + self._lock = _create_lock() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + + def open(self) -> None: + """Start. Multiple calls have no effect. + + Not safe to call from multiple threads at once. + """ + with self._lock: + if self._thread_will_exit: + # If the background thread has read self._stopped as True + # there is a chance that it has not yet exited. The call to + # join should not block indefinitely because there is no + # other work done outside the while loop in self._run. + try: + assert self._thread is not None + self._thread.join() + except ReferenceError: + # Thread terminated. + pass + self._thread_will_exit = False + self._stopped = False + started: Any = False + try: + started = self._thread and self._thread.is_alive() + except ReferenceError: + # Thread terminated. + pass + + if not started: + thread = threading.Thread(target=self._run, name=self._name) + thread.daemon = True + self._thread = weakref.proxy(thread) + _register_executor(self) + # Mitigation to RuntimeError firing when thread starts on shutdown + # https://github.com/python/cpython/issues/114570 + try: + thread.start() + except RuntimeError as e: + if "interpreter shutdown" in str(e) or sys.is_finalizing(): + self._thread = None + return + raise + + def close(self, dummy: Any = None) -> None: + """Stop. To restart, call open(). + + The dummy parameter allows an executor's close method to be a weakref + callback; see monitor.py. + """ + self._stopped = True + + def join(self, timeout: Optional[int] = None) -> None: + if self._thread is not None: + try: + self._thread.join(timeout) + except (ReferenceError, RuntimeError): + # Thread already terminated, or not yet started. + pass + + def wake(self) -> None: + """Execute the target function soon.""" + self._event = True + + def update_interval(self, new_interval: int) -> None: + self._interval = new_interval + + def skip_sleep(self) -> None: + self._skip_sleep = True + + def _should_stop(self) -> bool: + with self._lock: + if self._stopped: + self._thread_will_exit = True + return True + return False + + def _run(self) -> None: + while not self._should_stop(): + try: + if not self._target(): + self._stopped = True + break + # Catch KeyboardInterrupt, etc. and cleanup. + except BaseException: + with self._lock: + self._stopped = True + self._thread_will_exit = True + + raise + + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: + time.sleep(self._min_interval) + if self._event: + break # Early wake. + + self._event = False + + +# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started, +# an executor is kept alive by a strong reference from its thread and perhaps +# from other objects. When the thread dies and all other referrers are freed, +# the executor is freed and removed from _EXECUTORS. If any threads are +# running when the interpreter begins to shut down, we try to halt and join +# them to avoid spurious errors. 
+_EXECUTORS = set() + + +def _register_executor(executor: PeriodicExecutor) -> None: + ref = weakref.ref(executor, _on_executor_deleted) + _EXECUTORS.add(ref) + + +def _on_executor_deleted(ref: weakref.ReferenceType[PeriodicExecutor]) -> None: + _EXECUTORS.remove(ref) + + +def _shutdown_executors() -> None: + if _EXECUTORS is None: + return + + # Copy the set. Stopping threads has the side effect of removing executors. + executors = list(_EXECUTORS) + + # First signal all executors to close... + for ref in executors: + executor = ref() + if executor: + executor.close() + + # ...then try to join them. + for ref in executors: + executor = ref() + if executor: + executor.join(1) + + executor = None diff --git a/pymongo/pool.py b/pymongo/pool.py index e3ba94e47a..456ff3df0a 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1,554 +1,22 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -import os -import socket -import sys -import time -import threading -import weakref +"""Re-import of synchronous Pool API for compatibility.""" +from __future__ import annotations -from pymongo import thread_util -from pymongo.common import HAS_SSL -from pymongo.errors import ConnectionFailure, ConfigurationError +from pymongo.synchronous.pool import * # noqa: F403 +from pymongo.synchronous.pool import __doc__ as original_doc -try: - from ssl import match_hostname -except ImportError: - from pymongo.ssl_match_hostname import match_hostname - -if HAS_SSL: - import ssl - -if sys.platform.startswith('java'): - from select import cpython_compatible_select as select -else: - from select import select - - -NO_REQUEST = None -NO_SOCKET_YET = -1 - - -def _closed(sock): - """Return True if we know socket has been closed, False otherwise. - """ - try: - rd, _, _ = select([sock], [], [], 0) - # Any exception here is equally bad (select.error, ValueError, etc.). - except: - return True - return len(rd) > 0 - - -class SocketInfo(object): - """Store a socket with some metadata - """ - def __init__(self, sock, pool_id, host=None): - self.sock = sock - self.host = host - self.authset = set() - self.closed = False - self.last_checkout = time.time() - self.forced = False - - # The pool's pool_id changes with each reset() so we can close sockets - # created before the last reset. - self.pool_id = pool_id - - def close(self): - self.closed = True - # Avoid exceptions on interpreter shutdown. 
- try: - self.sock.close() - except: - pass - - def __eq__(self, other): - # Need to check if other is NO_REQUEST or NO_SOCKET_YET, and then check - # if its sock is the same as ours - return hasattr(other, 'sock') and self.sock == other.sock - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash(self.sock) - - def __repr__(self): - return "SocketInfo(%s)%s at %s" % ( - repr(self.sock), - self.closed and " CLOSED" or "", - id(self) - ) - - -# Do *not* explicitly inherit from object or Jython won't call __del__ -# http://bugs.jython.org/issue1057 -class Pool: - def __init__(self, pair, max_size, net_timeout, conn_timeout, use_ssl, - use_greenlets, ssl_keyfile=None, ssl_certfile=None, - ssl_cert_reqs=None, ssl_ca_certs=None, - wait_queue_timeout=None, wait_queue_multiple=None): - """ - :Parameters: - - `pair`: a (hostname, port) tuple - - `max_size`: The maximum number of open sockets. Calls to - `get_socket` will block if this is set, this pool has opened - `max_size` sockets, and there are none idle. Set to `None` to - disable. - - `net_timeout`: timeout in seconds for operations on open connection - - `conn_timeout`: timeout in seconds for establishing connection - - `use_ssl`: bool, if True use an encrypted connection - - `use_greenlets`: bool, if True then start_request() assigns a - socket to the current greenlet - otherwise it is assigned to the - current thread - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. If included with the ``certfile` then - only the ``ssl_certfile`` is needed. Implies ``ssl=True``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``ssl=True``. - - `ssl_cert_reqs`: Specifies whether a certificate is required from - the other side of the connection, and whether it will be validated - if provided. It must be one of the three values ``ssl.CERT_NONE`` - (certificates ignored), ``ssl.CERT_OPTIONAL`` - (not required, but validated if provided), or ``ssl.CERT_REQUIRED`` - (required and validated). If the value of this parameter is not - ``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point - to a file of CA certificates. Implies ``ssl=True``. - - `ssl_ca_certs`: The ca_certs file contains a set of concatenated - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``ssl=True``. - - `wait_queue_timeout`: (integer) How long (in seconds) a - thread will wait for a socket from the pool if the pool has no - free sockets. - - `wait_queue_multiple`: (integer) Multiplied by max_pool_size to give - the number of threads allowed to wait for a socket at one time. - """ - # Only check a socket's health with _closed() every once in a while. - # Can override for testing: 0 to always check, None to never check. - self._check_interval_seconds = 1 - - self.sockets = set() - self.lock = threading.Lock() - - # Keep track of resets, so we notice sockets created before the most - # recent reset and close them. 
- self.pool_id = 0 - self.pid = os.getpid() - self.pair = pair - self.max_size = max_size - self.net_timeout = net_timeout - self.conn_timeout = conn_timeout - self.wait_queue_timeout = wait_queue_timeout - self.wait_queue_multiple = wait_queue_multiple - self.use_ssl = use_ssl - self.ssl_keyfile = ssl_keyfile - self.ssl_certfile = ssl_certfile - self.ssl_cert_reqs = ssl_cert_reqs - self.ssl_ca_certs = ssl_ca_certs - - if HAS_SSL and use_ssl and not ssl_cert_reqs: - self.ssl_cert_reqs = ssl.CERT_NONE - - # Map self._ident.get() -> request socket - self._tid_to_sock = {} - - if use_greenlets and not thread_util.have_gevent: - raise ConfigurationError( - "The Gevent module is not available. " - "Install the gevent package from PyPI." - ) - - self._ident = thread_util.create_ident(use_greenlets) - - # Count the number of calls to start_request() per thread or greenlet - self._request_counter = thread_util.Counter(use_greenlets) - - if self.wait_queue_multiple is None or self.max_size is None: - max_waiters = None - else: - max_waiters = self.max_size * self.wait_queue_multiple - - self._socket_semaphore = thread_util.create_semaphore( - self.max_size, max_waiters, use_greenlets) - - def reset(self): - # Ignore this race condition -- if many threads are resetting at once, - # the pool_id will definitely change, which is all we care about. - self.pool_id += 1 - self.pid = os.getpid() - - sockets = None - try: - # Swapping variables is not atomic. We need to ensure no other - # thread is modifying self.sockets, or replacing it, in this - # critical section. - self.lock.acquire() - sockets, self.sockets = self.sockets, set() - finally: - self.lock.release() - - for sock_info in sockets: - sock_info.close() - - def create_connection(self): - """Connect and return a socket object. - - This is a modified version of create_connection from - CPython >=2.6. - """ - host, port = self.pair - - # Check if dealing with a unix domain socket - if host.endswith('.sock'): - if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " - "on this system") - sock = socket.socket(socket.AF_UNIX) - try: - sock.connect(host) - return sock - except socket.error, e: - if sock is not None: - sock.close() - raise e - - # Don't try IPv6 if we don't support it. Also skip it if host - # is 'localhost' (::1 is fine). Avoids slow connect issues - # like PYTHON-356. - family = socket.AF_INET - if socket.has_ipv6 and host != 'localhost': - family = socket.AF_UNSPEC - - err = None - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, dummy, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(self.conn_timeout or 20.0) - sock.connect(sa) - return sock - except socket.error, e: - err = e - if sock is not None: - sock.close() - - if err is not None: - raise err - else: - # This likely means we tried to connect to an IPv6 only - # host with an OS/kernel or Python interpreter that doesn't - # support IPv6. The test case is Jython2.5.1 which doesn't - # support IPv6 at all. - raise socket.error('getaddrinfo failed') - - def connect(self): - """Connect to Mongo and return a new (connected) socket. Note that the - pool does not keep a reference to the socket -- you must call - return_socket() when you're done with it. 
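The removed `Pool.create_connection` above walks every `getaddrinfo` result so a host with both IPv6 and IPv4 records gets a second chance when the first address family fails (the slow-connect issue tracked as PYTHON-356). In modern Python the same fallback loop looks roughly like this (a sketch, not the driver's current code):

```python
import socket

def connect_with_fallback(host: str, port: int, timeout: float = 20.0) -> socket.socket:
    """Try each getaddrinfo result in turn; raise the last error if all fail."""
    family = socket.AF_INET
    if socket.has_ipv6 and host != "localhost":
        family = socket.AF_UNSPEC  # Allow both IPv4 and IPv6 candidates.
    err = None
    for af, socktype, proto, _, sa in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(timeout)
            sock.connect(sa)
            return sock
        except OSError as e:
            err = e  # Remember the failure; try the next candidate.
            if sock is not None:
                sock.close()
    raise err if err is not None else OSError("getaddrinfo failed")
```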
- """ - sock = self.create_connection() - hostname = self.pair[0] - - if self.use_ssl: - try: - sock = ssl.wrap_socket(sock, - certfile=self.ssl_certfile, - keyfile=self.ssl_keyfile, - ca_certs=self.ssl_ca_certs, - cert_reqs=self.ssl_cert_reqs) - if self.ssl_cert_reqs: - match_hostname(sock.getpeercert(), hostname) - - except ssl.SSLError: - sock.close() - raise ConnectionFailure("SSL handshake failed. MongoDB may " - "not be configured with SSL support.") - - sock.settimeout(self.net_timeout) - return SocketInfo(sock, self.pool_id, hostname) - - def get_socket(self, force=False): - """Get a socket from the pool. - - Returns a :class:`SocketInfo` object wrapping a connected - :class:`socket.socket`, and a bool saying whether the socket was from - the pool or freshly created. - - :Parameters: - - `force`: optional boolean, forces a connection to be returned - without blocking, even if `max_size` has been reached. - """ - # We use the pid here to avoid issues with fork / multiprocessing. - # See test.test_client:TestClient.test_fork for an example of - # what could go wrong otherwise - if self.pid != os.getpid(): - self.reset() - - # Have we opened a socket for this request? - req_state = self._get_request_state() - if req_state not in (NO_SOCKET_YET, NO_REQUEST): - # There's a socket for this request, check it and return it - checked_sock = self._check(req_state) - if checked_sock != req_state: - self._set_request_state(checked_sock) - - checked_sock.last_checkout = time.time() - return checked_sock - - forced = False - # We're not in a request, just get any free socket or create one - if force: - # If we're doing an internal operation, attempt to play nicely with - # max_size, but if there is no open "slot" force the connection - # and mark it as forced so we don't release the semaphore without - # having acquired it for this socket. - if not self._socket_semaphore.acquire(False): - forced = True - elif not self._socket_semaphore.acquire(True, self.wait_queue_timeout): - self._raise_wait_queue_timeout() - - # We've now acquired the semaphore and must release it on error. - try: - sock_info, from_pool = None, None - try: - try: - # set.pop() isn't atomic in Jython less than 2.7, see - # http://bugs.jython.org/issue1854 - self.lock.acquire() - sock_info, from_pool = self.sockets.pop(), True - finally: - self.lock.release() - except KeyError: - sock_info, from_pool = self.connect(), False - - if from_pool: - sock_info = self._check(sock_info) - - sock_info.forced = forced - - if req_state == NO_SOCKET_YET: - # start_request has been called but we haven't assigned a - # socket to the request yet. Let's use this socket for this - # request until end_request. - self._set_request_state(sock_info) - except: - if not forced: - self._socket_semaphore.release() - raise - - sock_info.last_checkout = time.time() - return sock_info - - def start_request(self): - if self._get_request_state() == NO_REQUEST: - # Add a placeholder value so we know we're in a request, but we - # have no socket assigned to the request yet. 
- self._set_request_state(NO_SOCKET_YET) - - self._request_counter.inc() - - def in_request(self): - return bool(self._request_counter.get()) - - def end_request(self): - # Check if start_request has ever been called in this thread / greenlet - count = self._request_counter.get() - if count: - self._request_counter.dec() - if count == 1: - # End request - sock_info = self._get_request_state() - self._set_request_state(NO_REQUEST) - if sock_info not in (NO_REQUEST, NO_SOCKET_YET): - self._return_socket(sock_info) - - def discard_socket(self, sock_info): - """Close and discard the active socket. - """ - if sock_info not in (NO_REQUEST, NO_SOCKET_YET): - sock_info.close() - - if sock_info == self._get_request_state(): - # Discarding request socket; prepare to use a new request - # socket on next get_socket(). - self._set_request_state(NO_SOCKET_YET) - - def maybe_return_socket(self, sock_info): - """Return the socket to the pool unless it's the request socket. - """ - if sock_info in (NO_REQUEST, NO_SOCKET_YET): - return - - if self.pid != os.getpid(): - if not sock_info.forced: - self._socket_semaphore.release() - self.reset() - else: - if sock_info.closed: - if sock_info.forced: - sock_info.forced = False - elif sock_info != self._get_request_state(): - self._socket_semaphore.release() - return - - if sock_info != self._get_request_state(): - self._return_socket(sock_info) - - def _return_socket(self, sock_info): - """Return socket to the pool. If pool is full the socket is discarded. - """ - try: - self.lock.acquire() - too_many_sockets = (self.max_size is not None - and len(self.sockets) >= self.max_size) - - if not too_many_sockets and sock_info.pool_id == self.pool_id: - self.sockets.add(sock_info) - else: - sock_info.close() - finally: - self.lock.release() - - if sock_info.forced: - sock_info.forced = False - else: - self._socket_semaphore.release() - - def _check(self, sock_info): - """This side-effecty function checks if this pool has been reset since - the last time this socket was used, or if the socket has been closed by - some external network error, and if so, attempts to create a new socket. - If this connection attempt fails we reset the pool and reraise the - error. - - Checking sockets lets us avoid seeing *some* - :class:`~pymongo.errors.AutoReconnect` exceptions on server - hiccups, etc. We only do this if it's been > 1 second since - the last socket checkout, to keep performance reasonable - we - can't avoid AutoReconnects completely anyway. - """ - error = False - - # How long since socket was last checked out. - age = time.time() - sock_info.last_checkout - - if sock_info.closed: - error = True - - elif self.pool_id != sock_info.pool_id: - sock_info.close() - error = True - - elif (self._check_interval_seconds is not None - and ( - 0 == self._check_interval_seconds - or age > self._check_interval_seconds)): - if _closed(sock_info.sock): - sock_info.close() - error = True - - if not error: - return sock_info - else: - try: - return self.connect() - except socket.error: - self.reset() - raise - - def _set_request_state(self, sock_info): - ident = self._ident - tid = ident.get() - - if sock_info == NO_REQUEST: - # Ending a request - ident.unwatch(tid) - self._tid_to_sock.pop(tid, None) - else: - self._tid_to_sock[tid] = sock_info - - if not ident.watching(): - # Closure over tid, poolref, and ident. Don't refer directly to - # self, otherwise there's a cycle. - - # Do not access threadlocals in this function, or any - # function it calls! 
In the case of the Pool subclass and - # mod_wsgi 2.x, on_thread_died() is triggered when mod_wsgi - # calls PyThreadState_Clear(), which deferences the - # ThreadVigil and triggers the weakref callback. Accessing - # thread locals in this function, while PyThreadState_Clear() - # is in progress can cause leaks, see PYTHON-353. - poolref = weakref.ref(self) - - def on_thread_died(ref): - try: - ident.unwatch(tid) - pool = poolref() - if pool: - # End the request - request_sock = pool._tid_to_sock.pop(tid, None) - - # Was thread ever assigned a socket before it died? - if request_sock not in (NO_REQUEST, NO_SOCKET_YET): - pool._return_socket(request_sock) - except: - # Random exceptions on interpreter shutdown. - pass - - ident.watch(on_thread_died) - - def _get_request_state(self): - tid = self._ident.get() - return self._tid_to_sock.get(tid, NO_REQUEST) - - def _raise_wait_queue_timeout(self): - raise ConnectionFailure( - 'Timed out waiting for socket from pool with max_size %r and' - ' wait_queue_timeout %r' % ( - self.max_size, self.wait_queue_timeout)) - - def __del__(self): - # Avoid ResourceWarnings in Python 3 - for sock_info in self.sockets: - sock_info.close() - - for request_sock in self._tid_to_sock.values(): - if request_sock not in (NO_REQUEST, NO_SOCKET_YET): - request_sock.close() - - -class Request(object): - """ - A context manager returned by :meth:`start_request`, so you can do - `with client.start_request(): do_something()` in Python 2.5+. - """ - def __init__(self, connection): - self.connection = connection - - def end(self): - self.connection.end_request() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.end() - # Returning False means, "Don't suppress exceptions if any were - # thrown within the block" - return False +__doc__ = original_doc +__all__ = ["PoolOptions"] # noqa: F405 diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py new file mode 100644 index 0000000000..a5d76007b0 --- /dev/null +++ b/pymongo/pool_options.py @@ -0,0 +1,537 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Pool options for AsyncMongoClient/MongoClient. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
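With this change `pymongo/pool.py` shrinks to a thin re-export shim: the implementation moves to `pymongo.synchronous.pool`, and `__all__` keeps `PoolOptions` importable from the historical path. Assuming a PyMongo build with that layout, both spellings should resolve to the same class:

```python
# Sketch: old and new import paths are expected to point at one class.
from pymongo.pool import PoolOptions as compat_pool_options
from pymongo.pool_options import PoolOptions as pool_options

assert compat_pool_options is pool_options
```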
+""" +from __future__ import annotations + +import copy +import os +import platform +import sys +from pathlib import Path +from typing import TYPE_CHECKING, Any, MutableMapping, Optional + +import bson +from pymongo import __version__ +from pymongo.common import ( + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_POOL_SIZE, + MIN_POOL_SIZE, + WAIT_QUEUE_TIMEOUT, + has_c, +) + +if TYPE_CHECKING: + from pymongo.auth_shared import MongoCredential + from pymongo.compression_support import CompressionSettings + from pymongo.driver_info import DriverInfo + from pymongo.monitoring import _EventListeners + from pymongo.pyopenssl_context import SSLContext + from pymongo.server_api import ServerApi + + +_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} + +if sys.platform.startswith("linux"): + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() + _METADATA["os"] = { + "type": _name, + "name": _name, + "architecture": platform.machine(), + # Kernel version (e.g. 4.4.0-17-generic). + "version": platform.release(), + } +elif sys.platform == "darwin": + _METADATA["os"] = { + "type": platform.system(), + "name": platform.system(), + "architecture": platform.machine(), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + "version": platform.mac_ver()[0], + } +elif sys.platform == "win32": + _ver = sys.getwindowsversion() + _METADATA["os"] = { + "type": "Windows", + "name": "Windows", + # Avoid using platform calls, see PYTHON-4455. + "architecture": os.environ.get("PROCESSOR_ARCHITECTURE") or platform.machine(), + # Windows patch level (e.g. 10.0.17763-SP0). + "version": ".".join(map(str, _ver[:3])) + f"-SP{_ver[-1] or '0'}", + } +elif sys.platform.startswith("java"): + _name, _ver, _arch = platform.java_ver()[-1] + _METADATA["os"] = { + # Linux, Windows 7, Mac OS X, etc. + "type": _name, + "name": _name, + # x86, x86_64, AMD64, etc. + "architecture": _arch, + # Linux kernel version, OSX version, etc. + "version": _ver, + } +else: + # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = { + "type": platform.system(), + "name": " ".join([part for part in _aliased[:2] if part]), + "architecture": platform.machine(), + "version": _aliased[2], + } + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) +else: + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) + +DOCKER_ENV_PATH = "/.dockerenv" +ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" + +RUNTIME_NAME_DOCKER = "docker" +ORCHESTRATOR_NAME_K8S = "kubernetes" + + +def get_container_env_info() -> dict[str, str]: + """Returns the runtime and orchestrator of a container. 
+ If neither value is present, the metadata client.env.container field will be omitted.""" + container = {} + + if Path(DOCKER_ENV_PATH).exists(): + container["runtime"] = RUNTIME_NAME_DOCKER + if os.getenv(ENV_VAR_K8S): + container["orchestrator"] = ORCHESTRATOR_NAME_K8S + + return container + + +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _is_faas() -> bool: + return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> dict[str, Any]: + env: dict[str, Any] = {} + container = get_container_env_info() + if container: + env["container"] = container + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#limitations +def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 5. Truncate driver info. 
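`_metadata_env` above deliberately bails out unless exactly one FaaS detector matches: ambient variables such as `VERCEL` can leak into other providers' runtimes, and reporting the wrong environment is worse than reporting none. The guard in isolation (a sketch; `detect_faas` is a hypothetical helper):

```python
import os
from typing import Optional

def detect_faas() -> Optional[str]:
    """Return a provider name only when exactly one detector fires."""
    detectors = {
        "aws.lambda": bool(os.getenv("AWS_LAMBDA_RUNTIME_API")),
        "azure.func": bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")),
        "gcp.func": bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")),
        "vercel": bool(os.getenv("VERCEL")),
    }
    matches = [name for name, hit in detectors.items() if hit]
    # Zero matches: not FaaS. Two or more: conflicting signals, trust none.
    return matches[0] if len(matches) == 1 else None
```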
+ overflow = encoded_size - _MAX_METADATA_SIZE + driver = metadata.get("driver", {}) + if driver: + # Truncate driver version. + driver_version = driver.get("version")[:-overflow] + if len(driver_version) >= len(_METADATA["driver"]["version"]): + metadata["driver"]["version"] = driver_version + else: + metadata["driver"]["version"] = _METADATA["driver"]["version"] + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # Truncate driver name. + overflow = encoded_size - _MAX_METADATA_SIZE + driver_name = driver.get("name")[:-overflow] + if len(driver_name) >= len(_METADATA["driver"]["name"]): + metadata["driver"]["name"] = driver_name + else: + metadata["driver"]["name"] = _METADATA["driver"]["name"] + + +# If the first getaddrinfo call of this interpreter's life is on a thread, +# while the main thread holds the import lock, getaddrinfo deadlocks trying +# to import the IDNA codec. Import it here, where presumably we're on the +# main thread, to avoid the deadlock. See PYTHON-607. +"foo".encode("idna") + + +class PoolOptions: + """Read only connection pool options for an AsyncMongoClient/MongoClient. + + Should not be instantiated directly by application developers. Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ + + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size: int = MAX_POOL_SIZE, + min_pool_size: int = MIN_POOL_SIZE, + max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, + connect_timeout: Optional[float] = None, + socket_timeout: Optional[float] = None, + wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, + ssl_context: Optional[SSLContext] = None, + tls_allow_invalid_hostnames: bool = False, + event_listeners: Optional[_EventListeners] = None, + appname: Optional[str] = None, + driver: Optional[DriverInfo] = None, + compression_settings: Optional[CompressionSettings] = None, + max_connecting: int = MAX_CONNECTING, + pause_enabled: bool = True, + server_api: Optional[ServerApi] = None, + load_balanced: Optional[bool] = None, + credentials: Optional[MongoCredential] = None, + is_sync: Optional[bool] = True, + ): + self.__max_pool_size = max_pool_size + self.__min_pool_size = min_pool_size + self.__max_idle_time_seconds = max_idle_time_seconds + self.__connect_timeout = connect_timeout + self.__socket_timeout = socket_timeout + self.__wait_queue_timeout = wait_queue_timeout + self.__ssl_context = ssl_context + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames + self.__event_listeners = event_listeners + self.__appname = appname + self.__driver = driver + self.__compression_settings = compression_settings + self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled + self.__server_api = server_api + self.__load_balanced = load_balanced + self.__credentials = credentials + self.__metadata = copy.deepcopy(_METADATA) + + if appname: + self.__metadata["application"] = {"name": appname} + + # Combine the "driver" AsyncMongoClient option with 
PyMongo's info, like: + # { + # 'driver': { + # 'name': 'PyMongo|MyDriver', + # 'version': '4.2.0|1.2.3', + # }, + # 'platform': 'CPython 3.8.0|MyPlatform' + # } + if has_c(): + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "c", + ) + if not is_sync: + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "async", + ) + if driver: + self._update_metadata(driver) + + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + + def _update_metadata(self, driver: DriverInfo) -> None: + """Updates the client's metadata""" + if driver.name and driver.name.lower() in self.__metadata["driver"]["name"].lower().split( + "|" + ): + return + + metadata = copy.deepcopy(self.__metadata) + + if driver.name: + metadata["driver"]["name"] = "{}|{}".format( + metadata["driver"]["name"], + driver.name, + ) + if driver.version: + metadata["driver"]["version"] = "{}|{}".format( + metadata["driver"]["version"], + driver.version, + ) + if driver.platform: + metadata["platform"] = "{}|{}".format(metadata["platform"], driver.platform) + + self.__metadata = metadata + + @property + def _credentials(self) -> Optional[MongoCredential]: + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + + @property + def non_default_options(self) -> dict[str, Any]: + """The non-default options this pool was created with. + + Added for CMAP's :class:`PoolCreatedEvent`. + """ + opts = {} + if self.__max_pool_size != MAX_POOL_SIZE: + opts["maxPoolSize"] = self.__max_pool_size + if self.__min_pool_size != MIN_POOL_SIZE: + opts["minPoolSize"] = self.__min_pool_size + if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: + assert self.__max_idle_time_seconds is not None + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 + if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: + assert self.__wait_queue_timeout is not None + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts["maxConnecting"] = self.__max_connecting + return opts + + @property + def max_pool_size(self) -> float: + """The maximum allowable number of concurrent connections to each + connected server. Requests to a server will block if there are + `maxPoolSize` outstanding connections to the requested server. + Defaults to 100. Cannot be 0. + + When a server's pool has reached `max_pool_size`, operations for that + server block waiting for a socket to be returned to the pool. If + ``waitQueueTimeoutMS`` is set, a blocked operation will raise + :exc:`~pymongo.errors.ConnectionFailure` after a timeout. + By default ``waitQueueTimeoutMS`` is not set. + """ + return self.__max_pool_size + + @property + def min_pool_size(self) -> int: + """The minimum required number of concurrent connections that the pool + will maintain to each connected server. Default is 0. + """ + return self.__min_pool_size + + @property + def max_connecting(self) -> int: + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + + @property + def pause_enabled(self) -> bool: + return self.__pause_enabled + + @property + def max_idle_time_seconds(self) -> Optional[int]: + """The maximum number of seconds that a connection can remain + idle in the pool before being removed and replaced. Defaults to + `None` (no limit). 
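`_update_metadata` above appends wrapping-library info with a `|` separator so a framework built on PyMongo shows up alongside the driver in server logs. Applications opt in through the public `driver` option; roughly (the `MyODM` values here are placeholders):

```python
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo

client = MongoClient(
    "mongodb://localhost:27017",
    driver=DriverInfo(name="MyODM", version="1.2.3", platform="MyPlatform"),
)
# The handshake metadata then contains something like:
#   {'name': 'PyMongo|MyODM', 'version': '4.x.y|1.2.3'}
print(client.options.pool_options.metadata["driver"])
```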
+ """ + return self.__max_idle_time_seconds + + @property + def connect_timeout(self) -> Optional[float]: + """How long a connection can take to be opened before timing out.""" + return self.__connect_timeout + + @property + def socket_timeout(self) -> Optional[float]: + """How long a send or receive on a socket can take before timing out.""" + return self.__socket_timeout + + @property + def wait_queue_timeout(self) -> Optional[int]: + """How long a thread will wait for a socket from the pool if the pool + has no free sockets. + """ + return self.__wait_queue_timeout + + @property + def _ssl_context(self) -> Optional[SSLContext]: + """An SSLContext instance or None.""" + return self.__ssl_context + + @property + def tls_allow_invalid_hostnames(self) -> bool: + """If True skip ssl.match_hostname.""" + return self.__tls_allow_invalid_hostnames + + @property + def _event_listeners(self) -> Optional[_EventListeners]: + """An instance of pymongo.monitoring._EventListeners.""" + return self.__event_listeners + + @property + def appname(self) -> Optional[str]: + """The application name, for sending with hello in server handshake.""" + return self.__appname + + @property + def driver(self) -> Optional[DriverInfo]: + """Driver name and version, for sending with hello in handshake.""" + return self.__driver + + @property + def _compression_settings(self) -> Optional[CompressionSettings]: + return self.__compression_settings + + @property + def metadata(self) -> dict[str, Any]: + """A dict of metadata about the application, driver, os, and platform.""" + return self.__metadata.copy() + + @property + def server_api(self) -> Optional[ServerApi]: + """A pymongo.server_api.ServerApi or None.""" + return self.__server_api + + @property + def load_balanced(self) -> Optional[bool]: + """True if this Pool is configured in load balanced mode.""" + return self.__load_balanced diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py new file mode 100644 index 0000000000..8db26ccead --- /dev/null +++ b/pymongo/pool_shared.py @@ -0,0 +1,521 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Pool utilities and shared helper methods.""" +from __future__ import annotations + +import asyncio +import functools +import socket +import ssl +import sys +from typing import ( + TYPE_CHECKING, + Any, + NoReturn, + Optional, + Union, +) + +from pymongo import _csot +from pymongo.asynchronous.helpers import _getaddrinfo +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConnectionFailure, + NetworkTimeout, + _CertificateError, +) +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol +from pymongo.pool_options import PoolOptions +from pymongo.ssl_support import PYSSLError, SSLError, _has_sni + +SSLErrors = (PYSSLError, SSLError) +if TYPE_CHECKING: + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_MAX_TCP_KEEPIDLE = 120 +_MAX_TCP_KEEPINTVL = 10 +_MAX_TCP_KEEPCNT = 9 + +if sys.platform == "win32": + try: + import _winreg as winreg + except ImportError: + import winreg + + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + +else: + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: + if hasattr(socket, tcp_option): + sockopt = getattr(socket, tcp_option) + try: + # PYTHON-1350 - NetBSD doesn't implement getsockopt for + # TCP_KEEPIDLE and friends. Don't attempt to set the + # values there. 
+ default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) + if default > max_value: + sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) + except OSError: + pass + + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: + """Convert a socket.error to ConnectionFailure and raise it.""" + host, port = address + # If connecting to a Unix socket, port will be None. + if port is not None: + msg = "%s:%d: %s" % (host, port, error) + else: + msg = f"{host}: {error}" + if msg_prefix: + msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) + if ( + isinstance(error, socket.timeout) + or isinstance(error, SSLErrors) + and "timed out" in str(error) + ): + raise NetworkTimeout(msg) from error + else: + raise AutoReconnect(msg) from error + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +async def _async_create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setblocking(False) + await asyncio.get_running_loop().sock_connect(sock, host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in await _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. 
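`_raise_connection_failure` above folds every transport-level failure into either `NetworkTimeout` or `AutoReconnect`, keeping the low-level exception chained via `from`. The shape of that translation, reduced to stdlib types (the stand-in exception classes here are illustrative, not PyMongo's):

```python
import socket

class NetworkTimeout(Exception): ...
class AutoReconnect(Exception): ...

def raise_connection_failure(address, error):
    host, port = address
    msg = f"{host}:{port}: {error}" if port is not None else f"{host}: {error}"
    if isinstance(error, socket.timeout) or "timed out" in str(error):
        raise NetworkTimeout(msg) from error  # chained for debugging
    raise AutoReconnect(msg) from error

try:
    raise_connection_failure(("db.example.com", 27017), socket.timeout("timed out"))
except NetworkTimeout as exc:
    print(exc, "| caused by:", repr(exc.__cause__))
```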
+ timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + # Socket needs to be non-blocking during connection to not block the event loop + sock.setblocking(False) + await asyncio.wait_for( + asyncio.get_running_loop().sock_connect(sock, sa), timeout=timeout + ) + sock.settimeout(timeout) + return sock + except asyncio.TimeoutError as e: + sock.close() + err = socket.timeout("timed out") + err.__cause__ = e + except OSError as e: + sock.close() + err = e # type: ignore[assignment] + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +async def _async_configured_socket( + address: _Address, options: PoolOptions +) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(False): + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] + ) + else: + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +async def _configured_protocol_interface( + address: _Address, options: PoolOptions +) -> AsyncNetworkingInterface: + """Given (host, port) and PoolOptions, return a configured AsyncNetworkingInterface. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets protocol's SSL and timeout options. 
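The async connect path above puts the socket in non-blocking mode, drives the TCP connect through the event loop, and enforces the connect timeout with `asyncio.wait_for`; only after the connect succeeds is the timeout installed with `settimeout`. A stripped-down sketch of the same sequence:

```python
import asyncio
import socket

async def async_connect(host: str, port: int, connect_timeout: float) -> socket.socket:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)  # Must not block the event loop.
    loop = asyncio.get_running_loop()
    try:
        await asyncio.wait_for(loop.sock_connect(sock, (host, port)), timeout=connect_timeout)
    except asyncio.TimeoutError as exc:
        sock.close()
        raise socket.timeout("timed out") from exc
    return sock

# asyncio.run(async_connect("localhost", 27017, 5.0))
```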
+ """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + timeout = options.socket_timeout + + if ssl_context is None: + return AsyncNetworkingInterface( + await asyncio.get_running_loop().create_connection( + lambda: PyMongoProtocol(timeout=timeout), sock=sock + ) + ) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + transport, protocol = await asyncio.get_running_loop().create_connection( # type: ignore[call-overload] + lambda: PyMongoProtocol(timeout=timeout), + sock=sock, + server_hostname=host, + ssl=ssl_context, + ) + except _CertificateError: + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(transport.get_extra_info("peercert"), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + transport.abort() + raise + + return AsyncNetworkingInterface((transport, protocol)) + + +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in socket.getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined, unused-ignore] + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. 
+ timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(True): + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] + else: + ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +def _configured_socket_interface(address: _Address, options: PoolOptions) -> NetworkingInterface: + """Given (host, port) and PoolOptions, return a NetworkingInterface wrapping a configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return NetworkingInterface(sock) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(True): + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = ssl_context.wrap_socket(sock) + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. 
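`_configured_socket` above wraps the raw socket only after the TCP connect, passes the hostname for SNI and hostname checking, and then swaps the connect timeout for the per-operation `socket_timeout`. With the stdlib alone the sequence looks roughly like this (a sketch; PyMongo builds its context from the TLS URI options instead of `create_default_context`):

```python
import socket
import ssl

def tls_connect(host: str, port: int, connect_timeout=20.0, socket_timeout=None):
    ctx = ssl.create_default_context()  # verification and hostname check on by default
    sock = socket.create_connection((host, port), timeout=connect_timeout)
    try:
        ssl_sock = ctx.wrap_socket(sock, server_hostname=host)  # SNI + hostname check
    except (OSError, ssl.SSLError):
        sock.close()
        raise
    ssl_sock.settimeout(socket_timeout)  # Switch to the per-operation timeout.
    return ssl_sock
```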
+ details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return NetworkingInterface(ssl_sock) diff --git a/pymongo/py.typed b/pymongo/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/pymongo/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py new file mode 100644 index 0000000000..08fe99c889 --- /dev/null +++ b/pymongo/pyopenssl_context.py @@ -0,0 +1,428 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""A CPython compatible SSLContext implementation wrapping PyOpenSSL's +context. + +Due to limitations of the CPython asyncio.Protocol implementation for SSL, the async API does not support PyOpenSSL. 
+""" +from __future__ import annotations + +import socket as _socket +import ssl as _stdlibssl +import sys as _sys +import time as _time +from errno import EINTR as _EINTR +from ipaddress import ip_address as _ip_address +from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union + +import cryptography.x509 as x509 +import service_identity +from OpenSSL import SSL as _SSL +from OpenSSL import crypto as _crypto + +from pymongo.errors import ConfigurationError as _ConfigurationError +from pymongo.errors import _CertificateError # type:ignore[attr-defined] +from pymongo.ocsp_cache import _OCSPCache +from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback +from pymongo.socket_checker import SocketChecker as _SocketChecker +from pymongo.socket_checker import _errno_from_exception +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from ssl import VerifyMode + + +_T = TypeVar("_T") + +try: + import certifi + + _HAVE_CERTIFI = True +except ImportError: + _HAVE_CERTIFI = False + +PROTOCOL_SSLv23 = _SSL.SSLv23_METHOD +# Always available +OP_NO_SSLv2 = _SSL.OP_NO_SSLv2 +OP_NO_SSLv3 = _SSL.OP_NO_SSLv3 +OP_NO_COMPRESSION = _SSL.OP_NO_COMPRESSION +# This isn't currently documented for PyOpenSSL +OP_NO_RENEGOTIATION = getattr(_SSL, "OP_NO_RENEGOTIATION", 0) + +# Always available +HAS_SNI = True +IS_PYOPENSSL = True + +# Base Exception class +SSLError = _SSL.Error + +# https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L2995-L3002 +_VERIFY_MAP = { + _stdlibssl.CERT_NONE: _SSL.VERIFY_NONE, + _stdlibssl.CERT_OPTIONAL: _SSL.VERIFY_PEER, + _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, +} + +_REVERSE_VERIFY_MAP = {value: key for key, value in _VERIFY_MAP.items()} + + +# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are +# not permitted for SNI hostname. +def _is_ip_address(address: Any) -> bool: + try: + _ip_address(address) + return True + except (ValueError, UnicodeError): + return False + + +# According to the docs for socket.send it can raise +# WantX509LookupError and should be retried. +BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +BLOCKING_IO_READ_ERROR = _SSL.WantReadError +BLOCKING_IO_WRITE_ERROR = _SSL.WantWriteError +BLOCKING_IO_LOOKUP_ERROR = _SSL.WantX509LookupError + + +def _ragged_eof(exc: BaseException) -> bool: + """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" + return exc.args == (-1, "Unexpected EOF") + + +# https://github.com/pyca/pyopenssl/issues/168 +# https://github.com/pyca/pyopenssl/issues/176 +# https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets +class _sslConn(_SSL.Connection): + def __init__( + self, + ctx: _SSL.Context, + sock: Optional[_socket.socket], + suppress_ragged_eofs: bool, + ): + self.socket_checker = _SocketChecker() + self.suppress_ragged_eofs = suppress_ragged_eofs + super().__init__(ctx, sock) + + def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: + timeout = self.gettimeout() + if timeout: + start = _time.monotonic() + while True: + try: + return call(*args, **kwargs) + except BLOCKING_IO_ERRORS as exc: + # Do not retry if the connection is in non-blocking mode. + if timeout == 0: + raise exc + # Check for closed socket. 
+ if self.fileno() == -1: + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") from None + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, _SSL.WantReadError): + want_read = True + want_write = False + elif isinstance(exc, _SSL.WantWriteError): + want_read = False + want_write = True + else: + want_read = True + want_write = True + self.socket_checker.select(self, want_read, want_write, timeout) + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") from None + continue + + def do_handshake(self, *args: Any, **kwargs: Any) -> None: + return self._call(super().do_handshake, *args, **kwargs) + + def recv(self, *args: Any, **kwargs: Any) -> bytes: + try: + return self._call(super().recv, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return b"" + raise + + def recv_into(self, *args: Any, **kwargs: Any) -> int: + try: + return self._call(super().recv_into, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return 0 + raise + + def sendall(self, buf: bytes, flags: int = 0) -> None: # type: ignore[override] + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + while total_sent < total_length: + try: + sent = self._call(super().send, view[total_sent:], flags) + # XXX: It's not clear if this can actually happen. PyOpenSSL + # doesn't appear to have any interrupt handling, nor any interrupt + # errors for OpenSSL connections. + except OSError as exc: + if _errno_from_exception(exc) == _EINTR: + continue + raise + # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 + # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html + if sent <= 0: + raise OSError("connection closed") + total_sent += sent + + +class _CallbackData: + """Data class which is passed to the OCSP callback.""" + + def __init__(self) -> None: + self.trusted_ca_certs: Optional[list[x509.Certificate]] = None + self.check_ocsp_endpoint: Optional[bool] = None + self.ocsp_response_cache = _OCSPCache() + + +class SSLContext: + """A CPython compatible SSLContext implementation wrapping PyOpenSSL's + context. + """ + + __slots__ = ("_protocol", "_ctx", "_callback_data", "_check_hostname") + + def __init__(self, protocol: int): + self._protocol = protocol + self._ctx = _SSL.Context(self._protocol) + self._callback_data = _CallbackData() + self._check_hostname = True + # OCSP + # XXX: Find a better place to do this someday, since this is client + # side configuration and wrap_socket tries to support both client and + # server side sockets. + self._callback_data.check_ocsp_endpoint = True + self._ctx.set_ocsp_client_callback(callback=_ocsp_callback, data=self._callback_data) + + @property + def protocol(self) -> int: + """The protocol version chosen when constructing the context. + This attribute is read-only. + """ + return self._protocol + + def __get_verify_mode(self) -> VerifyMode: + """Whether to try to verify other peers' certificates and how to + behave if verification fails. This attribute must be one of + ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. 
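PyOpenSSL raises `WantReadError`/`WantWriteError` instead of blocking, so `_call` above retries the operation after waiting for socket readiness, re-checking an overall deadline on each pass. The core retry shape, sketched with plain `select.select` (PyMongo's `_call` uses its `SocketChecker` helper; `call_with_retry` and its arguments are illustrative):

```python
import select
import socket
import time

def call_with_retry(conn, op, *args, timeout=30.0):
    """Retry `op` while pyOpenSSL reports the socket is not ready yet."""
    from OpenSSL import SSL  # assumed installed, as in the module above
    deadline = time.monotonic() + timeout
    while True:
        try:
            return op(*args)
        except SSL.WantReadError:
            select.select([conn], [], [], max(0.0, deadline - time.monotonic()))
        except SSL.WantWriteError:
            select.select([], [conn], [], max(0.0, deadline - time.monotonic()))
        if time.monotonic() > deadline:
            raise socket.timeout("timed out")
```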
+        """
+        return _REVERSE_VERIFY_MAP[self._ctx.get_verify_mode()]
+
+    def __set_verify_mode(self, value: VerifyMode) -> None:
+        """Setter for verify_mode."""
+
+        def _cb(
+            _connobj: _SSL.Connection,
+            _x509obj: _crypto.X509,
+            _errnum: int,
+            _errdepth: int,
+            retcode: int,
+        ) -> bool:
+            # It seems we don't need to do anything here. Twisted doesn't,
+            # and OpenSSL's SSL_CTX_set_verify lets you pass NULL
+            # for the callback option. It's weird that PyOpenSSL requires
+            # this.
+            # This is optional in pyopenssl >= 20 and can be removed once the
+            # minimum supported version is bumped.
+            # See: pyopenssl.org/en/latest/changelog.html#id47
+            return bool(retcode)
+
+        self._ctx.set_verify(_VERIFY_MAP[value], _cb)
+
+    verify_mode = property(__get_verify_mode, __set_verify_mode)
+
+    def __get_check_hostname(self) -> bool:
+        return self._check_hostname
+
+    def __set_check_hostname(self, value: Any) -> None:
+        validate_boolean("check_hostname", value)
+        self._check_hostname = value
+
+    check_hostname = property(__get_check_hostname, __set_check_hostname)
+
+    def __get_check_ocsp_endpoint(self) -> Optional[bool]:
+        return self._callback_data.check_ocsp_endpoint
+
+    def __set_check_ocsp_endpoint(self, value: bool) -> None:
+        validate_boolean("check_ocsp", value)
+        self._callback_data.check_ocsp_endpoint = value
+
+    check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint)
+
+    def __get_options(self) -> int:
+        # Calling set_options adds the option to the existing bitmask and
+        # returns the new bitmask.
+        # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_options
+        return self._ctx.set_options(0)
+
+    def __set_options(self, value: int) -> None:
+        # Explicitly convert to int, since newer CPython versions
+        # use enum.IntFlag for options. The values are the same
+        # regardless of implementation.
+        self._ctx.set_options(int(value))
+
+    options = property(__get_options, __set_options)
+
+    def load_cert_chain(
+        self,
+        certfile: Union[str, bytes],
+        keyfile: Union[str, bytes, None] = None,
+        password: Optional[str] = None,
+    ) -> None:
+        """Load a private key and the corresponding certificate. The certfile
+        string must be the path to a single file in PEM format containing the
+        certificate as well as any number of CA certificates needed to
+        establish the certificate's authenticity. The keyfile string, if
+        present, must point to a file containing the private key. Otherwise
+        the private key will be taken from certfile as well.
+        """
+        # Match CPython behavior
+        # https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L3930-L3971
+        # Password callback MUST be set first or it will be ignored.
+        if password:
+
+            def _pwcb(_max_length: int, _prompt_twice: bool, _user_data: Optional[bytes]) -> bytes:
+                # XXX: We could check the password length against what OpenSSL
+                # tells us is the max, but we can't raise an exception, so...
+                # warn?
+                assert password is not None
+                return password.encode("utf-8")
+
+            self._ctx.set_passwd_cb(_pwcb)
+        self._ctx.use_certificate_chain_file(certfile)
+        self._ctx.use_privatekey_file(keyfile or certfile)
+        self._ctx.check_privatekey()
+
+    def load_verify_locations(
+        self, cafile: Optional[str] = None, capath: Optional[str] = None
+    ) -> None:
+        """Load a set of "certification authority" (CA) certificates used to
+        validate other peers' certificates when `~verify_mode` is other than
+        ssl.CERT_NONE.
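A minimal sketch of load_cert_chain above with an encrypted private key; the
file paths and password are hypothetical:

    ctx = SSLContext(PROTOCOL_SSLv23)
    # The password callback is registered before use_privatekey_file so
    # OpenSSL can decrypt the key; omitting keyfile would read the key from
    # client.pem instead.
    ctx.load_cert_chain("client.pem", keyfile="client-key.pem", password="s3cret")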
+        """
+        self._ctx.load_verify_locations(cafile, capath)
+        # Manually load the CA certs when get_verified_chain is not available (pyopenssl<20).
+        if not hasattr(_SSL.Connection, "get_verified_chain"):
+            assert cafile is not None
+            self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile)
+
+    def _load_certifi(self) -> None:
+        """Attempt to load CA certs from certifi."""
+        if _HAVE_CERTIFI:
+            self.load_verify_locations(certifi.where())
+        else:
+            raise _ConfigurationError(
+                "tlsAllowInvalidCertificates is False but no system "
+                "CA certificates could be loaded. Please install the "
+                "certifi package, or provide a path to a CA file using "
+                "the tlsCAFile option"
+            )
+
+    def _load_wincerts(self, store: str) -> None:
+        """Attempt to load CA certs from Windows trust store."""
+        cert_store = self._ctx.get_cert_store()
+        assert cert_store is not None
+        oid = _stdlibssl.Purpose.SERVER_AUTH.oid
+
+        for cert, encoding, trust in _stdlibssl.enum_certificates(store):  # type: ignore
+            if encoding == "x509_asn":
+                if trust is True or oid in trust:
+                    cert_store.add_cert(
+                        _crypto.X509.from_cryptography(x509.load_der_x509_certificate(cert))
+                    )
+
+    def load_default_certs(self) -> None:
+        """A PyOpenSSL version of load_default_certs from CPython."""
+        # PyOpenSSL is incapable of loading CA certs from Windows, and mostly
+        # incapable on macOS.
+        # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths
+        if _sys.platform == "win32":
+            try:
+                for storename in ("CA", "ROOT"):
+                    self._load_wincerts(storename)
+            except PermissionError:
+                # Fall back to certifi
+                self._load_certifi()
+        elif _sys.platform == "darwin":
+            self._load_certifi()
+        self._ctx.set_default_verify_paths()
+
+    def set_default_verify_paths(self) -> None:
+        """Specify that the platform provided CA certificates are to be used
+        for verification purposes.
+        """
+        # Note: See PyOpenSSL's docs for limitations, which are similar
+        # but not the same as CPython's.
+        self._ctx.set_default_verify_paths()
+
+    def wrap_socket(
+        self,
+        sock: _socket.socket,
+        server_side: bool = False,
+        do_handshake_on_connect: bool = True,
+        suppress_ragged_eofs: bool = True,
+        server_hostname: Optional[str] = None,
+        session: Optional[_SSL.Session] = None,
+    ) -> _sslConn:
+        """Wrap an existing Python socket connection and return a TLS socket
+        object.
+        """
+        ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs)
+        if session:
+            ssl_conn.set_session(session)
+        if server_side is True:
+            ssl_conn.set_accept_state()
+        else:
+            # SNI
+            if server_hostname and not _is_ip_address(server_hostname):
+                # XXX: Do this in a callback registered with
+                # SSLContext.set_info_callback? See Twisted for an example.
+                ssl_conn.set_tlsext_host_name(server_hostname.encode("idna"))
+            if self.verify_mode != _stdlibssl.CERT_NONE:
+                # Request a stapled OCSP response.
+                ssl_conn.request_ocsp()
+            ssl_conn.set_connect_state()
+        # If this wasn't true the caller of wrap_socket would call
+        # do_handshake()
+        if do_handshake_on_connect:
+            # XXX: If we do hostname checking in a callback we can get rid
+            # of this call to do_handshake() since the handshake
+            # will happen automatically later.
+            ssl_conn.do_handshake()
+        # XXX: Do this in a callback registered with
+        # SSLContext.set_info_callback? See Twisted for an example.
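A quick sketch of the RFC 6066 rule applied above: SNI is set only for DNS
names, never for IP literals (values illustrative):

    assert _is_ip_address("127.0.0.1")         # IPv4 literal -> SNI skipped
    assert _is_ip_address("::1")               # IPv6 literal -> SNI skipped
    assert not _is_ip_address("example.com")   # DNS name -> SNI is sent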
+ if self.check_hostname and server_hostname is not None: + from service_identity import pyopenssl + + try: + if _is_ip_address(server_hostname): + pyopenssl.verify_ip_address(ssl_conn, server_hostname) + else: + pyopenssl.verify_hostname(ssl_conn, server_hostname) + except ( + service_identity.CertificateError, + service_identity.VerificationError, + ) as exc: + raise _CertificateError(str(exc)) from None + return ssl_conn diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py new file mode 100644 index 0000000000..2adc403366 --- /dev/null +++ b/pymongo/read_concern.py @@ -0,0 +1,79 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License", +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with read concerns. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import Any, Optional + + +class ReadConcern: + """ReadConcern + + :param level: (string) The read concern level specifies the level of + isolation for read operations. For example, a read operation using a + read concern level of ``majority`` will only return data that has been + written to a majority of nodes. If the level is left unspecified, the + server default will be used. + + .. versionadded:: 3.2 + + """ + + def __init__(self, level: Optional[str] = None) -> None: + if level is None or isinstance(level, str): + self.__level = level + else: + raise TypeError(f"level must be a string or None, not {type(level)}") + + @property + def level(self) -> Optional[str]: + """The read concern level.""" + return self.__level + + @property + def ok_for_legacy(self) -> bool: + """Return ``True`` if this read concern is compatible with + old wire protocol versions. + """ + return self.level is None or self.level == "local" + + @property + def document(self) -> dict[str, Any]: + """The document representation of this read concern. + + .. note:: + :class:`ReadConcern` is immutable. Mutating the value of + :attr:`document` does not mutate this :class:`ReadConcern`. + """ + doc = {} + if self.__level: + doc["level"] = self.level + return doc + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ReadConcern): + return self.document == other.document + return NotImplemented + + def __repr__(self) -> str: + if self.level: + return "ReadConcern(%s)" % self.level + return "ReadConcern()" + + +DEFAULT_READ_CONCERN = ReadConcern() diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 5be7e3cff5..35b92c4d01 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -1,10 +1,10 @@ -# Copyright 2012-2014 MongoDB, Inc. +# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License", # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,194 +12,626 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for choosing which member of a replica set to read from.""" +"""Utilities for choosing which member of a replica set to read from. -import random +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" + +from __future__ import annotations +import warnings +from collections import abc +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError +from pymongo.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) +if TYPE_CHECKING: + from pymongo.server_selectors import Selection + from pymongo.topology_description import TopologyDescription -class ReadPreference: - """An enum that defines the read preference modes supported by PyMongo. - Used in three cases: - :class:`~pymongo.mongo_client.MongoClient` connected to a single host: +_PRIMARY = 0 +_PRIMARY_PREFERRED = 1 +_SECONDARY = 2 +_SECONDARY_PREFERRED = 3 +_NEAREST = 4 - * `PRIMARY`: Queries are allowed if the host is standalone or the replica - set primary. - * All other modes allow queries to standalone servers, to the primary, or - to secondaries. - :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a - sharded cluster of replica sets: +_MONGOS_MODES = ( + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", +) + +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] - * `PRIMARY`: Queries are sent to the primary of a shard. - * `PRIMARY_PREFERRED`: Queries are sent to the primary if available, - otherwise a secondary. - * `SECONDARY`: Queries are distributed among shard secondaries. An error - is raised if no secondaries are available. - * `SECONDARY_PREFERRED`: Queries are distributed among shard secondaries, - or the primary if no secondary is available. - * `NEAREST`: Queries are distributed among all members of a shard. - - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: - - * `PRIMARY`: Queries are sent to the primary of the replica set. - * `PRIMARY_PREFERRED`: Queries are sent to the primary if available, - otherwise a secondary. - * `SECONDARY`: Queries are distributed among secondaries. An error - is raised if no secondaries are available. - * `SECONDARY_PREFERRED`: Queries are distributed among secondaries, - or the primary if no secondary is available. - * `NEAREST`: Queries are distributed among all members. 
- """ - PRIMARY = 0 - PRIMARY_PREFERRED = 1 - SECONDARY = 2 - SECONDARY_ONLY = 2 - SECONDARY_PREFERRED = 3 - NEAREST = 4 +def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: + """Validate tag sets for a MongoClient.""" + if tag_sets is None: + return tag_sets -# For formatting error messages -modes = { - ReadPreference.PRIMARY: 'PRIMARY', - ReadPreference.PRIMARY_PREFERRED: 'PRIMARY_PREFERRED', - ReadPreference.SECONDARY: 'SECONDARY', - ReadPreference.SECONDARY_PREFERRED: 'SECONDARY_PREFERRED', - ReadPreference.NEAREST: 'NEAREST', -} + if not isinstance(tag_sets, (list, tuple)): + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") + if len(tag_sets) == 0: + raise ValueError( + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" + ) -_mongos_modes = [ - 'primary', - 'primaryPreferred', - 'secondary', - 'secondaryPreferred', - 'nearest', -] + for tags in tag_sets: + if not isinstance(tags, abc.Mapping): + raise TypeError( + f"Tag set {tags!r} invalid, must be an instance of dict, " + "bson.son.SON or other type that inherits from " + "collection.Mapping" + ) -def mongos_mode(mode): - return _mongos_modes[mode] + return list(tag_sets) -def mongos_enum(enum): - return _mongos_modes.index(enum) -def select_primary(members): - for member in members: - if member.is_primary: - return member +def _invalid_max_staleness_msg(max_staleness: Any) -> str: + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness - return None +# Some duplication with common.py to avoid import cycle. +def _validate_max_staleness(max_staleness: Any) -> int: + """Validate max_staleness.""" + if max_staleness == -1: + return -1 -def select_member_with_tags(members, tags, secondary_only, latency): - candidates = [] + if not isinstance(max_staleness, int): + raise TypeError(_invalid_max_staleness_msg(max_staleness)) - for candidate in members: - if secondary_only and candidate.is_primary: - continue + if max_staleness <= 0: + raise ValueError(_invalid_max_staleness_msg(max_staleness)) - if not (candidate.is_primary or candidate.is_secondary): - # In RECOVERING or similar state - continue + return max_staleness - if candidate.matches_tags(tags): - candidates.append(candidate) - if not candidates: +def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: + """Validate hedge.""" + if hedge is None: return None - # ping_time is in seconds - fastest = min([candidate.get_avg_ping_time() for candidate in candidates]) - near_candidates = [ - candidate for candidate in candidates - if candidate.get_avg_ping_time() - fastest < latency / 1000.] + if not isinstance(hedge, dict): + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") + + warnings.warn( + "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. 
Support for 'hedge' will be removed in PyMongo 5.0.", + DeprecationWarning, + stacklevel=4, + ) + return hedge + + +class _ServerMode: + """Base class for all read preferences.""" + + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") + + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + self.__mongos_mode = _MONGOS_MODES[mode] + self.__mode = mode + self.__tag_sets = _validate_tag_sets(tag_sets) + self.__max_staleness = _validate_max_staleness(max_staleness) + self.__hedge = _validate_hedge(hedge) + + @property + def name(self) -> str: + """The name of this read preference.""" + return self.__class__.__name__ + + @property + def mongos_mode(self) -> str: + """The mongos mode of this read preference.""" + return self.__mongos_mode + + @property + def document(self) -> dict[str, Any]: + """Read preference as a document.""" + doc: dict[str, Any] = {"mode": self.__mongos_mode} + if self.__tag_sets not in (None, [{}]): + doc["tags"] = self.__tag_sets + if self.__max_staleness != -1: + doc["maxStalenessSeconds"] = self.__max_staleness + if self.__hedge not in (None, {}): + doc["hedge"] = self.__hedge + return doc + + @property + def mode(self) -> int: + """The mode of this read preference instance.""" + return self.__mode + + @property + def tag_sets(self) -> _TagSets: + """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to + read only from members whose ``dc`` tag has the value ``"ny"``. + To specify a priority-order for tag sets, provide a list of + tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag + set, ``{}``, means "read from any member that matches the mode, + ignoring tags." MongoClient tries each set of tags in turn + until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) + + .. seealso:: `Data-Center Awareness + `_ + """ + return list(self.__tag_sets) if self.__tag_sets else [{}] + + @property + def max_staleness(self) -> int: + """The maximum estimated length of time (in seconds) a replica set + secondary can fall behind the primary in replication before it will + no longer be selected for operations, or -1 for no maximum. + """ + return self.__max_staleness - return random.choice(near_candidates) + @property + def hedge(self) -> Optional[_Hedge]: + """**DEPRECATED** - The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0. + The read preference ``hedge`` parameter. -def select_member( - members, - mode=ReadPreference.PRIMARY, - tag_sets=None, - latency=15 -): - """Return a Member or None. + A dictionary that configures how the server will perform hedged reads. + It consists of the following keys: + + - ``enabled``: Enables or disables hedged reads in sharded clusters. + + Hedged reads are automatically enabled in MongoDB 4.4+ when using a + ``nearest`` read preference. To explicitly enable hedged reads, set + the ``enabled`` key to ``true``:: + + >>> Nearest(hedge={'enabled': True}) + + To explicitly disable hedged reads, set the ``enabled`` key to + ``False``:: + + >>> Nearest(hedge={'enabled': False}) + + .. 
versionadded:: 3.11
+        """
+        if self.__hedge is not None:
+            warnings.warn(
+                "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+        return self.__hedge
+
+    @property
+    def min_wire_version(self) -> int:
+        """The wire protocol version the server must support.
+
+        Some read preferences impose version requirements on all servers (e.g.
+        maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5).
+
+        All servers' maxWireVersion must be at least this read preference's
+        `min_wire_version`, or the driver raises
+        :exc:`~pymongo.errors.ConfigurationError`.
+        """
+        return 0 if self.__max_staleness == -1 else 5
+
+    def __repr__(self) -> str:
+        return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format(
+            self.name,
+            self.__tag_sets,
+            self.__max_staleness,
+            self.__hedge,
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, _ServerMode):
+            return (
+                self.mode == other.mode
+                and self.tag_sets == other.tag_sets
+                and self.max_staleness == other.max_staleness
+                and self.hedge == other.hedge
+            )
+        return NotImplemented
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __getstate__(self) -> dict[str, Any]:
+        """Return value of object for pickling.
+
+        Needed explicitly because __slots__ is defined.
+        """
+        return {
+            "mode": self.__mode,
+            "tag_sets": self.__tag_sets,
+            "max_staleness": self.__max_staleness,
+            "hedge": self.__hedge,
+        }
+
+    def __setstate__(self, value: Mapping[str, Any]) -> None:
+        """Restore from pickling."""
+        self.__mode = value["mode"]
+        self.__mongos_mode = _MONGOS_MODES[self.__mode]
+        self.__tag_sets = _validate_tag_sets(value["tag_sets"])
+        self.__max_staleness = _validate_max_staleness(value["max_staleness"])
+        self.__hedge = _validate_hedge(value["hedge"])
+
+    def __call__(self, selection: Selection) -> Selection:
+        return selection
+
+
+class Primary(_ServerMode):
+    """Primary read preference.
+
+    * When directly connected to one mongod queries are allowed if the server
+      is standalone or a replica set primary.
+    * When connected to a mongos queries are sent to the primary of a shard.
+    * When connected to a replica set queries are sent to the primary of
+      the replica set.
+    """
+
+    __slots__ = ()
+
+    def __init__(self) -> None:
+        super().__init__(_PRIMARY)
+
+    def __call__(self, selection: Selection) -> Selection:
+        """Apply this read preference to a Selection."""
+        return selection.primary_selection
+
+    def __repr__(self) -> str:
+        return "Primary()"
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, _ServerMode):
+            return other.mode == _PRIMARY
+        return NotImplemented
+
+
+class PrimaryPreferred(_ServerMode):
+    """PrimaryPreferred read preference.
+ + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are sent to the primary of a shard if + available, otherwise a shard secondary. + * When connected to a replica set queries are sent to the primary if + available, otherwise a secondary. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to an available secondary until the + primary of the replica set is discovered. + + :param tag_sets: The :attr:`~tag_sets` to use if the primary is not + available. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + if selection.primary: + return selection.primary_selection else: - return select_member(members, SECONDARY, tag_sets, latency) + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class Secondary(_ServerMode): + """Secondary read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among shard + secondaries. An error is raised if no secondaries are available. + * When connected to a replica set queries are distributed among + secondaries. An error is raised if no secondaries are available. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ - elif mode == SECONDARY: - for tags in tag_sets: - candidate = select_member_with_tags(members, tags, True, latency) - if candidate: - return candidate + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class SecondaryPreferred(_ServerMode): + """SecondaryPreferred read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. 
+ * When connected to a mongos queries are distributed among shard + secondaries, or the shard primary if no secondary is available. + * When connected to a replica set queries are distributed among + secondaries, or the primary if no secondary is available. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to the primary of the replica set until + an available secondary is discovered. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ - return None + __slots__ = () - elif mode == SECONDARY_PREFERRED: - # Recurse. - candidate_secondary = select_member( - members, SECONDARY, tag_sets, latency) - if candidate_secondary: - return candidate_secondary + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + secondaries = secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + if secondaries: + return secondaries else: - return select_member(members, PRIMARY, [{}], latency) + return selection.primary_selection - elif mode == NEAREST: - for tags in tag_sets: - candidate = select_member_with_tags(members, tags, False, latency) - if candidate: - return candidate - # Ran out of tags. - return None +class Nearest(_ServerMode): + """Nearest read preference. - else: - raise ConfigurationError("Invalid mode %s" % repr(mode)) + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among all members of + a shard. + * When connected to a replica set queries are distributed among all + members. + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. -"""Commands that may be sent to replica-set secondaries, depending on - ReadPreference and tags. All other commands are always run on the primary. -""" -secondary_ok_commands = frozenset([ - "group", "aggregate", "collstats", "dbstats", "count", "distinct", - "geonear", "geosearch", "geowalk", "mapreduce", "getnonce", "authenticate", - "text", "parallelcollectionscan" -]) + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + __slots__ = () -class MovingAverage(object): - def __init__(self, samples): - """Immutable structure to track a 5-sample moving average. 
- """ - self.samples = samples[-5:] - assert self.samples - self.average = sum(self.samples) / float(len(self.samples)) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return member_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class _AggWritePref: + """Agg $out/$merge write preference. - def clone_with(self, sample): - """Get a copy of this instance plus a new sample""" - return MovingAverage(self.samples + [sample]) + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. - def get(self): + :param pref: The read preference to use on MongoDB 5.0+. + """ + + __slots__ = ("pref", "effective_pref") + + def __init__(self, pref: _ServerMode): + self.pref = pref + self.effective_pref: _ServerMode = ReadPreference.PRIMARY + + def selection_hook(self, topology_description: TopologyDescription) -> None: + common_wv = topology_description.common_wire_version + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self) -> str: + return f"_AggWritePref(pref={self.pref!r})" + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name: str) -> Any: + return getattr(self.effective_pref, name) + + +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) + + +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: + if mode == _PRIMARY: + if tag_sets not in (None, [{}]): + raise ConfigurationError("Read preference primary cannot be combined with tags") + if max_staleness != -1: + raise ConfigurationError( + "Read preference primary cannot be combined with maxStalenessSeconds" + ) + return Primary() + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore + + +_MODES = ( + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", +) + + +class ReadPreference: + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + See `Read and Write Settings `_ for code examples. + + A read preference is used in three cases: + + :class:`~pymongo.mongo_client.MongoClient` connected to a single mongod: + + - ``PRIMARY``: Queries are allowed if the server is standalone or a replica + set primary. + - All other modes allow queries to standalone servers, to a replica set + primary, or to replica set secondaries. + + :class:`~pymongo.mongo_client.MongoClient` initialized with the + ``replicaSet`` option: + + - ``PRIMARY``: Read from the primary. This is the default, and provides the + strongest consistency. If no primary is available, raise + :class:`~pymongo.errors.AutoReconnect`. 
+ + - ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is + none, read from a secondary. + + - ``SECONDARY``: Read from a secondary. If no secondary is available, + raise :class:`~pymongo.errors.AutoReconnect`. + + - ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise + from the primary. + + - ``NEAREST``: Read from any member. + + :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a + sharded cluster of replica sets: + + - ``PRIMARY``: Read from the primary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + This is the default. + + - ``PRIMARY_PREFERRED``: Read from the primary of the shard, or if there is + none, read from a secondary of the shard. + + - ``SECONDARY``: Read from a secondary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + + - ``SECONDARY_PREFERRED``: Read from a secondary of the shard if available, + otherwise from the shard primary. + + - ``NEAREST``: Read from any shard member. + """ + + PRIMARY = Primary() + PRIMARY_PREFERRED = PrimaryPreferred() + SECONDARY = Secondary() + SECONDARY_PREFERRED = SecondaryPreferred() + NEAREST = Nearest() + + +def read_pref_mode_from_name(name: str) -> int: + """Get the read preference mode from mongos/uri name.""" + return _MONGOS_MODES.index(name) + + +class MovingAverage: + """Tracks an exponentially-weighted moving average.""" + + average: Optional[float] + + def __init__(self) -> None: + self.average = None + + def add_sample(self, sample: float) -> None: + if sample < 0: + raise ValueError(f"duration cannot be negative {sample}") + if self.average is None: + self.average = sample + else: + # The Server Selection Spec requires an exponentially weighted + # average with alpha = 0.2. + self.average = 0.8 * self.average + 0.2 * sample + + def get(self) -> Optional[float]: + """Get the calculated average, or None if no samples yet.""" return self.average + + def reset(self) -> None: + self.average = None diff --git a/pymongo/replica_set_connection.py b/pymongo/replica_set_connection.py deleted file mode 100644 index 5d92d281d6..0000000000 --- a/pymongo/replica_set_connection.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2011-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Tools for connecting to a MongoDB replica set. - -.. warning:: - **DEPRECATED:** Please use :mod:`~pymongo.mongo_replica_set_client` instead. - -.. seealso:: :doc:`/examples/high_availability` for more examples of - how to connect to a replica set. - -To get a :class:`~pymongo.database.Database` instance from a -:class:`ReplicaSetConnection` use either dictionary-style or -attribute-style access: - -.. 
doctest:: - - >>> from pymongo import ReplicaSetConnection - >>> c = ReplicaSetConnection('localhost:27017', replicaSet='repl0') - >>> c.test_database - Database(ReplicaSetConnection([u'...', u'...']), u'test_database') - >>> c['test_database'] - Database(ReplicaSetConnection([u'...', u'...']), u'test_database') -""" -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.errors import ConfigurationError - - -class ReplicaSetConnection(MongoReplicaSetClient): - """Connection to a MongoDB replica set. - """ - - def __init__(self, hosts_or_uri=None, max_pool_size=None, - document_class=dict, tz_aware=False, **kwargs): - """Create a new connection to a MongoDB replica set. - - .. warning:: - **DEPRECATED:** :class:`ReplicaSetConnection` is deprecated. Please - use :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - instead - - The resultant connection object has connection-pooling built - in. It also performs auto-reconnection when necessary. If an - operation fails because of a connection error, - :class:`~pymongo.errors.ConnectionFailure` is raised. If - auto-reconnection will be performed, - :class:`~pymongo.errors.AutoReconnect` will be - raised. Application code should handle this exception - (recognizing that the operation failed) and then continue to - execute. - - Raises :class:`~pymongo.errors.ConnectionFailure` if - the connection cannot be made. - - The `hosts_or_uri` parameter can be a full `mongodb URI - `_, in addition to - a string of `host:port` pairs (e.g. 'host1:port1,host2:port2'). - If `hosts_or_uri` is None 'localhost:27017' will be used. - - .. note:: Instances of :class:`~ReplicaSetConnection` start a - background task to monitor the state of the replica set. This allows - it to quickly respond to changes in replica set configuration. - Before discarding an instance of :class:`~ReplicaSetConnection` make - sure you call :meth:`~close` to ensure that the monitor task is - cleanly shut down. - - :Parameters: - - `hosts_or_uri` (optional): A MongoDB URI or string of `host:port` - pairs. If a host is an IPv6 literal it must be enclosed in '[' and - ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for - localhost) - - `max_pool_size` (optional): The maximum number of connections - each pool will open simultaneously. If this is set, operations - will block if there are `max_pool_size` outstanding connections - from the pool. By default the pool size is unlimited. - - `document_class` (optional): default class to use for - documents returned from queries on this connection - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`ReplicaSetConnection` will be timezone - aware (otherwise they will be naive) - - `replicaSet`: (required) The name of the replica set to connect to. - The driver will verify that each host it connects to is a member of - this replica set. Can be passed as a keyword argument or as a - MongoDB URI option. - - | **Other optional parameters can be passed as keyword arguments:** - - - `host`: For compatibility with connection.Connection. If both - `host` and `hosts_or_uri` are specified `host` takes precedence. - - `port`: For compatibility with connection.Connection. The default - port number to use for hosts. - - `network_timeout`: For compatibility with connection.Connection. - The timeout (in seconds) to use for socket operations - default - is no timeout. 
If both `network_timeout` and `socketTimeoutMS` are - specified `network_timeout` takes precedence, matching - connection.Connection. - - `socketTimeoutMS`: (integer) How long (in milliseconds) a send or - receive on a socket can take before timing out. Defaults to ``None`` - (no timeout). - - `connectTimeoutMS`: (integer) How long (in milliseconds) a - connection can take to be opened before timing out. Defaults to - ``20000``. - - `waitQueueTimeoutMS`: (integer) How long (in milliseconds) a - thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer) Multiplied by max_pool_size to give - the number of threads allowed to wait for a socket at one time. - Defaults to ``None`` (no waiters). - - `auto_start_request`: If ``True`` (the default), each thread that - accesses this :class:`ReplicaSetConnection` has a socket allocated - to it for the thread's lifetime, for each member of the set. For - :class:`~pymongo.read_preferences.ReadPreference` PRIMARY, - auto_start_request=True ensures consistent reads, even if you read - after an unsafe write. For read preferences other than PRIMARY, - there are no consistency guarantees. - - `use_greenlets`: if ``True``, use a background Greenlet instead of - a background thread to monitor state of replica set. Additionally, - :meth:`start_request()` will ensure that the current greenlet uses - the same socket for all operations until :meth:`end_request()`. - `use_greenlets` with ReplicaSetConnection requires `Gevent - `_ to be installed. - - | **Write Concern options:** - - - `safe`: :class:`ReplicaSetConnection` **disables** acknowledgement - of write operations. Use ``safe=True`` to enable write - acknowledgement. - - `w`: (integer or string) Write operations will block until they have - been replicated to the specified number or tagged set of servers. - `w=` always includes the replica set primary (e.g. w=3 means - write to the primary and wait until replicated to **two** - secondaries). Implies safe=True. - - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. Implies safe=True. - - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. Implies safe=True. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. Implies safe=True. - - | **Read preference options:** - - - `slave_okay` or `slaveOk` (deprecated): Use `read_preference` - instead. - - `read_preference`: The read preference for this connection. - See :class:`~pymongo.read_preferences.ReadPreference` for available - - `tag_sets`: Read from replica-set members with these tags. - To specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. 
A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags." :class:`MongoReplicaSetClient` tries each set of - tags in turn until it finds a set of tags with at least one matching - member. - - `secondary_acceptable_latency_ms`: (integer) Any replica-set member - whose ping time is within secondary_acceptable_latency_ms of the - nearest member may accept reads. Default 15 milliseconds. - **Ignored by mongos** and must be configured on the command line. - See the localThreshold_ option for more information. - - | **SSL configuration:** - - - `ssl`: If ``True``, create the connection to the servers using SSL. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. If included with the ``certfile` then - only the ``ssl_certfile`` is needed. Implies ``ssl=True``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``ssl=True``. - - `ssl_cert_reqs`: Specifies whether a certificate is required from - the other side of the connection, and whether it will be validated - if provided. It must be one of the three values ``ssl.CERT_NONE`` - (certificates ignored), ``ssl.CERT_OPTIONAL`` - (not required, but validated if provided), or ``ssl.CERT_REQUIRED`` - (required and validated). If the value of this parameter is not - ``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point - to a file of CA certificates. Implies ``ssl=True``. - - `ssl_ca_certs`: The ca_certs file contains a set of concatenated - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``ssl=True``. - - .. versionchanged:: 2.5 - Added additional ssl options - .. versionchanged:: 2.3 - Added `tag_sets` and `secondary_acceptable_latency_ms` options. - .. versionchanged:: 2.2 - Added `auto_start_request` and `use_greenlets` options. - Added support for `host`, `port`, and `network_timeout` keyword - arguments for compatibility with connection.Connection. - .. versionadded:: 2.1 - - .. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold - """ - network_timeout = kwargs.pop('network_timeout', None) - if network_timeout is not None: - if (not isinstance(network_timeout, (int, float)) or - network_timeout <= 0): - raise ConfigurationError("network_timeout must " - "be a positive integer") - kwargs['socketTimeoutMS'] = network_timeout * 1000 - - kwargs['auto_start_request'] = kwargs.get('auto_start_request', True) - kwargs['safe'] = kwargs.get('safe', False) - - super(ReplicaSetConnection, self).__init__( - hosts_or_uri, max_pool_size, document_class, tz_aware, **kwargs) - - def __repr__(self): - return "ReplicaSetConnection(%r)" % (["%s:%d" % n - for n in self.hosts],) diff --git a/pymongo/response.py b/pymongo/response.py new file mode 100644 index 0000000000..211ddf2354 --- /dev/null +++ b/pymongo/response.py @@ -0,0 +1,130 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Represent a response from the server.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.message import _OpMsg, _OpReply + from pymongo.typings import _Address, _AgnosticConnection, _DocumentOut + + +class Response: + __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: Sequence[Mapping[str, Any]], + ): + """Represent a response from the server. + + :param data: A network response message. + :param address: (host, port) of the source server. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: if the response is the result of a db command. + """ + self._data = data + self._address = address + self._request_id = request_id + self._duration = duration + self._from_command = from_command + self._docs = docs + + @property + def data(self) -> Union[_OpMsg, _OpReply]: + """Server response's raw BSON bytes.""" + return self._data + + @property + def address(self) -> _Address: + """(host, port) of the source server.""" + return self._address + + @property + def request_id(self) -> int: + """The request id of this operation.""" + return self._request_id + + @property + def duration(self) -> Optional[timedelta]: + """The duration of the operation.""" + return self._duration + + @property + def from_command(self) -> bool: + """If the response is a result from a db command.""" + return self._from_command + + @property + def docs(self) -> Sequence[Mapping[str, Any]]: + """The decoded document(s).""" + return self._docs + + +class PinnedResponse(Response): + __slots__ = ("_conn", "_more_to_come") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + conn: _AgnosticConnection, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: list[_DocumentOut], + more_to_come: bool, + ): + """Represent a response to an exhaust cursor's initial query. + + :param data: A network response message. + :param address: (host, port) of the source server. + :param conn: The AsyncConnection/Connection used for the initial query. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: If the response is the result of a db command. + :param docs: List of documents. + :param more_to_come: Bool indicating whether cursor is ready to be + exhausted. + """ + super().__init__(data, address, request_id, duration, from_command, docs) + self._conn = conn + self._more_to_come = more_to_come + + @property + def conn(self) -> _AgnosticConnection: + """The AsyncConnection/Connection used for the initial query. + + The server will send batches on this socket, without waiting for + getMores from the client, until the result set is exhausted or there + is an error. + """ + return self._conn + + @property + def more_to_come(self) -> bool: + """If true, server is ready to send batches on the socket until the + result set is exhausted or there is an error. 
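A minimal sketch of an exhaust-cursor drain loop driven by more_to_come;
fetch_initial and receive_batch are hypothetical stand-ins for the driver's
internal network layer:

    def drain_exhaust_cursor(fetch_initial, receive_batch):
        response = fetch_initial()  # -> PinnedResponse
        docs = list(response.docs)
        while response.more_to_come:
            # Batches keep arriving on the pinned connection without any
            # further getMore round-trips.
            response = receive_batch(response.conn)
            docs.extend(response.docs)
        return docs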
+ """ + return self._more_to_come diff --git a/pymongo/results.py b/pymongo/results.py new file mode 100644 index 0000000000..bcce121fe7 --- /dev/null +++ b/pymongo/results.py @@ -0,0 +1,367 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Result class definitions. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import Any, Mapping, MutableMapping, Optional, cast + +from pymongo.errors import InvalidOperation + + +class _WriteResult: + """Base class for write result classes.""" + + __slots__ = ("__acknowledged",) + + def __init__(self, acknowledged: bool) -> None: + self.__acknowledged = acknowledged + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__acknowledged})" + + def _raise_if_unacknowledged(self, property_name: str) -> None: + """Raise an exception on property access if unacknowledged.""" + if not self.__acknowledged: + raise InvalidOperation( + f"A value for {property_name} is not available when " + "the write is unacknowledged. Check the " + "acknowledged attribute to avoid this " + "error." + ) + + @property + def acknowledged(self) -> bool: + """Is this the result of an acknowledged write operation? + + The :attr:`acknowledged` attribute will be ``False`` when using + ``WriteConcern(w=0)``, otherwise ``True``. + + .. note:: + If the :attr:`acknowledged` attribute is ``False`` all other + attributes of this class will raise + :class:`~pymongo.errors.InvalidOperation` when accessed. Values for + other attributes cannot be determined if the write operation was + unacknowledged. + + .. seealso:: + :class:`~pymongo.write_concern.WriteConcern` + """ + return self.__acknowledged + + +class InsertOneResult(_WriteResult): + """The return type for :meth:`~pymongo.collection.Collection.insert_one` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + """ + + __slots__ = ("__inserted_id",) + + def __init__(self, inserted_id: Any, acknowledged: bool) -> None: + self.__inserted_id = inserted_id + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_id!r}, acknowledged={self.acknowledged})" + ) + + @property + def inserted_id(self) -> Any: + """The inserted document's _id.""" + return self.__inserted_id + + +class InsertManyResult(_WriteResult): + """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" + + __slots__ = ("__inserted_ids",) + + def __init__(self, inserted_ids: list[Any], acknowledged: bool) -> None: + self.__inserted_ids = inserted_ids + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_ids!r}, acknowledged={self.acknowledged})" + ) + + @property + def inserted_ids(self) -> list[Any]: + """A list of _ids of the inserted documents, in the order provided. + + .. 
note:: If ``False`` is passed for the `ordered` parameter to + :meth:`~pymongo.collection.Collection.insert_many` the server + may have inserted the documents in a different order than what + is presented here. + """ + return self.__inserted_ids + + +class UpdateResult(_WriteResult): + """The return type for :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, and + :meth:`~pymongo.collection.Collection.replace_one`, and as part of + :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + """ + + __slots__ = ( + "__raw_result", + "__in_client_bulk", + ) + + def __init__( + self, + raw_result: Optional[Mapping[str, Any]], + acknowledged: bool, + in_client_bulk: bool = False, + ): + self.__raw_result = raw_result + self.__in_client_bulk = in_client_bulk + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" + + @property + def raw_result(self) -> Optional[Mapping[str, Any]]: + """The raw result document returned by the server.""" + return self.__raw_result + + @property + def matched_count(self) -> int: + """The number of documents matched for this update.""" + self._raise_if_unacknowledged("matched_count") + assert self.__raw_result is not None + if not self.__in_client_bulk and self.upserted_id is not None: + return 0 + return self.__raw_result.get("n", 0) + + @property + def modified_count(self) -> int: + """The number of documents modified.""" + self._raise_if_unacknowledged("modified_count") + assert self.__raw_result is not None + return cast(int, self.__raw_result.get("nModified")) + + @property + def upserted_id(self) -> Any: + """The _id of the inserted document if an upsert took place. Otherwise + ``None``. + """ + self._raise_if_unacknowledged("upserted_id") + assert self.__raw_result is not None + if self.__in_client_bulk and self.__raw_result.get("upserted"): + return self.__raw_result["upserted"]["_id"] + return self.__raw_result.get("upserted", None) + + @property + def did_upsert(self) -> bool: + """Whether an upsert took place. + + .. versionadded:: 4.9 + """ + assert self.__raw_result is not None + return "upserted" in self.__raw_result + + +class DeleteResult(_WriteResult): + """The return type for :meth:`~pymongo.collection.Collection.delete_one` + and :meth:`~pymongo.collection.Collection.delete_many` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. 
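A sketch of how UpdateResult above reads the raw server reply; the raw_result
document is illustrative server output for an upsert:

    from bson import ObjectId
    from pymongo.results import UpdateResult

    oid = ObjectId()
    result = UpdateResult({"n": 1, "nModified": 0, "upserted": oid}, acknowledged=True)
    assert result.did_upsert
    assert result.upserted_id == oid
    assert result.matched_count == 0   # an upsert matched no existing document
    assert result.modified_count == 0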
+ """ + + __slots__ = ("__raw_result",) + + def __init__(self, raw_result: Mapping[str, Any], acknowledged: bool) -> None: + self.__raw_result = raw_result + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" + + @property + def raw_result(self) -> Mapping[str, Any]: + """The raw result document returned by the server.""" + return self.__raw_result + + @property + def deleted_count(self) -> int: + """The number of documents deleted.""" + self._raise_if_unacknowledged("deleted_count") + return self.__raw_result.get("n", 0) + + +class _BulkWriteResultBase(_WriteResult): + """Private base class for bulk write API results.""" + + __slots__ = ("__bulk_api_result",) + + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: + self.__bulk_api_result = bulk_api_result + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__bulk_api_result!r}, acknowledged={self.acknowledged})" + + @property + def bulk_api_result(self) -> dict[str, Any]: + """The raw bulk write API result.""" + return self.__bulk_api_result + + @property + def inserted_count(self) -> int: + """The number of documents inserted.""" + self._raise_if_unacknowledged("inserted_count") + return cast(int, self.__bulk_api_result.get("nInserted")) + + @property + def matched_count(self) -> int: + """The number of documents matched for an update.""" + self._raise_if_unacknowledged("matched_count") + return cast(int, self.__bulk_api_result.get("nMatched")) + + @property + def modified_count(self) -> int: + """The number of documents modified.""" + self._raise_if_unacknowledged("modified_count") + return cast(int, self.__bulk_api_result.get("nModified")) + + @property + def deleted_count(self) -> int: + """The number of documents deleted.""" + self._raise_if_unacknowledged("deleted_count") + if "nRemoved" in self.__bulk_api_result: + return cast(int, self.__bulk_api_result.get("nRemoved")) + else: + return cast(int, self.__bulk_api_result.get("nDeleted")) + + @property + def upserted_count(self) -> int: + """The number of documents upserted.""" + self._raise_if_unacknowledged("upserted_count") + return cast(int, self.__bulk_api_result.get("nUpserted")) + + +class BulkWriteResult(_BulkWriteResultBase): + """An object wrapper for collection-level bulk write API results.""" + + __slots__ = () + + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: + """Create a BulkWriteResult instance. + + :param bulk_api_result: A result dict from the collection-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. 
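The unacknowledged guard in action, using an illustrative empty result:

    from pymongo.errors import InvalidOperation
    from pymongo.results import BulkWriteResult

    unacked = BulkWriteResult({}, acknowledged=False)
    assert not unacked.acknowledged
    try:
        unacked.inserted_count  # any counter raises for w=0 writes
    except InvalidOperation:
        pass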
+ """ + super().__init__(bulk_api_result, acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.bulk_api_result!r}, acknowledged={self.acknowledged})" + ) + + @property + def upserted_ids(self) -> Optional[dict[int, Any]]: + """A map of operation index to the _id of the upserted document.""" + self._raise_if_unacknowledged("upserted_ids") + if self.bulk_api_result: + return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} + return None + + +class ClientBulkWriteResult(_BulkWriteResultBase): + """An object wrapper for client-level bulk write API results.""" + + __slots__ = ("__has_verbose_results",) + + def __init__( + self, + bulk_api_result: MutableMapping[str, Any], + acknowledged: bool, + has_verbose_results: bool, + ) -> None: + """Create a ClientBulkWriteResult instance. + + :param bulk_api_result: A result dict from the client-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + :param has_verbose_results: Should the returned result be verbose? + If ``False``, then the ``insert_results``, ``update_results``, and + ``delete_results`` properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + """ + self.__has_verbose_results = has_verbose_results + super().__init__( + bulk_api_result, # type: ignore[arg-type] + acknowledged, + ) + + def __repr__(self) -> str: + return "{}({!r}, acknowledged={}, verbose={})".format( + self.__class__.__name__, + self.bulk_api_result, + self.acknowledged, + self.has_verbose_results, + ) + + def _raise_if_not_verbose(self, property_name: str) -> None: + """Raise an exception on property access if verbose results are off.""" + if not self.__has_verbose_results: + raise InvalidOperation( + f"A value for {property_name} is not available when " + "the results are not set to be verbose. Check the " + "verbose_results attribute to avoid this error." + ) + + @property + def has_verbose_results(self) -> bool: + """Whether the returned results should be verbose.""" + return self.__has_verbose_results + + @property + def insert_results(self) -> Mapping[int, InsertOneResult]: + """A map of successful insertion operations to their results.""" + self._raise_if_unacknowledged("insert_results") + self._raise_if_not_verbose("insert_results") + return cast( + Mapping[int, InsertOneResult], + self.bulk_api_result.get("insertResults"), + ) + + @property + def update_results(self) -> Mapping[int, UpdateResult]: + """A map of successful update operations to their results.""" + self._raise_if_unacknowledged("update_results") + self._raise_if_not_verbose("update_results") + return cast( + Mapping[int, UpdateResult], + self.bulk_api_result.get("updateResults"), + ) + + @property + def delete_results(self) -> Mapping[int, DeleteResult]: + """A map of successful delete operations to their results.""" + self._raise_if_unacknowledged("delete_results") + self._raise_if_not_verbose("delete_results") + return cast( + Mapping[int, DeleteResult], + self.bulk_api_result.get("deleteResults"), + ) diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py new file mode 100644 index 0000000000..9cef22419e --- /dev/null +++ b/pymongo/saslprep.py @@ -0,0 +1,116 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""An implementation of RFC4013 SASLprep.""" +from __future__ import annotations + +from typing import Any, Optional + +try: + import stringprep +except ImportError: + HAVE_STRINGPREP = False + + def saslprep( + data: Any, + prohibit_unassigned_code_points: Optional[bool] = True, # noqa: ARG001 + ) -> Any: + """SASLprep dummy""" + if isinstance(data, str): + raise TypeError( + "The stringprep module is not available. Usernames and " + "passwords must be instances of bytes." + ) + return data + +else: + HAVE_STRINGPREP = True + import unicodedata + + # RFC4013 section 2.3 prohibited output. + _PROHIBITED = ( + # A strict reading of RFC 4013 requires table c12 here, but + # characters from it are mapped to SPACE in the Map step. Can + # normalization reintroduce them somehow? + stringprep.in_table_c12, + stringprep.in_table_c21_c22, + stringprep.in_table_c3, + stringprep.in_table_c4, + stringprep.in_table_c5, + stringprep.in_table_c6, + stringprep.in_table_c7, + stringprep.in_table_c8, + stringprep.in_table_c9, + ) + + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> Any: + """An implementation of RFC4013 SASLprep. + + :param data: The string to SASLprep. Unicode strings + (:class:`str`) are supported. Byte strings + (:class:`bytes`) are ignored. + :param prohibit_unassigned_code_points: True / False. RFC 3454 + and RFCs for various SASL mechanisms distinguish between + `queries` (unassigned code points allowed) and + `stored strings` (unassigned code points prohibited). Defaults + to ``True`` (unassigned code points are prohibited). + + :return: The SASLprep'ed version of `data`. + """ + prohibited: Any + + if not isinstance(data, str): + return data + + if prohibit_unassigned_code_points: + prohibited = (*_PROHIBITED, stringprep.in_table_a1) + else: + prohibited = _PROHIBITED + + # RFC3454 section 2, step 1 - Map + # RFC4013 section 2.1 mappings + # Map Non-ASCII space characters to SPACE (U+0020). Map + # commonly mapped to nothing characters to, well, nothing. + in_table_c12 = stringprep.in_table_c12 + in_table_b1 = stringprep.in_table_b1 + data = "".join( + ["\u0020" if in_table_c12(elt) else elt for elt in data if not in_table_b1(elt)] + ) + + # RFC3454 section 2, step 2 - Normalize + # RFC4013 section 2.2 normalization + data = unicodedata.ucd_3_2_0.normalize("NFKC", data) + + in_table_d1 = stringprep.in_table_d1 + if in_table_d1(data[0]): + if not in_table_d1(data[-1]): + # RFC3454, Section 6, #3. If a string contains any + # RandALCat character, the first and last characters + # MUST be RandALCat characters. + raise ValueError("SASLprep: failed bidirectional check") + # RFC3454, Section 6, #2. If a string contains any RandALCat + # character, it MUST NOT contain any LCat character. + prohibited = (*prohibited, stringprep.in_table_d2) + else: + # RFC3454, Section 6, #3. Following the logic of #3, if + # the first character is not a RandALCat, no other character + # can be either. 
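+            # For example, saslprep("a\u05d0") then fails the prohibited
+            # character check below: U+05D0 (HEBREW LETTER ALEF) is
+            # RandALCat, while the first character "a" is not. (The example
+            # string is illustrative, not taken from the RFC.)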
+            prohibited = (*prohibited, in_table_d1)
+
+        # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi
+        for char in data:
+            if any(in_table(char) for in_table in prohibited):
+                raise ValueError("SASLprep: failed prohibited character check")
+
+        return data
diff --git a/pymongo/server_api.py b/pymongo/server_api.py
new file mode 100644
index 0000000000..40bb1aac3e
--- /dev/null
+++ b/pymongo/server_api.py
@@ -0,0 +1,173 @@
+# Copyright 2020-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+"""Support for MongoDB Stable API.
+
+.. _versioned-api-ref:
+
+MongoDB Stable API
+=====================
+
+Starting in MongoDB 5.0, applications can specify the server API version
+to use when creating a :class:`~pymongo.mongo_client.MongoClient`. Doing so
+ensures that the driver behaves in a manner compatible with that server API
+version, regardless of the server's actual release version.
+
+Declaring an API Version
+````````````````````````
+
+.. attention:: Stable API requires MongoDB >=5.0.
+
+To configure MongoDB Stable API, pass the ``server_api`` keyword option to
+:class:`~pymongo.mongo_client.MongoClient`::
+
+    >>> from pymongo.mongo_client import MongoClient
+    >>> from pymongo.server_api import ServerApi
+    >>>
+    >>> # Declare API version "1" for MongoClient "client"
+    >>> server_api = ServerApi('1')
+    >>> client = MongoClient(server_api=server_api)
+
+The declared API version is applied to all commands run through ``client``,
+including those sent through the generic
+:meth:`~pymongo.database.Database.command` helper.
+
+.. note:: Declaring an API version on the
+   :class:`~pymongo.mongo_client.MongoClient` **and** specifying stable
+   API options in :meth:`~pymongo.database.Database.command` command document
+   is not supported and will lead to undefined behaviour.
+
+To run any command without declaring a server API version or using a different
+API version, create a separate :class:`~pymongo.mongo_client.MongoClient`
+instance.
+
+Strict Mode
+```````````
+
+Configuring ``strict`` mode will cause the MongoDB server to reject all
+commands that are not part of the declared :attr:`ServerApi.version`. This
+includes command options and aggregation pipeline stages.
+
+For example::
+
+    >>> server_api = ServerApi('1', strict=True)
+    >>> client = MongoClient(server_api=server_api)
+    >>> client.test.command('count', 'test')
+    Traceback (most recent call last):
+    ...
+    pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'}
+
+Detecting API Deprecations
+``````````````````````````
+
+The ``deprecationErrors`` option can be used to enable command failures
+when using functionality that is deprecated from the configured
+:attr:`ServerApi.version`. 
For example:: + + >>> server_api = ServerApi('1', deprecation_errors=True) + >>> client = MongoClient(server_api=server_api) + +Note that at the time of this writing, no deprecated APIs exist. + +Classes +======= +""" +from __future__ import annotations + +from typing import Any, MutableMapping, Optional + + +class ServerApiVersion: + """An enum that defines values for :attr:`ServerApi.version`. + + .. versionadded:: 3.12 + """ + + V1 = "1" + """Server API version "1".""" + + +class ServerApi: + """MongoDB Stable API.""" + + def __init__( + self, version: str, strict: Optional[bool] = None, deprecation_errors: Optional[bool] = None + ): + """Options to configure MongoDB Stable API. + + :param version: The API version string. Must be one of the values in + :class:`ServerApiVersion`. + :param strict: Set to ``True`` to enable API strict mode. + Defaults to ``None`` which means "use the server's default". + :param deprecation_errors: Set to ``True`` to enable + deprecation errors. Defaults to ``None`` which means "use the + server's default". + + .. versionadded:: 3.12 + """ + if version != ServerApiVersion.V1: + raise ValueError(f"Unknown ServerApi version: {version}") + if strict is not None and not isinstance(strict, bool): + raise TypeError( + "Wrong type for ServerApi strict, value must be an instance " + f"of bool, not {type(strict)}" + ) + if deprecation_errors is not None and not isinstance(deprecation_errors, bool): + raise TypeError( + "Wrong type for ServerApi deprecation_errors, value must be " + f"an instance of bool, not {type(deprecation_errors)}" + ) + self._version = version + self._strict = strict + self._deprecation_errors = deprecation_errors + + @property + def version(self) -> str: + """The API version setting. + + This value is sent to the server in the "apiVersion" field. + """ + return self._version + + @property + def strict(self) -> Optional[bool]: + """The API strict mode setting. + + When set, this value is sent to the server in the "apiStrict" field. + """ + return self._strict + + @property + def deprecation_errors(self) -> Optional[bool]: + """The API deprecation errors setting. + + When set, this value is sent to the server in the + "apiDeprecationErrors" field. + """ + return self._deprecation_errors + + +def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: + """Internal helper which adds API versioning options to a command. + + :param cmd: The command. + :param server_api: A :class:`ServerApi` or ``None``. + """ + if not server_api: + return + cmd["apiVersion"] = server_api.version + if server_api.strict is not None: + cmd["apiStrict"] = server_api.strict + if server_api.deprecation_errors is not None: + cmd["apiDeprecationErrors"] = server_api.deprecation_errors diff --git a/pymongo/server_description.py b/pymongo/server_description.py new file mode 100644 index 0000000000..d038c04b1c --- /dev/null +++ b/pymongo/server_description.py @@ -0,0 +1,302 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Represent one server the driver is connected to.
+
+.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs.
+"""
+from __future__ import annotations
+
+import time
+import warnings
+from typing import Any, Mapping, Optional
+
+from bson import EPOCH_NAIVE
+from bson.objectid import ObjectId
+from pymongo.hello import Hello
+from pymongo.server_type import SERVER_TYPE
+from pymongo.typings import ClusterTime, _Address
+
+
+class ServerDescription:
+    """Immutable representation of one server.
+
+    :param address: A (host, port) pair
+    :param hello: Optional Hello instance
+    :param round_trip_time: Optional float
+    :param error: Optional, the last error attempting to connect to the server
+    :param min_round_trip_time: Optional float, the min latency from the most recent samples
+    """
+
+    __slots__ = (
+        "_address",
+        "_server_type",
+        "_all_hosts",
+        "_tags",
+        "_replica_set_name",
+        "_primary",
+        "_max_bson_size",
+        "_max_message_size",
+        "_max_write_batch_size",
+        "_min_wire_version",
+        "_max_wire_version",
+        "_round_trip_time",
+        "_min_round_trip_time",
+        "_me",
+        "_is_writable",
+        "_is_readable",
+        "_ls_timeout_minutes",
+        "_error",
+        "_set_version",
+        "_election_id",
+        "_cluster_time",
+        "_last_write_date",
+        "_last_update_time",
+        "_topology_version",
+    )
+
+    def __init__(
+        self,
+        address: _Address,
+        hello: Optional[Hello[dict[str, Any]]] = None,
+        round_trip_time: Optional[float] = None,
+        error: Optional[Exception] = None,
+        min_round_trip_time: float = 0.0,
+    ) -> None:
+        self._address = address
+        if not hello:
+            hello = Hello({})
+
+        self._server_type = hello.server_type
+        self._all_hosts = hello.all_hosts
+        self._tags = hello.tags
+        self._replica_set_name = hello.replica_set_name
+        self._primary = hello.primary
+        self._max_bson_size = hello.max_bson_size
+        self._max_message_size = hello.max_message_size
+        self._max_write_batch_size = hello.max_write_batch_size
+        self._min_wire_version = hello.min_wire_version
+        self._max_wire_version = hello.max_wire_version
+        self._set_version = hello.set_version
+        self._election_id = hello.election_id
+        self._cluster_time = hello.cluster_time
+        self._is_writable = hello.is_writable
+        self._is_readable = hello.is_readable
+        self._ls_timeout_minutes = hello.logical_session_timeout_minutes
+        self._round_trip_time = round_trip_time
+        self._min_round_trip_time = min_round_trip_time
+        self._me = hello.me
+        self._last_update_time = time.monotonic()
+        self._error = error
+        self._topology_version = hello.topology_version
+        if error:
+            details = getattr(error, "details", None)
+            if isinstance(details, dict):
+                self._topology_version = details.get("topologyVersion")
+
+        self._last_write_date: Optional[float]
+        if hello.last_write_date:
+            # Convert from datetime to seconds.
+            delta = hello.last_write_date - EPOCH_NAIVE
+            self._last_write_date = delta.total_seconds()
+        else:
+            self._last_write_date = None
+
+    @property
+    def address(self) -> _Address:
+        """The address (host, port) of this server."""
+        return self._address
+
+    @property
+    def server_type(self) -> int:
+        """The type of this server."""
+        return self._server_type
+
+    @property
+    def server_type_name(self) -> str:
+        """The server type as a human readable string.
+
+        .. 
versionadded:: 3.4 + """ + return SERVER_TYPE._fields[self._server_type] + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return self._all_hosts + + @property + def tags(self) -> Mapping[str, Any]: + return self._tags + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._replica_set_name + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + return self._primary + + @property + def max_bson_size(self) -> int: + return self._max_bson_size + + @property + def max_message_size(self) -> int: + return self._max_message_size + + @property + def max_write_batch_size(self) -> int: + return self._max_write_batch_size + + @property + def min_wire_version(self) -> int: + return self._min_wire_version + + @property + def max_wire_version(self) -> int: + return self._max_wire_version + + @property + def set_version(self) -> Optional[int]: + return self._set_version + + @property + def election_id(self) -> Optional[ObjectId]: + return self._election_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._cluster_time + + @property + def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) + return self._set_version, self._election_id + + @property + def me(self) -> Optional[tuple[str, int]]: + return self._me + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._ls_timeout_minutes + + @property + def last_write_date(self) -> Optional[float]: + return self._last_write_date + + @property + def last_update_time(self) -> float: + return self._last_update_time + + @property + def round_trip_time(self) -> Optional[float]: + """The current average latency or None.""" + # This override is for unittesting only! 
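+        # Tests may pre-populate the class-level _host_to_round_trip_time
+        # dict (defined at the bottom of this class) to pin the RTT
+        # reported for a given address.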
+        if self._address in self._host_to_round_trip_time:
+            return self._host_to_round_trip_time[self._address]
+
+        return self._round_trip_time
+
+    @property
+    def min_round_trip_time(self) -> float:
+        """The min latency from the most recent samples."""
+        return self._min_round_trip_time
+
+    @property
+    def error(self) -> Optional[Exception]:
+        """The last error attempting to connect to the server, or None."""
+        return self._error
+
+    @property
+    def is_writable(self) -> bool:
+        return self._is_writable
+
+    @property
+    def is_readable(self) -> bool:
+        return self._is_readable
+
+    @property
+    def mongos(self) -> bool:
+        return self._server_type == SERVER_TYPE.Mongos
+
+    @property
+    def is_server_type_known(self) -> bool:
+        return self.server_type != SERVER_TYPE.Unknown
+
+    @property
+    def retryable_writes_supported(self) -> bool:
+        """Checks if this server supports retryable writes."""
+        return (
+            self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)
+        ) or self._server_type == SERVER_TYPE.LoadBalancer
+
+    @property
+    def retryable_reads_supported(self) -> bool:
+        """Checks if this server supports retryable reads."""
+        return self._max_wire_version >= 6
+
+    @property
+    def topology_version(self) -> Optional[Mapping[str, Any]]:
+        return self._topology_version
+
+    def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription:
+        unknown = ServerDescription(self.address, error=error)
+        unknown._topology_version = self.topology_version
+        return unknown
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, ServerDescription):
+            return (
+                (self._address == other.address)
+                and (self._server_type == other.server_type)
+                and (self._min_wire_version == other.min_wire_version)
+                and (self._max_wire_version == other.max_wire_version)
+                and (self._me == other.me)
+                and (self._all_hosts == other.all_hosts)
+                and (self._tags == other.tags)
+                and (self._replica_set_name == other.replica_set_name)
+                and (self._set_version == other.set_version)
+                and (self._election_id == other.election_id)
+                and (self._primary == other.primary)
+                and (self._ls_timeout_minutes == other.logical_session_timeout_minutes)
+                and (self._error == other.error)
+            )
+
+        return NotImplemented
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __repr__(self) -> str:
+        errmsg = ""
+        if self.error:
+            errmsg = f", error={self.error!r}"
+        return "<{} {} server_type: {}, rtt: {}{}>".format(
+            self.__class__.__name__,
+            self.address,
+            self.server_type_name,
+            self.round_trip_time,
+            errmsg,
+        )
+
+    # For unittesting only. Use under no circumstances!
+    _host_to_round_trip_time: dict = {}  # type: ignore[type-arg]
diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py
new file mode 100644
index 0000000000..0d1425ab31
--- /dev/null
+++ b/pymongo/server_selectors.py
@@ -0,0 +1,174 @@
+# Copyright 2014-2016 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+ +"""Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, TypeVar, cast + +from pymongo.server_type import SERVER_TYPE + +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +T = TypeVar("T") +TagSet = Mapping[str, Any] +TagSets = Sequence[TagSet] + + +class Selection: + """Input or output of a server selector function.""" + + @classmethod + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: + known_servers = topology_description.known_servers + primary = None + for sd in known_servers: + if sd.server_type == SERVER_TYPE.RSPrimary: + primary = sd + break + + return Selection( + topology_description, + topology_description.known_servers, + topology_description.common_wire_version, + primary, + ) + + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: list[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): + self.topology_description = topology_description + self.server_descriptions = server_descriptions + self.primary = primary + self.common_wire_version = common_wire_version + + def with_server_descriptions(self, server_descriptions: list[ServerDescription]) -> Selection: + return Selection( + self.topology_description, server_descriptions, self.common_wire_version, self.primary + ) + + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: + secondaries = secondary_server_selector(self) + if secondaries.server_descriptions: + return max( + secondaries.server_descriptions, key=lambda sd: cast(float, sd.last_write_date) + ) + return None + + @property + def primary_selection(self) -> Selection: + primaries = [self.primary] if self.primary else [] + return self.with_server_descriptions(primaries) + + @property + def heartbeat_frequency(self) -> int: + return self.topology_description.heartbeat_frequency + + @property + def topology_type(self) -> int: + return self.topology_description.topology_type + + def __bool__(self) -> bool: + return bool(self.server_descriptions) + + def __getitem__(self, item: int) -> ServerDescription: + return self.server_descriptions[item] + + +def any_server_selector(selection: T) -> T: + return selection + + +def readable_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.is_readable] + ) + + +def writable_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.is_writable] + ) + + +def secondary_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] + ) + + +def arbiter_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] + ) + + +def writable_preferred_server_selector(selection: Selection) -> Selection: + """Like PrimaryPreferred but doesn't use tags or latency.""" + return writable_server_selector(selection) or secondary_server_selector(selection) + + +def apply_single_tag_set(tag_set: TagSet, selection: Selection) -> Selection: + """All servers matching 
one tag set.
+
+    A tag set is a dict. A server matches if its tags are a superset:
+    A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
+
+    The empty tag set {} matches any server.
+    """
+
+    def tags_match(server_tags: Mapping[str, Any]) -> bool:
+        for key, value in tag_set.items():
+            if key not in server_tags or server_tags[key] != value:
+                return False
+
+        return True
+
+    return selection.with_server_descriptions(
+        [s for s in selection.server_descriptions if tags_match(s.tags)]
+    )
+
+
+def apply_tag_sets(tag_sets: TagSets, selection: Selection) -> Selection:
+    """All servers matching a list of tag sets.
+
+    tag_sets is a list of dicts. The empty tag set {} matches any server,
+    and may be provided at the end of the list as a fallback. So
+    [{'a': 'value'}, {}] expresses a preference for servers tagged
+    {'a': 'value'}, but accepts any server if none matches the first
+    preference.
+    """
+    for tag_set in tag_sets:
+        with_tag_set = apply_single_tag_set(tag_set, selection)
+        if with_tag_set:
+            return with_tag_set
+
+    return selection.with_server_descriptions([])
+
+
+def secondary_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection:
+    """All near-enough secondaries matching the tag sets."""
+    return apply_tag_sets(tag_sets, secondary_server_selector(selection))
+
+
+def member_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection:
+    """All near-enough members matching the tag sets."""
+    return apply_tag_sets(tag_sets, readable_server_selector(selection))
diff --git a/pymongo/server_type.py b/pymongo/server_type.py
new file mode 100644
index 0000000000..7a6d2aaf14
--- /dev/null
+++ b/pymongo/server_type.py
@@ -0,0 +1,33 @@
+# Copyright 2014-2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Type codes for MongoDB servers."""
+from __future__ import annotations
+
+from typing import NamedTuple
+
+
+class _ServerType(NamedTuple):
+    Unknown: int
+    Mongos: int
+    RSPrimary: int
+    RSSecondary: int
+    RSArbiter: int
+    RSOther: int
+    RSGhost: int
+    Standalone: int
+    LoadBalancer: int
+
+
+SERVER_TYPE = _ServerType(*range(9))
diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py
new file mode 100644
index 0000000000..78861854ab
--- /dev/null
+++ b/pymongo/socket_checker.py
@@ -0,0 +1,105 @@
+# Copyright 2020-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Select / poll helper""" +from __future__ import annotations + +import errno +import select +import sys +from typing import Any, Optional, cast + +# PYTHON-2320: Jython does not fully support poll on SSL sockets, +# https://bugs.jython.org/issue2900 +_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith("java") +_SelectError = getattr(select, "error", OSError) + + +def _errno_from_exception(exc: BaseException) -> Optional[int]: + if hasattr(exc, "errno"): + return cast(int, exc.errno) + if exc.args: + return cast(int, exc.args[0]) + return None + + +class SocketChecker: + def __init__(self) -> None: + self._poller: Optional[select.poll] + if _HAVE_POLL: + self._poller = select.poll() + else: + self._poller = None + + def select( + self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0 + ) -> bool: + """Select for reads or writes with a timeout in seconds (or None). + + Returns True if the socket is readable/writable, False on timeout. + """ + res: Any + while True: + try: + if self._poller: + mask = select.POLLERR | select.POLLHUP + if read: + mask = mask | select.POLLIN | select.POLLPRI + if write: + mask = mask | select.POLLOUT + self._poller.register(sock, mask) + try: + # poll() timeout is in milliseconds. select() + # timeout is in seconds. + timeout_ = None if timeout is None else timeout * 1000 + res = self._poller.poll(timeout_) + # poll returns a possibly-empty list containing + # (fd, event) 2-tuples for the descriptors that have + # events or errors to report. Return True if the list + # is not empty. + return bool(res) + finally: + self._poller.unregister(sock) + else: + rlist = [sock] if read else [] + wlist = [sock] if write else [] + res = select.select(rlist, wlist, [sock], timeout) + # select returns a 3-tuple of lists of objects that are + # ready: subsets of the first three arguments. Return + # True if any of the lists are not empty. + return any(res) + except (_SelectError, OSError) as exc: # type: ignore + if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): + continue + raise + + def socket_closed(self, sock: Any) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + try: + return self.select(sock, read=True) + except (RuntimeError, KeyError): + # RuntimeError is raised during a concurrent poll. KeyError + # is raised by unregister if the socket is not in the poller. + # These errors should not be possible since we protect the + # poller with a mutex. + raise + except ValueError: + # ValueError is raised by register/unregister/select if the + # socket file descriptor is negative or outside the range for + # select (> 1023). + return True + except Exception: + # Any other exceptions should be attributed to a closed + # or invalid socket. + return True diff --git a/pymongo/son_manipulator.py b/pymongo/son_manipulator.py deleted file mode 100644 index 02576e83fa..0000000000 --- a/pymongo/son_manipulator.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Manipulators that can edit SON objects as they enter and exit a database. - -New manipulators should be defined as subclasses of SONManipulator and can be -installed on a database by calling -`pymongo.database.Database.add_son_manipulator`.""" - -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.son import SON - - -class SONManipulator(object): - """A base son manipulator. - - This manipulator just saves and restores objects without changing them. - """ - - def will_copy(self): - """Will this SON manipulator make a copy of the incoming document? - - Derived classes that do need to make a copy should override this - method, returning True instead of False. All non-copying manipulators - will be applied first (so that the user's document will be updated - appropriately), followed by copying manipulators. - """ - return False - - def transform_incoming(self, son, collection): - """Manipulate an incoming SON object. - - :Parameters: - - `son`: the SON object to be inserted into the database - - `collection`: the collection the object is being inserted into - """ - if self.will_copy(): - return SON(son) - return son - - def transform_outgoing(self, son, collection): - """Manipulate an outgoing SON object. - - :Parameters: - - `son`: the SON object being retrieved from the database - - `collection`: the collection this object was stored in - """ - if self.will_copy(): - return SON(son) - return son - - -class ObjectIdInjector(SONManipulator): - """A son manipulator that adds the _id field if it is missing. - - .. versionchanged:: 2.7 - ObjectIdInjector is no longer used by PyMongo, but remains in this - module for backwards compatibility. - """ - - def transform_incoming(self, son, collection): - """Add an _id field if it is missing. - """ - if not "_id" in son: - son["_id"] = ObjectId() - return son - - -# This is now handled during BSON encoding (for performance reasons), -# but I'm keeping this here as a reference for those implementing new -# SONManipulators. -class ObjectIdShuffler(SONManipulator): - """A son manipulator that moves _id to the first position. - """ - - def will_copy(self): - """We need to copy to be sure that we are dealing with SON, not a dict. - """ - return True - - def transform_incoming(self, son, collection): - """Move _id to the front if it's there. - """ - if not "_id" in son: - return son - transformed = SON({"_id": son["_id"]}) - transformed.update(son) - return transformed - - -class NamespaceInjector(SONManipulator): - """A son manipulator that adds the _ns field. - """ - - def transform_incoming(self, son, collection): - """Add the _ns field to the incoming object - """ - son["_ns"] = collection.name - return son - - -class AutoReference(SONManipulator): - """Transparently reference and de-reference already saved embedded objects. - - This manipulator should probably only be used when the NamespaceInjector is - also being used, otherwise it doesn't make too much sense - documents can - only be auto-referenced if they have an *_ns* field. - - NOTE: this will behave poorly if you have a circular reference. - - TODO: this only works for documents that are in the same database. To fix - this we'll need to add a DatabaseInjector that adds *_db* and then make - use of the optional *database* support for DBRefs. 
- """ - - def __init__(self, db): - self.database = db - - def will_copy(self): - """We need to copy so the user's document doesn't get transformed refs. - """ - return True - - def transform_incoming(self, son, collection): - """Replace embedded documents with DBRefs. - """ - - def transform_value(value): - if isinstance(value, dict): - if "_id" in value and "_ns" in value: - return DBRef(value["_ns"], transform_value(value["_id"])) - else: - return transform_dict(SON(value)) - elif isinstance(value, list): - return [transform_value(v) for v in value] - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) - - def transform_outgoing(self, son, collection): - """Replace DBRefs with embedded documents. - """ - - def transform_value(value): - if isinstance(value, DBRef): - return self.database.dereference(value) - elif isinstance(value, list): - return [transform_value(v) for v in value] - elif isinstance(value, dict): - return transform_dict(SON(value)) - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) - -# TODO make a generic translator for custom types. Take encode, decode, -# should_encode and should_decode functions and just encode and decode where -# necessary. See examples/custom_type.py for where this would be useful. -# Alternatively it could take a should_encode, to_binary, from_binary and -# binary subtype. diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py new file mode 100644 index 0000000000..2ff7428cab --- /dev/null +++ b/pymongo/ssl_context.py @@ -0,0 +1,42 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""A fake SSLContext implementation.""" +from __future__ import annotations + +import ssl as _ssl + +# PROTOCOL_TLS_CLIENT is Python 3.6+ +PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) +OP_NO_SSLv2 = getattr(_ssl, "OP_NO_SSLv2", 0) +OP_NO_SSLv3 = getattr(_ssl, "OP_NO_SSLv3", 0) +OP_NO_COMPRESSION = getattr(_ssl, "OP_NO_COMPRESSION", 0) +# Python 3.7+, OpenSSL 1.1.0h+ +OP_NO_RENEGOTIATION = getattr(_ssl, "OP_NO_RENEGOTIATION", 0) + +HAS_SNI = getattr(_ssl, "HAS_SNI", False) +IS_PYOPENSSL = False + +# Errors raised by SSL sockets when in non-blocking mode. 
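+# SSLWantReadError / SSLWantWriteError mean the TLS operation must be
+# retried once the underlying socket becomes readable or writable again.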
+BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) +BLOCKING_IO_READ_ERROR = _ssl.SSLWantReadError +BLOCKING_IO_WRITE_ERROR = _ssl.SSLWantWriteError + +# Base Exception class +SSLError = _ssl.SSLError + +from ssl import SSLContext # noqa: F401,E402 + +if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): + from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 diff --git a/pymongo/ssl_match_hostname.py b/pymongo/ssl_match_hostname.py deleted file mode 100644 index f74df15dcd..0000000000 --- a/pymongo/ssl_match_hostname.py +++ /dev/null @@ -1,100 +0,0 @@ -# Backport of the match_hostname logic introduced in python 3.2 -# http://hg.python.org/releasing/3.3.5/file/993955b807b3/Lib/ssl.py - -import re - - -class CertificateError(ValueError): - pass - - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate") - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py new file mode 100644 index 0000000000..7dbd0f2148 --- /dev/null +++ b/pymongo/ssl_support.py @@ -0,0 +1,146 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for SSL in PyMongo.""" +from __future__ import annotations + +import types +import warnings +from typing import Any, Optional, Union + +from pymongo.errors import ConfigurationError + +HAVE_SSL = True +HAVE_PYSSL = True + +try: + import pymongo.pyopenssl_context as _pyssl +except (ImportError, AttributeError) as exc: + HAVE_PYSSL = False + if isinstance(exc, AttributeError): + warnings.warn( + "Failed to use the installed version of PyOpenSSL. " + "Falling back to stdlib ssl, disabling OCSP support. " + "This is likely caused by incompatible versions " + "of PyOpenSSL < 23.2.0 and cryptography >= 42.0.0. " + "Try updating PyOpenSSL >= 23.2.0 to enable OCSP.", + UserWarning, + stacklevel=2, + ) +try: + import pymongo.ssl_context as _ssl +except ImportError: + HAVE_SSL = False + + +if HAVE_SSL: + # Note: The validate* functions below deal with users passing + # CPython ssl module constants to configure certificate verification + # at a high level. This is legacy behavior, but requires us to + # import the ssl module even if we're only using it for this purpose. 
+ import ssl as _stdlibssl # noqa: F401 + from ssl import CERT_NONE, CERT_REQUIRED + + IPADDR_SAFE = True + + if HAVE_PYSSL: + PYSSLError: Any = _pyssl.SSLError + BLOCKING_IO_ERRORS: tuple = ( # type: ignore[type-arg] + _ssl.BLOCKING_IO_ERRORS + _pyssl.BLOCKING_IO_ERRORS + ) + BLOCKING_IO_READ_ERROR: tuple = ( # type: ignore[type-arg] + _pyssl.BLOCKING_IO_READ_ERROR, + _ssl.BLOCKING_IO_READ_ERROR, + ) + BLOCKING_IO_WRITE_ERROR: tuple = ( # type: ignore[type-arg] + _pyssl.BLOCKING_IO_WRITE_ERROR, + _ssl.BLOCKING_IO_WRITE_ERROR, + ) + else: + PYSSLError = _ssl.SSLError + BLOCKING_IO_ERRORS: tuple = _ssl.BLOCKING_IO_ERRORS # type: ignore[type-arg, no-redef] + BLOCKING_IO_READ_ERROR: tuple = (_ssl.BLOCKING_IO_READ_ERROR,) # type: ignore[type-arg, no-redef] + BLOCKING_IO_WRITE_ERROR: tuple = (_ssl.BLOCKING_IO_WRITE_ERROR,) # type: ignore[type-arg, no-redef] + SSLError = _ssl.SSLError + BLOCKING_IO_LOOKUP_ERROR = BLOCKING_IO_READ_ERROR + + def _has_sni(is_sync: bool) -> bool: + if is_sync and HAVE_PYSSL: + return _pyssl.HAS_SNI + return _ssl.HAS_SNI + + def get_ssl_context( + certfile: Optional[str], + passphrase: Optional[str], + ca_certs: Optional[str], + crlfile: Optional[str], + allow_invalid_certificates: bool, + allow_invalid_hostnames: bool, + disable_ocsp_endpoint_check: bool, + is_sync: bool, + ) -> Union[_pyssl.SSLContext, _ssl.SSLContext]: # type: ignore[name-defined] + """Create and return an SSLContext object.""" + if is_sync and HAVE_PYSSL: + ssl: types.ModuleType = _pyssl + else: + ssl = _ssl + verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED + ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if verify_mode != CERT_NONE: + ctx.check_hostname = not allow_invalid_hostnames + else: + ctx.check_hostname = False + if hasattr(ctx, "check_ocsp_endpoint"): + ctx.check_ocsp_endpoint = not disable_ocsp_endpoint_check + if hasattr(ctx, "options"): + # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that + # up to date versions of MongoDB 2.4 and above already disable + # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7 + # and >= 3.3.4 and SSLv3 in >= 3.4.3. + ctx.options |= ssl.OP_NO_SSLv2 + ctx.options |= ssl.OP_NO_SSLv3 + ctx.options |= ssl.OP_NO_COMPRESSION + ctx.options |= ssl.OP_NO_RENEGOTIATION + if certfile is not None: + try: + ctx.load_cert_chain(certfile, None, passphrase) + except ssl.SSLError as exc: + raise ConfigurationError(f"Private key doesn't match certificate: {exc}") from None + if crlfile is not None: + if ssl.IS_PYOPENSSL: + raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") + # Match the server's behavior. 
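+            # Note: VERIFY_CRL_CHECK_LEAF checks the CRL for the peer
+            # certificate only, not for the intermediate CAs in the chain.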
+ ctx.verify_flags = getattr(ssl, "VERIFY_CRL_CHECK_LEAF", 0) + ctx.load_verify_locations(crlfile) + if ca_certs is not None: + ctx.load_verify_locations(ca_certs) + elif verify_mode != CERT_NONE: + ctx.load_default_certs() + ctx.verify_mode = verify_mode + return ctx + +else: + + class SSLError(Exception): # type: ignore + pass + + IPADDR_SAFE = False + BLOCKING_IO_ERRORS: tuple = () # type: ignore[type-arg, no-redef] + + def _has_sni(is_sync: bool) -> bool: # noqa: ARG001 + return False + + def get_ssl_context(*dummy): # type: ignore + """No ssl module, raise ConfigurationError.""" + raise ConfigurationError("The ssl module is not available") diff --git a/pymongo/synchronous/__init__.py b/pymongo/synchronous/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pymongo/synchronous/aggregation.py b/pymongo/synchronous/aggregation.py new file mode 100644 index 0000000000..486768ab7d --- /dev/null +++ b/pymongo/synchronous/aggregation.py @@ -0,0 +1,254 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo import common +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ConfigurationError +from pymongo.read_preferences import ReadPreference, _AggWritePref + +if TYPE_CHECKING: + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.command_cursor import CommandCursor + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.typings import _DocumentType, _Pipeline + +_IS_SYNC = True + + +class _AggregationCommand: + """The internal abstract base class for aggregation cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.collection.Collection.aggregate`, or + :meth:`pymongo.database.Database.aggregate` instead. + """ + + def __init__( + self, + target: Union[Database[Any], Collection[Any]], + cursor_class: type[CommandCursor[Any]], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], Connection], None]] = None, + comment: Any = None, + ) -> None: + if "explain" in options: + raise ConfigurationError( + "The explain option is not supported. Use Database.command instead." 
+ ) + + self._target = target + + pipeline = common.validate_list("pipeline", pipeline) + self._pipeline = pipeline + self._performs_write = False + if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): + self._performs_write = True + + common.validate_is_mapping("options", options) + if let is not None: + common.validate_is_mapping("let", let) + options["let"] = let + if comment is not None: + options["comment"] = comment + + self._options = options + + # This is the batchSize that will be used for setting the initial + # batchSize for the cursor, as well as the subsequent getMores. + self._batch_size = common.validate_non_negative_integer_or_none( + "batchSize", self._options.pop("batchSize", None) + ) + + # If the cursor option is already specified, avoid overriding it. + self._options.setdefault("cursor", {}) + # If the pipeline performs a write, we ignore the initial batchSize + # since the server doesn't return results in this case. + if self._batch_size is not None and not self._performs_write: + self._options["cursor"]["batchSize"] = self._batch_size + + self._cursor_class = cursor_class + self._user_fields = user_fields + self._result_processor = result_processor + + self._collation = validate_collation_or_none(options.pop("collation", None)) + + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) + self._write_preference: Optional[_AggWritePref] = None + + @property + def _aggregation_target(self) -> Union[str, int]: + """The argument to pass to the aggregate command.""" + raise NotImplementedError + + @property + def _cursor_namespace(self) -> str: + """The namespace in which the aggregate command is run.""" + raise NotImplementedError + + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + raise NotImplementedError + + @property + def _database(self) -> Database[Any]: + """The database against which the aggregation command is run.""" + raise NotImplementedError + + def get_read_preference( + self, session: Optional[ClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) # type: ignore[assignment] + return pref + + def get_cursor( + self, + session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[_DocumentType]: + # Serialize command. + cmd = {"aggregate": self._aggregation_target, "pipeline": self._pipeline} + cmd.update(self._options) + + # Apply this target's read concern if: + # readConcern has not been specified as a kwarg and either + # - server version is >= 4.2 or + # - server version is >= 3.2 and pipeline doesn't use $out + if ("readConcern" not in cmd) and ( + not self._performs_write or (conn.max_wire_version >= 8) + ): + read_concern = self._target.read_concern + else: + read_concern = None + + # Apply this target's write concern if: + # writeConcern has not been specified as a kwarg and pipeline doesn't + # perform a write operation + if "writeConcern" not in cmd and self._performs_write: + write_concern = self._target._write_concern_for(session) + else: + write_concern = None + + # Run command. 
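+        # At this point cmd looks roughly like (values are illustrative):
+        #   {"aggregate": <target>, "pipeline": [...], "cursor": {"batchSize": ...}}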
+ result = conn.command( + self._database.name, + cmd, + read_preference, + self._target.codec_options, + parse_write_concern_error=True, + read_concern=read_concern, + write_concern=write_concern, + collation=self._collation, + session=session, + client=self._database.client, + user_fields=self._user_fields, + ) + + if self._result_processor: + self._result_processor(result, conn) + + # Extract cursor from result or mock/fake one if necessary. + if "cursor" in result: + cursor = result["cursor"] + else: + # Unacknowledged $out/$merge write. Fake a cursor. + cursor = { + "id": 0, + "firstBatch": result.get("result", []), + "ns": self._cursor_namespace, + } + + # Create and return cursor instance. + cmd_cursor = self._cursor_class( + self._cursor_collection(cursor), + cursor, + conn.address, + batch_size=self._batch_size or 0, + max_await_time_ms=self._max_await_time_ms, + session=session, + comment=self._options.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + +class _CollectionAggregationCommand(_AggregationCommand): + _target: Collection[Any] + + @property + def _aggregation_target(self) -> str: + return self._target.name + + @property + def _cursor_namespace(self) -> str: + return self._target.full_name + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + return self._target + + @property + def _database(self) -> Database[Any]: + return self._target.database + + +class _CollectionRawAggregationCommand(_CollectionAggregationCommand): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + # For raw-batches, we set the initial batchSize for the cursor to 0. + if not self._performs_write: + self._options["cursor"]["batchSize"] = 0 + + +class _DatabaseAggregationCommand(_AggregationCommand): + _target: Database[Any] + + @property + def _aggregation_target(self) -> int: + return 1 + + @property + def _cursor_namespace(self) -> str: + return f"{self._target.name}.$cmd.aggregate" + + @property + def _database(self) -> Database[Any]: + return self._target + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + # Collection level aggregate may not always return the "ns" field + # according to our MockupDB tests. Let's handle that case for db level + # aggregate too by defaulting to the .$cmd.aggregate namespace. + _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) + return self._database[collname] diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py new file mode 100644 index 0000000000..650e25234d --- /dev/null +++ b/pymongo/synchronous/auth.py @@ -0,0 +1,450 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Authentication helpers.""" +from __future__ import annotations + +import functools +import hashlib +import hmac +import socket +from base64 import standard_b64decode, standard_b64encode +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote + +from bson.binary import Binary +from pymongo.auth_shared import ( + MongoCredential, + _authenticate_scram_start, + _parse_scram_response, + _xor, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep +from pymongo.synchronous.auth_aws import _authenticate_aws +from pymongo.synchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, +) +from pymongo.synchronous.helpers import _getaddrinfo + +if TYPE_CHECKING: + from pymongo.hello import Hello + from pymongo.synchronous.pool import Connection + +HAVE_KERBEROS = True +_USE_PRINCIPAL = False +try: + import winkerberos as kerberos # type:ignore[import] + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): + _USE_PRINCIPAL = True +except ImportError: + try: + import kerberos # type:ignore[import] + except ImportError: + HAVE_KERBEROS = False + + +_IS_SYNC = True + + +def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: + """Authenticate using SCRAM.""" + username = credentials.username + if mechanism == "SCRAM-SHA-256": + digest = "sha256" + digestmod = hashlib.sha256 + data = saslprep(credentials.password).encode("utf-8") + else: + digest = "sha1" + digestmod = hashlib.sha1 + data = _password_digest(username, credentials.password).encode("utf-8") + source = credentials.source + cache = credentials.cache + + # Make local + _hmac = hmac.HMAC + + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = conn.command(source, cmd) + + assert res is not None + server_first = res["payload"] + parsed = _parse_scram_response(server_first) + iterations = int(parsed[b"i"]) + if iterations < 4096: + raise OperationFailure("Server returned an invalid iteration count.") + salt = parsed[b"s"] + rnonce = parsed[b"r"] + if not rnonce.startswith(nonce): + raise OperationFailure("Server returned an invalid nonce.") + + without_proof = b"c=biws,r=" + rnonce + if cache.data: + client_key, server_key, csalt, citerations = cache.data + else: + client_key, server_key, csalt, citerations = None, None, None, None + + # Salt and / or iterations could change for a number of different + # reasons. Either changing invalidates the cache. 
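+    # The cached tuple is (client_key, server_key, salt, iterations); reusing
+    # it lets repeated handshakes for the same credentials skip the expensive
+    # PBKDF2 derivation below whenever the server's salt and iteration count
+    # are unchanged.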
+ if not client_key or salt != csalt or iterations != citerations: + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) + client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() + server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() + cache.data = (client_key, server_key, salt, iterations) + stored_key = digestmod(client_key).digest() + auth_msg = b",".join((first_bare, server_first, without_proof)) + client_sig = _hmac(stored_key, auth_msg, digestmod).digest() + client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) + client_final = b",".join((without_proof, client_proof)) + + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) + + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } + res = conn.command(source, cmd) + + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): + raise OperationFailure("Server returned an invalid signature.") + + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } + res = conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") + if len(password) == 0: + raise ValueError("password can't be empty") + if not isinstance(username, str): + raise TypeError(f"username must be an instance of str, not {type(username)}") + + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" + digest = _password_digest(username, password) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _canonicalize_hostname(hostname: str, option: str | bool) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + + af, socktype, proto, canonname, sockaddr = ( + _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] + + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + +def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using GSSAPI.""" + if not HAVE_KERBEROS: + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' + ) + + try: + username = credentials.username + password = credentials.password + props = credentials.mechanism_properties + # Starting here and continuing through the while loop below - establish + # the security context. 
See RFC 4752, Section 3.1, first paragraph. + host = props.service_host or conn.address[0] + host = _canonicalize_hostname(host, props.canonicalize_host_name) + service = props.service_name + "@" + host + if props.service_realm is not None: + service = service + "@" + props.service_realm + + if password is not None: + if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. + principal = ":".join((quote(username), quote(password))) + result, ctx = kerberos.authGSSClientInit( + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) + else: + if "@" in username: + user, domain = username.split("@", 1) + else: + user, domain = username, None + result, ctx = kerberos.authGSSClientInit( + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) + else: + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + + if result != kerberos.AUTH_GSS_COMPLETE: + raise OperationFailure("Kerberos context failed to initialize.") + + try: + # pykerberos uses a weird mix of exceptions and return values + # to indicate errors. + # 0 == continue, 1 == complete, -1 == error + # Only authGSSClientStep can return 0. + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") + + # Start a SASL conversation with mongod/s + # Note: pykerberos deals with base64 encoded byte strings. + # Since mongo accepts base64 strings as the payload we don't + # have to use bson.binary.Binary. + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } + response = conn.command("$external", cmd) + + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) + if result == -1: + raise OperationFailure("Unknown kerberos failure in step function.") + + payload = kerberos.authGSSClientResponse(ctx) or "" + + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + response = conn.command("$external", cmd) + + if result == kerberos.AUTH_GSS_COMPLETE: + break + else: + raise OperationFailure("Kerberos authentication failed to complete.") + + # Once the security context is established actually authenticate. + # See RFC 4752, Section 3.1, last two paragraphs. 
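+            # Unwrap decrypts the server's final token; wrap re-encrypts it
+            # together with the authorization identity (the username) to
+            # prove both parties hold the established session key.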
+ if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") + + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") + + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + conn.command("$external", cmd) + + finally: + kerberos.authGSSClientClean(ctx) + + except kerberos.KrbError as exc: + raise OperationFailure(str(exc)) from None + + +def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" + source = credentials.source + username = credentials.username + password = credentials.password + payload = (f"\x00{username}\x00{password}").encode() + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } + conn.command(source, cmd) + + +def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + conn.command("$external", cmd) + + +def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = (conn.command(source, cmd, publish_events=False)).get("saslSupportedMechs", []) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., None]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, +} + + +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: 
Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.get_spec_auth_cmd() + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} + + +def authenticate( + credentials: MongoCredential, conn: Connection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, conn, reauthenticate) + else: + auth_func(credentials, conn) diff --git a/pymongo/synchronous/auth_aws.py b/pymongo/synchronous/auth_aws.py new file mode 100644 index 0000000000..c7ea47886f --- /dev/null +++ b/pymongo/synchronous/auth_aws.py @@ -0,0 +1,100 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Type + +import bson +from bson.binary import Binary +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from bson.typings import _ReadableBuffer + from pymongo.auth_shared import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-AWS.""" + try: + import pymongo_auth_aws # type:ignore[import] + except ImportError as e: + raise ConfigurationError( + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'" + ) from e + # Delayed import. 
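+    # Importing here (rather than at module top) keeps pymongo importable
+    # when the optional pymongo-auth-aws dependency is not installed.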
+ from pymongo_auth_aws.auth import ( # type:ignore[import] + set_cached_credentials, + set_use_cached_credentials, + ) + + set_use_cached_credentials(True) + + if conn.max_wire_version < 9: + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + class AwsSaslContext(pymongo_auth_aws.AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + + try: + ctx = AwsSaslContext( + pymongo_auth_aws.AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) + client_payload = ctx.step(None) + client_first = {"saslStart": 1, "mechanism": "MONGODB-AWS", "payload": client_payload} + server_first = conn.command("$external", client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res["payload"]) + cmd = { + "saslContinue": 1, + "conversationId": server_first["conversationId"], + "payload": client_payload, + } + res = conn.command("$external", cmd) + if res["done"]: + # SASL complete. + break + except pymongo_auth_aws.PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure( + f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})" + ) from None + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py new file mode 100644 index 0000000000..583ee39f67 --- /dev/null +++ b/pymongo/synchronous/auth_oidc.py @@ -0,0 +1,303 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import asyncio +import threading +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union + +import bson +from bson.binary import Binary +from pymongo._csot import remaining +from pymongo.auth_oidc_shared import ( + CALLBACK_VERSION, + HUMAN_CALLBACK_TIMEOUT_SECONDS, + MACHINE_CALLBACK_TIMEOUT_SECONDS, + TIME_BETWEEN_CALLS_SECONDS, + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + _OIDCProperties, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _create_lock + +if TYPE_CHECKING: + from pymongo.auth_shared import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if properties.human_callback is not None: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + token_gen_id: int = field(default=0) + if not _IS_SYNC: + lock: Lock = field(default_factory=_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_create_lock) # type: ignore[assignment, no-redef] + + last_call_time: float = field(default=0) + + def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. 
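+        # properties.callback is the machine (workload) callback and
+        # properties.human_callback the interactive one; whichever is set
+        # selects the authentication flow below.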
+        if self.properties.callback:
+            return self._authenticate_machine(conn)
+        return self._authenticate_human(conn)
+
+    def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]:
+        """Get the appropriate speculative auth command."""
+        if not self.access_token:
+            return None
+        return self._get_start_command({"jwt": self.access_token})
+
+    def _authenticate_machine(self, conn: Connection) -> Mapping[str, Any]:
+        # If there is a cached access token, try to authenticate with it. If
+        # authentication fails with error code 18, invalidate the access token,
+        # fetch a new access token, and try to authenticate again. If authentication
+        # fails for any other reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return self._authenticate_machine(conn)
+                raise
+        return self._sasl_start_jwt(conn)
+
+    def _authenticate_human(self, conn: Connection) -> Optional[Mapping[str, Any]]:
+        # If we have a cached access token, try a JwtStepRequest. If
+        # authentication fails with error code 18, invalidate the access token,
+        # and try to authenticate again. If authentication fails for any other
+        # reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return self._authenticate_human(conn)
+                raise
+
+        # If we have a cached refresh token, try a JwtStepRequest with that.
+        # If authentication fails with error code 18, invalidate the access and
+        # refresh tokens, and try to authenticate again. If authentication fails for
+        # any other reason, raise the error to the user.
+        if self.refresh_token:
+            try:
+                return self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    self.refresh_token = None
+                    return self._authenticate_human(conn)
+                raise
+
+        # Start a new Two-Step SASL conversation.
+        # Run a PrincipalStepRequest to get the IdpInfo.
+        cmd = self._get_start_command(None)
+        start_resp = self._run_command(conn, cmd)
+        # Attempt to authenticate with a JwtStepRequest.
+        return self._sasl_continue_jwt(conn, start_resp)
+
+    def _get_access_token(self) -> Optional[str]:
+        properties = self.properties
+        cb: Union[None, OIDCCallback]
+        resp: OIDCCallbackResult
+
+        is_human = properties.human_callback is not None
+        if is_human and self.idp_info is None:
+            return None
+
+        if properties.callback:
+            cb = properties.callback
+        if properties.human_callback:
+            cb = properties.human_callback
+
+        prev_token = self.access_token
+        if prev_token:
+            return prev_token
+
+        if cb is None and not prev_token:
+            return None
+
+        if not prev_token and cb is not None:
+            with self.lock:  # type: ignore[attr-defined]
+                # See if the token was changed while we were waiting for the
+                # lock.
+                new_token = self.access_token
+                if new_token != prev_token:
+                    return new_token
+
+                # Ensure that we are waiting a min time between callback invocations.
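+                # TIME_BETWEEN_CALLS_SECONDS (from pymongo.auth_oidc_shared)
+                # throttles the callback: if the previous invocation finished
+                # too recently, sleep for the remaining interval first.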
+ delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + if not _IS_SYNC: + resp = asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token + self.token_gen_id += 1 + + return self.access_token + + def _run_command(self, conn: Connection, cmd: MutableMapping[str, Any]) -> Mapping[str, Any]: + try: + return conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) + raise + + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: Connection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return + self.access_token = None + + def _sasl_continue_jwt( + self, conn: Connection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return self._run_command(conn, cmd) + + def _sasl_start_jwt(self, conn: Connection) -> Mapping[str, Any]: + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_start_command({"jwt": access_token}) + return self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } + + +def _authenticate_oidc( + credentials: MongoCredential, conn: Connection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return authenticator.reauthenticate(conn) + else: + return authenticator.authenticate(conn) diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py new file 
mode 100644 index 0000000000..22d6a7a76a --- /dev/null +++ b/pymongo/synchronous/bulk.py @@ -0,0 +1,751 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The bulk write operations interface. + +.. versionadded:: 2.7 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.bulk_shared import ( + _COMMANDS, + _DELETE_ALL, + _merge_command, + _raise_bulk_write_error, + _Run, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + InvalidOperation, + NotPrimaryError, + OperationFailure, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _convert_exception, + _convert_write_result, + _EncryptedBulkWriteContext, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline + +_IS_SYNC = True + + +class _Bulk: + """The private guts of the bulk write API.""" + + def __init__( + self, + collection: Collection[_DocumentType], + ordered: bool, + bypass_document_validation: Optional[bool], + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: + """Initialize a _Bulk instance.""" + self.collection = collection.with_options( + codec_options=collection.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.comment: Optional[str] = comment + self.ordered = ordered + self.ops: list[tuple[int, Mapping[str, Any]]] = [] + self.executed = False + self.bypass_doc_val = bypass_document_validation + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + self.uses_sort = False + self.is_retryable = True + self.retrying = False + self.started_retryable_write = False + # Extra state so that we know where to pick up on a retry attempt. 
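+        # current_run is the batch in flight; next_run is pre-fetched by
+        # _execute_command so it can detect when the final batch is running.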
+ self.current_run = None + self.next_run = None + self.is_encrypted = False + + @property + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: + encrypter = self.collection.database.client._encrypter + if encrypter and not encrypter._bypass_auto_encryption: + self.is_encrypted = True + return _EncryptedBulkWriteContext + else: + self.is_encrypted = False + return _BulkWriteContext + + def add_insert(self, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + self.ops.append((_INSERT, document)) + + def add_update( + self, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append((_UPDATE, cmd)) + + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + self.ops.append((_UPDATE, cmd)) + + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd: dict[str, Any] = {"q": selector, "limit": limit} + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if limit == _DELETE_ALL: + # A bulk_write containing a delete_many is not retryable. + self.is_retryable = False + self.ops.append((_DELETE, cmd)) + + def gen_ordered(self) -> Iterator[Optional[_Run]]: + """Generate batches of operations, batched by type of + operation, in the order **provided**. 
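+
+        Consecutive operations of the same type are coalesced into a single
+        _Run so that each run can be sent as one write command.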
+ """ + run = None + for idx, (op_type, operation) in enumerate(self.ops): + if run is None: + run = _Run(op_type) + elif run.op_type != op_type: + yield run + run = _Run(op_type) + run.add(idx, operation) + yield run + + def gen_unordered(self) -> Iterator[_Run]: + """Generate batches of operations, batched by type of + operation, in arbitrary order. + """ + operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] + for idx, (op_type, operation) in enumerate(self.ops): + operations[op_type].add(idx, operation) + + for run in operations: + if run.ops: + yield run + + @_handle_reauth + def write_command( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> dict[str, Any]: + """A proxy for SocketInfo.write_command that handles event publishing.""" + cmd[bwc.field] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, docs) + try: + reply = bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Process the response from the server. 
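+            # Even error replies can carry $clusterTime/operationTime updates
+            # that the client and session must observe before re-raising.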
+ if isinstance(exc, (NotPrimaryError, OperationFailure)): + client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + raise + return reply # type: ignore[return-value] + + def unack_write( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + max_doc_size: int, + docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, docs) + try: + result = bwc.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + raise + return result # type: ignore[return-value] + + def _execute_batch_unack( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> list[Mapping[str, Any]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + write_concern=WriteConcern(w=0), + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + 
else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + # Though this isn't strictly a "legacy" write, the helper + # handles publishing commands and sending our message + # without receiving a result. Send 0 for max_doc_size + # to disable size checking. Size checking is handled while + # the documents are encoded to BSON. + self.unack_write(bwc, cmd, request_id, msg, 0, to_send, client) # type: ignore[arg-type] + + return to_send + + def _execute_batch( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + result = bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + codec_options=bwc.codec, + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + result = self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] + + return result, to_send # type: ignore[return-value] + + def _execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + + if not self.current_run: + self.current_run = next(generator) + self.next_run = None + run = self.current_run + + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(client, session) + last_run = False + + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + + cmd = {cmd_name: self.collection.name, "ordered": self.ordered} + if self.comment: + cmd["comment"] = self.comment + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(client, cmd) + ops = islice(run.ops, run.idx_offset, None) + + # Run as many ops as possible in one command. + if write_concern.acknowledged: + result, to_send = self._execute_batch(bwc, cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. 
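+                    # A retryable writeConcernError (e.g. ShutdownInProgress)
+                    # is raised immediately so the retryable-writes layer can
+                    # re-run this command instead of continuing the run.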
+ wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = self._execute_batch_unack(bwc, cmd, ops, client) + + run.idx_offset += len(to_send) + + # We're supposed to continue if errors are + # at the write concern level (e.g. wtimeout) + if self.ordered and full_result["writeErrors"]: + break + # Reset our state + self.current_run = run = self.next_run + + def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + operation: str, + ) -> dict[str, Any]: + """Execute using write commands.""" + # nModified is only reported for write commands, not legacy ops. + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + op_id = _randint() + + def retryable_bulk( + session: Optional[ClientSession], conn: Connection, retryable: bool + ) -> None: + self._execute_command( + generator, + write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + client = self.collection.database.client + _ = client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, # type: ignore[arg-type] + operation_id=op_id, + ) + + if full_result["writeErrors"] or full_result["writeConcernErrors"]: + _raise_bulk_write_error(full_result) + return full_result + + def execute_op_msg_no_results(self, conn: Connection, generator: Iterator[Any]) -> None: + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + op_id = _randint() + + if not self.current_run: + self.current_run = next(generator) + run = self.current_run + + while run: + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + cmd = { + cmd_name: self.collection.name, + "ordered": False, + "writeConcern": {"w": 0}, + } + conn.add_server_api(cmd) + ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible. + to_send = self._execute_batch_unack(bwc, cmd, ops, client) + run.idx_offset += len(to_send) + self.current_run = run = next(generator, None) + + def execute_command_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. 
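+        # We therefore run with a default acknowledged write concern and pass
+        # the caller's w=0 concern as final_write_concern, which
+        # _execute_command applies to the very last operation only.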
+ initial_write_concern = WriteConcern() + op_id = _randint() + try: + self._execute_command( + generator, + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + write_concern, + ) + except OperationFailure: + pass + + def execute_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if unack and self.uses_hint_update and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return self.execute_command_no_results(conn, generator, write_concern) + return self.execute_op_msg_no_results(conn, generator) + + def execute( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + write_concern = write_concern or self.collection.write_concern + session = _validate_session_write_concern(session, write_concern) + + if self.ordered: + generator = self.gen_ordered() + else: + generator = self.gen_unordered() + + client = self.collection.database.client + if not write_concern.acknowledged: + with client._conn_for_writes(session, operation) as connection: + self.execute_no_results(connection, generator, write_concern) + return None + else: + return self.execute_command(generator, write_concern, session, operation) diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py new file mode 100644 index 0000000000..7e34d7b848 --- /dev/null +++ b/pymongo/synchronous/change_stream.py @@ -0,0 +1,494 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+
+"""Watch changes on a collection, a database, or the entire cluster."""
+from __future__ import annotations
+
+import copy
+from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union
+
+from bson import CodecOptions, _bson_to_dict
+from bson.raw_bson import RawBSONDocument
+from bson.timestamp import Timestamp
+from pymongo import _csot, common
+from pymongo.collation import validate_collation_or_none
+from pymongo.errors import (
+    ConnectionFailure,
+    CursorNotFound,
+    InvalidOperation,
+    OperationFailure,
+    PyMongoError,
+)
+from pymongo.operations import _Op
+from pymongo.synchronous.aggregation import (
+    _AggregationCommand,
+    _CollectionAggregationCommand,
+    _DatabaseAggregationCommand,
+)
+from pymongo.synchronous.command_cursor import CommandCursor
+from pymongo.typings import _CollationIn, _DocumentType, _Pipeline
+
+_IS_SYNC = True
+
+# The change streams spec considers the following server errors from the
+# getMore command resumable. All other getMore errors are non-resumable.
+_RESUMABLE_GETMORE_ERRORS = frozenset(
+    [
+        6,  # HostUnreachable
+        7,  # HostNotFound
+        89,  # NetworkTimeout
+        91,  # ShutdownInProgress
+        189,  # PrimarySteppedDown
+        262,  # ExceededTimeLimit
+        9001,  # SocketException
+        10107,  # NotWritablePrimary
+        11600,  # InterruptedAtShutdown
+        11602,  # InterruptedDueToReplStateChange
+        13435,  # NotPrimaryNoSecondaryOk
+        13436,  # NotPrimaryOrSecondary
+        63,  # StaleShardVersion
+        150,  # StaleEpoch
+        13388,  # StaleConfig
+        234,  # RetryChangeStream
+        133,  # FailedToSatisfyReadPreference
+    ]
+)
+
+
+if TYPE_CHECKING:
+    from pymongo.synchronous.client_session import ClientSession
+    from pymongo.synchronous.collection import Collection
+    from pymongo.synchronous.database import Database
+    from pymongo.synchronous.mongo_client import MongoClient
+    from pymongo.synchronous.pool import Connection
+
+
+def _resumable(exc: PyMongoError) -> bool:
+    """Return True if given a resumable change stream error."""
+    if isinstance(exc, (ConnectionFailure, CursorNotFound)):
+        return True
+    if isinstance(exc, OperationFailure):
+        if exc._max_wire_version is None:
+            return False
+        return (
+            exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError")
+        ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS)
+    return False
+
+
+class ChangeStream(Generic[_DocumentType]):
+    """The internal abstract base class for change stream cursors.
+
+    Should not be called directly by application developers. Use
+    :meth:`pymongo.collection.Collection.watch`,
+    :meth:`pymongo.database.Database.watch`, or
+    :meth:`pymongo.mongo_client.MongoClient.watch` instead.
+
+    .. versionadded:: 3.6
+    .. seealso:: The MongoDB documentation on `changeStreams <https://mongodb.com/docs/manual/changeStreams/>`_.
+ """ + + def __init__( + self, + target: Union[ + MongoClient[_DocumentType], + Database[_DocumentType], + Collection[_DocumentType], + ], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional[ClientSession], + start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> None: + if pipeline is None: + pipeline = [] + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) + validate_collation_or_none(collation) + common.validate_non_negative_integer_or_none("batchSize", batch_size) + + self._decode_custom = False + self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options + if target.codec_options.type_registry._decoder_map: + self._decode_custom = True + # Keep the type registry so that we support encoding custom types + # in the pipeline. + self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) + else: + self._target = target + + self._pipeline = copy.deepcopy(pipeline) + self._full_document = full_document + self._full_document_before_change = full_document_before_change + self._uses_start_after = start_after is not None + self._uses_resume_after = resume_after is not None + self._resume_token = copy.deepcopy(start_after or resume_after) + self._max_await_time_ms = max_await_time_ms + self._batch_size = batch_size + self._collation = collation + self._start_at_operation_time = start_at_operation_time + self._session = session + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events + + def _initialize_cursor(self) -> None: + # Initialize cursor. + self._cursor = self._create_cursor() + + @property + def _aggregation_command_class(self) -> Type[_AggregationCommand]: + """The aggregation command class to be used.""" + raise NotImplementedError + + @property + def _client(self) -> MongoClient: # type: ignore[type-arg] + """The client against which the aggregation commands for + this ChangeStream will be run. 
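+
+        Concrete subclasses return the client reachable from their target
+        (collection or database), or the target itself for cluster-wide
+        change streams.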
+ """ + raise NotImplementedError + + def _change_stream_options(self) -> dict[str, Any]: + """Return the options dict for the $changeStream pipeline stage.""" + options: dict[str, Any] = {} + if self._full_document is not None: + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + + resume_token = self.resume_token + if resume_token is not None: + if self._uses_start_after: + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token + + elif self._start_at_operation_time is not None: + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + + return options + + def _command_options(self) -> dict[str, Any]: + """Return the options dict for the aggregation command.""" + options = {} + if self._max_await_time_ms is not None: + options["maxAwaitTimeMS"] = self._max_await_time_ms + if self._batch_size is not None: + options["batchSize"] = self._batch_size + return options + + def _aggregation_pipeline(self) -> list[dict[str, Any]]: + """Return the full aggregation pipeline for this ChangeStream.""" + options = self._change_stream_options() + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] + full_pipeline.extend(self._pipeline) + return full_pipeline + + def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. + + This is implemented as a callback because we need access to the wire + version in order to determine whether to cache this value. + """ + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + def _run_aggregation_cmd(self, session: Optional[ClientSession]) -> CommandCursor: # type: ignore[type-arg] + """Run the full aggregation pipeline for this ChangeStream and return + the corresponding CommandCursor. 
+ """ + cmd = self._aggregation_command_class( + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + result_processor=self._process_result, + comment=self._comment, + ) + return self._client._retryable_read( + cmd.get_cursor, + self._target._read_preference_for(session), + session, + operation=_Op.AGGREGATE, + ) + + def _create_cursor(self) -> CommandCursor: # type: ignore[type-arg] + with self._client._tmp_session(self._session) as s: + return self._run_aggregation_cmd(session=s) + + def _resume(self) -> None: + """Reestablish this change stream after a resumable error.""" + try: + self._cursor.close() + except PyMongoError: + pass + self._cursor = self._create_cursor() + + def close(self) -> None: + """Close this ChangeStream.""" + self._closed = True + self._cursor.close() + + def __iter__(self) -> ChangeStream[_DocumentType]: + return self + + @property + def resume_token(self) -> Optional[Mapping[str, Any]]: + """The cached resume token that will be used to resume after the most + recently returned change. + + .. versionadded:: 3.9 + """ + return copy.deepcopy(self._resume_token) + + @_csot.apply + def next(self) -> _DocumentType: + """Advance the cursor. + + This method blocks until the next change document is returned or an + unrecoverable error is raised. This method is used when iterating over + all changes in the cursor. For example:: + + try: + resume_token = None + pipeline = [{'$match': {'operationType': 'insert'}}] + with db.collection.watch(pipeline) as stream: + for insert_change in stream: + print(insert_change) + resume_token = stream.resume_token + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + if resume_token is None: + # There is no usable resume token because there was a + # failure during ChangeStream initialization. + logging.error('...') + else: + # Use the interrupted ChangeStream's resume token to create + # a new ChangeStream. The new stream will continue from the + # last seen insert change without missing any events. + with db.collection.watch( + pipeline, resume_after=resume_token) as stream: + for insert_change in stream: + print(insert_change) + + Raises :exc:`StopIteration` if this ChangeStream is closed. + """ + while self.alive: + doc = self.try_next() + if doc is not None: + return doc + + raise StopIteration + + __next__ = next + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration` and :meth:`try_next` can return ``None``. + + .. versionadded:: 3.8 + """ + return not self._closed + + @_csot.apply + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next change document without waiting + indefinitely for the next change. For example:: + + with db.collection.watch() as stream: + while stream.alive: + change = stream.try_next() + # Note that the ChangeStream's resume token may be updated + # even when no changes are returned. + print("Current resume token: %r" % (stream.resume_token,)) + if change is not None: + print("Change document: %r" % (change,)) + continue + # We end up here when there are no recent changes. + # Sleep for a while before trying again to avoid flooding + # the server with getMore requests when no changes are + # available. 
+ time.sleep(10) + + If no change document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there have been no changes) then ``None`` is returned. + + :return: The next change document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 3.8 + """ + if not self._closed and not self._cursor.alive: + self._resume() + + # Attempt to get the next change with at most one getMore and at most + # one resume attempt. + try: + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + self.close() + raise + + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + + # If no changes are available. + if change is None: + # We have either iterated over all documents in the cursor, + # OR the most-recently returned batch is empty. In either case, + # update the cached resume token with the postBatchResumeToken if + # one was returned. We also clear the startAtOperationTime. + if self._cursor._post_batch_resume_token is not None: + self._resume_token = self._cursor._post_batch_resume_token + self._start_at_operation_time = None + return change + + # Else, changes are available. + try: + resume_token = change["_id"] + except KeyError: + self.close() + raise InvalidOperation( + "Cannot provide resume functionality when the resume token is missing." + ) from None + + # If this is the last change document from the current batch, cache the + # postBatchResumeToken. + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: + resume_token = self._cursor._post_batch_resume_token + + # Hereafter, don't use startAfter; instead use resumeAfter. + self._uses_start_after = False + self._uses_resume_after = True + + # Cache the resume token and clear startAtOperationTime. + self._resume_token = resume_token + self._start_at_operation_time = None + + if self._decode_custom: + return _bson_to_dict(change.raw, self._orig_codec_options) + return change + + def __enter__(self) -> ChangeStream[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + +class CollectionChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on a single collection. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.collection.Collection.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: Collection[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: + return _CollectionAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.database.client + + +class DatabaseChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in a database. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.database.Database.watch` instead. + + .. 
versionadded:: 3.7 + """ + + _target: Database[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: + return _DatabaseAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.client + + +class ClusterChangeStream(DatabaseChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in the cluster. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.mongo_client.MongoClient.watch` instead. + + .. versionadded:: 3.7 + """ + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() + options["allChangesForCluster"] = True + return options diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py new file mode 100644 index 0000000000..a606d028e1 --- /dev/null +++ b/pymongo/synchronous/client_bulk.py @@ -0,0 +1,754 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class _ClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: MongoClient[Any], + 
write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _ClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + self.executed = False + self.uses_collation = False + self.uses_array_filters = False + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + cmd = {"insert": -1, "document": document} + self.ops.append(("insert", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + def add_update( + self, + namespace: str, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd = { + "update": -1, + "filter": selector, + "updateMods": update, + "multi": multi, + } + if upsert is not None: + cmd["upsert"] = upsert + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if sort is not None: + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. 
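+            # (The retryable writes specification excludes multi:true
+            # statements.)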
+            self.is_retryable = False
+        self.ops.append(("update", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_replace(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        replacement: Mapping[str, Any],
+        upsert: Optional[bool] = None,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+        sort: Optional[Mapping[str, Any]] = None,
+    ) -> None:
+        """Create a replace document and add it to the list of ops."""
+        validate_ok_for_replace(replacement)
+        cmd = {
+            "update": -1,
+            "filter": selector,
+            "updateMods": replacement,
+            "multi": False,
+        }
+        if upsert is not None:
+            cmd["upsert"] = upsert
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if sort is not None:
+            cmd["sort"] = sort
+        self.ops.append(("replace", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_delete(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        multi: bool,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+    ) -> None:
+        """Create a delete document and add it to the list of ops."""
+        cmd = {"delete": -1, "filter": selector, "multi": multi}
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if multi:
+            # A bulk_write containing a delete_many is not retryable.
+            self.is_retryable = False
+        self.ops.append(("delete", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    @_handle_reauth
+    def write_command(
+        self,
+        bwc: _ClientBulkWriteContext,
+        cmd: MutableMapping[str, Any],
+        request_id: int,
+        msg: Union[bytes, dict[str, Any]],
+        op_docs: list[Mapping[str, Any]],
+        ns_docs: list[Mapping[str, Any]],
+        client: MongoClient[Any],
+    ) -> dict[str, Any]:
+        """A proxy for Connection.write_command that handles event publishing."""
+        cmd["ops"] = op_docs
+        cmd["nsInfo"] = ns_docs
+        if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _COMMAND_LOGGER,
+                message=_CommandStatusMessage.STARTED,
+                clientId=client._topology_settings._topology_id,
+                command=cmd,
+                commandName=next(iter(cmd)),
+                databaseName=bwc.db_name,
+                requestId=request_id,
+                operationId=request_id,
+                driverConnectionId=bwc.conn.id,
+                serverConnectionId=bwc.conn.server_connection_id,
+                serverHost=bwc.conn.address[0],
+                serverPort=bwc.conn.address[1],
+                serviceId=bwc.conn.service_id,
+            )
+        if bwc.publish:
+            bwc._start(cmd, request_id, op_docs, ns_docs)
+        try:
+            reply = bwc.conn.write_command(request_id, msg, bwc.codec)  # type: ignore[misc, arg-type]
+            duration = datetime.datetime.now() - bwc.start_time
+            if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG):
+                _debug_log(
+                    _COMMAND_LOGGER,
+                    message=_CommandStatusMessage.SUCCEEDED,
+                    clientId=client._topology_settings._topology_id,
+                    durationMS=duration,
+                    reply=reply,
+                    commandName=next(iter(cmd)),
+                    databaseName=bwc.db_name,
+                    requestId=request_id,
+                    operationId=request_id,
+                    driverConnectionId=bwc.conn.id,
+                    serverConnectionId=bwc.conn.server_connection_id,
+                    serverHost=bwc.conn.address[0],
+                    serverPort=bwc.conn.address[1],
+                    serviceId=bwc.conn.service_id,
+                )
+            if bwc.publish:
+                bwc._succeed(request_id, reply, duration)  # type: ignore[arg-type]
+            # Process the response from the server.
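+            # (This updates the client's cached cluster time and the
+            # session state from the reply.)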
+ self.client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + # Process the response from the server. + if isinstance(exc, OperationFailure): + self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + self.client._process_response({}, bwc.session) # type: ignore[arg-type] + return reply # type: ignore[return-value] + + def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. 
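+                # (An unacknowledged write gets no reply from the server,
+                # so a successful send is reported as {"ok": 1}.)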
+ reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + return reply + + def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + result = self.write_command(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: Connection, + session: Optional[ClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + if session: + session._leave_alive = True + coll = Collection( + database=Database(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = CommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + comment=self.comment, + ) + cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. 
+ try: + for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. + if cmd_cursor.alive: + cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result + + # Top-level server/network error. 
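+                # (A retryable error is re-raised from a deep copy of
+                # full_result so that a retry attempt can re-run this batch
+                # without seeing partially merged state.)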
+ if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + hasattr(error, "details") + and isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. + if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + def execute_command( + self, + session: Optional[ClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + def retryable_bulk( + session: Optional[ClientSession], + conn: Connection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." 
+ ) + self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + def execute_command_unack( + self, + conn: Connection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = True + cmd["ordered"] = False + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + def execute_no_results( + self, + conn: Connection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + return self.execute_command_unack(conn) + + def execute( + self, + session: Optional[ClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + with self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py new file mode 100644 index 0000000000..9b547dc946 --- /dev/null +++ b/pymongo/synchronous/client_session.py @@ -0,0 +1,1184 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Logical sessions for ordering sequential operations.
+
+.. versionadded:: 3.6
+
+Causally Consistent Reads
+=========================
+
+.. code-block:: python
+
+    with client.start_session(causal_consistency=True) as session:
+        collection = client.db.collection
+        collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session)
+        secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY)
+
+        # A secondary read waits for replication of the write.
+        secondary_c.find_one({"_id": 1}, session=session)
+
+If `causal_consistency` is True (the default), read operations that use
+the session are causally ordered after previous read and write operations.
+Using a causally consistent session, an application can read its own writes
+and is guaranteed monotonic reads, even when reading from replica set
+secondaries.
+
+.. seealso:: The MongoDB documentation on `causal-consistency `_.
+
+.. _transactions-ref:
+
+Transactions
+============
+
+.. versionadded:: 3.7
+
+MongoDB 4.0 adds support for transactions on replica set primaries. A
+transaction is associated with a :class:`ClientSession`. To start a transaction
+on a session, use :meth:`ClientSession.start_transaction` in a with-statement.
+Then, execute an operation within the transaction by passing the session to the
+operation:
+
+.. code-block:: python
+
+    orders = client.db.orders
+    inventory = client.db.inventory
+    with client.start_session() as session:
+        with session.start_transaction():
+            orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
+            inventory.update_one(
+                {"sku": "abc123", "qty": {"$gte": 100}},
+                {"$inc": {"qty": -100}},
+                session=session,
+            )
+
+Upon normal completion of the ``with session.start_transaction()`` block, the
+transaction automatically calls :meth:`ClientSession.commit_transaction`.
+If the block exits with an exception, the transaction automatically calls
+:meth:`ClientSession.abort_transaction`.
+
+In general, multi-document transactions only support read/write (CRUD)
+operations on existing collections. However, MongoDB 4.4 adds support for
+creating collections and indexes with some limitations, including an
+insert operation that would result in the creation of a new collection.
+For a complete description of all the supported and unsupported operations
+see the `MongoDB server's documentation for transactions
+`_.
+
+A session may only have a single active transaction at a time, but multiple
+transactions on the same session can be executed in sequence.
+
+Sharded Transactions
+^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.9
+
+PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB
+>=4.2. Sharded transactions have the same API as replica set transactions.
+When running a transaction against a sharded cluster, the session is
+pinned to the mongos server selected for the first operation in the
+transaction. All subsequent operations that are part of the same transaction
+are routed to the same mongos server. When the transaction is completed, by
+running either commitTransaction or abortTransaction, the session is unpinned.
+
+..
seealso:: The MongoDB documentation on `transactions `_. + +.. _snapshot-reads-ref: + +Snapshot Reads +============== + +.. versionadded:: 3.12 + +MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by +passing the ``snapshot`` option to +:meth:`~pymongo.mongo_client.MongoClient.start_session`. +If ``snapshot`` is True, all read operations that use this session read data +from the same snapshot timestamp. The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. + with client.start_session(snapshot=True) as session: + order = orders.find_one({"sku": "abc123"}, session=session) + inventory = inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.collection.Collection.find` +- :meth:`~pymongo.collection.Collection.find_one` +- :meth:`~pymongo.collection.Collection.aggregate` +- :meth:`~pymongo.collection.Collection.count_documents` +- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) + +Classes +======= +""" + +from __future__ import annotations + +import collections +import time +import uuid +from collections.abc import Mapping as _Mapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Mapping, + MutableMapping, + NoReturn, + Optional, + Type, + TypeVar, +) + +from bson.binary import Binary +from bson.int64 import Int64 +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.cursor import _ConnectionManager +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = True + + +class SessionOptions: + """Options for a new :class:`ClientSession`. + + :param causal_consistency: If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. + :param default_transaction_options: The default + TransactionOptions to use for transactions started on this session. + :param snapshot: If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. 
+ """ + + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> None: + if snapshot: + if causal_consistency: + raise ConfigurationError("snapshot reads do not support causal_consistency=True") + causal_consistency = False + elif causal_consistency is None: + causal_consistency = True + self._causal_consistency = causal_consistency + if default_transaction_options is not None: + if not isinstance(default_transaction_options, TransactionOptions): + raise TypeError( + "default_transaction_options must be an instance of " + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) + ) + self._default_transaction_options = default_transaction_options + self._snapshot = snapshot + + @property + def causal_consistency(self) -> bool: + """Whether causal consistency is configured.""" + return self._causal_consistency + + @property + def default_transaction_options(self) -> Optional[TransactionOptions]: + """The default TransactionOptions to use for transactions started on + this session. + + .. versionadded:: 3.7 + """ + return self._default_transaction_options + + @property + def snapshot(self) -> Optional[bool]: + """Whether snapshot reads are configured. + + .. versionadded:: 3.12 + """ + return self._snapshot + + +class TransactionOptions: + """Options for :meth:`ClientSession.start_transaction`. + + :param read_concern: The + :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. + If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use for this + transaction. If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. Transactions which read must use + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param max_commit_time_ms: The maximum amount of time to allow a + single commitTransaction command to run. This option is an alias for + maxTimeMS option on the commitTransaction command. If ``None`` (the + default) maxTimeMS is not used. + + .. versionchanged:: 3.9 + Added the ``max_commit_time_ms`` option. + + .. 
versionadded:: 3.7 + """ + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: + self._read_concern = read_concern + self._write_concern = write_concern + self._read_preference = read_preference + self._max_commit_time_ms = max_commit_time_ms + if read_concern is not None: + if not isinstance(read_concern, ReadConcern): + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) + if write_concern is not None: + if not isinstance(write_concern, WriteConcern): + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) + if not write_concern.acknowledged: + raise ConfigurationError( + "transactions do not support unacknowledged write concern" + f": {write_concern!r}" + ) + if read_preference is not None: + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) + + @property + def read_concern(self) -> Optional[ReadConcern]: + """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" + return self._read_concern + + @property + def write_concern(self) -> Optional[WriteConcern]: + """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" + return self._write_concern + + @property + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" + return self._read_preference + + @property + def max_commit_time_ms(self) -> Optional[int]: + """The maxTimeMS to use when running a commitTransaction command. + + .. versionadded:: 3.9 + """ + return self._max_commit_time_ms + + +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: + """Validate that an explicit session is not used with an unack'ed write. + + Returns the session to use for the next operation. + """ + if session: + if write_concern is not None and not write_concern.acknowledged: + # For unacknowledged writes without an explicit session, + # drivers SHOULD NOT use an implicit session. If a driver + # creates an implicit session for unacknowledged writes + # without an explicit session, the driver MUST NOT send the + # session ID. 
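+            # Hence: silently discard an implicit session, but reject an
+            # explicit one.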
+ if session._implicit: + return None + else: + raise ConfigurationError( + "Explicit sessions are incompatible with " + f"unacknowledged write concern: {write_concern!r}" + ) + return session + + +class _TransactionContext: + """Internal transaction context manager for start_transaction.""" + + def __init__(self, session: ClientSession): + self.__session = session + + def __enter__(self) -> _TransactionContext: + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self.__session.in_transaction: + if exc_val is None: + self.__session.commit_transaction() + else: + self.__session.abort_transaction() + + +class _TxnState: + NONE = 1 + STARTING = 2 + IN_PROGRESS = 3 + COMMITTED = 4 + COMMITTED_EMPTY = 5 + ABORTED = 6 + + +class _Transaction: + """Internal class to hold transaction information in a ClientSession.""" + + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient[Any]): + self.opts = opts + self.state = _TxnState.NONE + self.sharded = False + self.pinned_address: Optional[_Address] = None + self.conn_mgr: Optional[_ConnectionManager] = None + self.recovery_token = None + self.attempt = 0 + self.client = client + + def active(self) -> bool: + return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) + + def starting(self) -> bool: + return self.state == _TxnState.STARTING + + @property + def pinned_conn(self) -> Optional[Connection]: + if self.active() and self.conn_mgr: + return self.conn_mgr.conn + return None + + def pin(self, server: Server, conn: Connection) -> None: + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + conn.pin_txn() + self.conn_mgr = _ConnectionManager(conn, False) + + def unpin(self) -> None: + self.pinned_address = None + if self.conn_mgr: + self.conn_mgr.close() + self.conn_mgr = None + + def reset(self) -> None: + self.unpin() + self.state = _TxnState.NONE + self.sharded = False + self.recovery_token = None + self.attempt = 0 + + def __del__(self) -> None: + if self.conn_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.conn_mgr) + self.conn_mgr = None + + +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: + """Re-raise an exception with the UnknownTransactionCommitResult label.""" + exc._add_error_label("UnknownTransactionCommitResult") + raise + + +def _max_time_expired_error(exc: PyMongoError) -> bool: + """Return true if exc is a MaxTimeMSExpired error.""" + return isinstance(exc, OperationFailure) and exc.code == 50 + + +# From the transactions spec, all the retryable writes errors plus +# WriteConcernTimeout. +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( # type: ignore[type-arg] + [ + 64, # WriteConcernTimeout + 50, # MaxTimeMSExpired + ] +) + +# From the Convenient API for Transactions spec, with_transaction must +# halt retries after 120 seconds. +# This limit is non-configurable and was chosen to be twice the 60 second +# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. 
+_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 + + +def _within_time_limit(start_time: float) -> bool: + """Are we within the with_transaction retry limit?""" + return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + + +_T = TypeVar("_T") + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + + +class ClientSession: + """A session for ordering sequential operations. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + They can only be used by one thread or process at a time. A single + :class:`ClientSession` cannot be used to run multiple operations + concurrently. + + Should not be initialized directly by application developers - to create a + :class:`ClientSession`, call + :meth:`~pymongo.mongo_client.MongoClient.start_session`. + """ + + def __init__( + self, + client: MongoClient[Any], + server_session: Any, + options: SessionOptions, + implicit: bool, + ) -> None: + # A MongoClient, a _ServerSession, a SessionOptions, and a set. + self._client: MongoClient[Any] = client + self._server_session = server_session + self._options = options + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None + self._snapshot_time = None + # Is this an implicitly created session? + self._implicit = implicit + self._transaction = _Transaction(None, client) + # Is this session attached to a cursor? + self._attached_to_cursor = False + # Should we leave the session alive when the cursor is closed? + self._leave_alive = False + + def end_session(self) -> None: + """Finish this session. If a transaction has started, abort it. + + It is an error to use the session after the session has ended. + """ + self._end_session(lock=True) + + def _end_session(self, lock: bool) -> None: + if self._server_session is not None: + try: + if self.in_transaction: + self.abort_transaction() + # It's possible we're still pinned here when the transaction + # is in the committed state when the session is discarded. + self._unpin() + finally: + self._client._return_server_session(self._server_session) + self._server_session = None + + def _end_implicit_session(self) -> None: + # Implicit sessions can't be part of transactions or pinned connections + if not self._leave_alive and self._server_session is not None: + self._client._return_server_session(self._server_session) + self._server_session = None + + def _check_ended(self) -> None: + if self._server_session is None: + raise InvalidOperation("Cannot use ended session") + + def __enter__(self) -> ClientSession: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self._end_session(lock=True) + + @property + def client(self) -> MongoClient[Any]: + """The :class:`~pymongo.mongo_client.MongoClient` this session was + created from. 
+ """ + return self._client + + @property + def options(self) -> SessionOptions: + """The :class:`SessionOptions` this session was created with.""" + return self._options + + @property + def session_id(self) -> Mapping[str, Any]: + """A BSON document, the opaque server session identifier.""" + self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.session_id + + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + """The cluster time returned by the last operation executed + in this session. + """ + return self._cluster_time + + @property + def operation_time(self) -> Optional[Timestamp]: + """The operation time returned by the last operation executed + in this session. + """ + return self._operation_time + + def _inherit_option(self, name: str, val: _T) -> _T: + """Return the inherited TransactionOption value.""" + if val: + return val + txn_opts = self.options.default_transaction_options + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val + return getattr(self.client, name) + + def with_transaction( + self, + callback: Callable[[ClientSession], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: + """Execute a callback in a transaction. + + This method starts a transaction on this session, executes ``callback`` + once, and then commits the transaction. For example:: + + def callback(session): + orders = session.client.db.orders + inventory = session.client.db.inventory + orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, session=session) + + with client.start_session() as session: + session.with_transaction(callback) + + To pass arbitrary arguments to the ``callback``, wrap your callable + with a ``lambda`` like this:: + + def callback(session, custom_arg, custom_kwarg=None): + # Transaction operations... + + with client.start_session() as session: + session.with_transaction( + lambda s: callback(s, "custom_arg", custom_kwarg=1)) + + In the event of an exception, ``with_transaction`` may retry the commit + or the entire transaction, therefore ``callback`` may be invoked + multiple times by a single call to ``with_transaction``. Developers + should be mindful of this possibility when writing a ``callback`` that + modifies application state or has any other side-effects. + Note that even when the ``callback`` is invoked multiple times, + ``with_transaction`` ensures that the transaction will be committed + at-most-once on the server. + + The ``callback`` should not attempt to start new transactions, but + should simply run operations meant to be contained within a + transaction. The ``callback`` should also not commit the transaction; + this is handled automatically by ``with_transaction``. If the + ``callback`` does commit or abort the transaction without error, + however, ``with_transaction`` will return without taking further + action. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. 
+ Consequently, the ``callback`` must not attempt to execute multiple + operations concurrently. + + When ``callback`` raises an exception, ``with_transaction`` + automatically aborts the current transaction. When ``callback`` or + :meth:`~ClientSession.commit_transaction` raises an exception that + includes the ``"TransientTransactionError"`` error label, + ``with_transaction`` starts a new transaction and re-executes + the ``callback``. + + The ``callback`` MUST NOT silently handle command errors + without allowing such errors to propagate. Command errors may abort the + transaction on the server, and an attempt to commit the transaction will + be rejected with a ``NoSuchTransaction`` error. For more information see + the `transactions specification`_. + + When :meth:`~ClientSession.commit_transaction` raises an exception with + the ``"UnknownTransactionCommitResult"`` error label, + ``with_transaction`` retries the commit until the result of the + transaction is known. + + This method will cease retrying after 120 seconds has elapsed. This + timeout is not configurable and any exception raised by the + ``callback`` or by :meth:`ClientSession.commit_transaction` after the + timeout is reached will be re-raised. Applications that desire a + different timeout duration should not use this method. + + :param callback: The callable ``callback`` to run inside a transaction. + The callable must accept a single argument, this session. Note, + under certain error conditions the callback may be run multiple + times. + :param read_concern: The + :class:`~pymongo.read_concern.ReadConcern` to use for this + transaction. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use for this + transaction. + :param read_preference: The read preference to use for this + transaction. If ``None`` (the default) the :attr:`read_preference` + of this :class:`Database` is used. See + :mod:`~pymongo.read_preferences` for options. + + :return: The return value of the ``callback``. + + .. versionadded:: 3.9 + + .. _transactions specification: + https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback + """ + start_time = time.monotonic() + while True: + self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) + try: + ret = callback(self) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as exc: + if self.in_transaction: + self.abort_transaction() + if ( + isinstance(exc, PyMongoError) + and exc.has_error_label("TransientTransactionError") + and _within_time_limit(start_time) + ): + # Retry the entire transaction. + continue + raise + + if not self.in_transaction: + # Assume callback intentionally ended the transaction. + return ret + + while True: + try: + self.commit_transaction() + except PyMongoError as exc: + if ( + exc.has_error_label("UnknownTransactionCommitResult") + and _within_time_limit(start_time) + and not _max_time_expired_error(exc) + ): + # Retry the commit. + continue + + if exc.has_error_label("TransientTransactionError") and _within_time_limit( + start_time + ): + # Retry the entire transaction. + break + raise + + # Commit succeeded. 
+                return ret
+
+    def start_transaction(
+        self,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_preference: Optional[_ServerMode] = None,
+        max_commit_time_ms: Optional[int] = None,
+    ) -> ContextManager[Any]:
+        """Start a multi-statement transaction.
+
+        Takes the same arguments as :class:`TransactionOptions`.
+
+        .. versionchanged:: 3.9
+            Added the ``max_commit_time_ms`` option.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        if self.options.snapshot:
+            raise InvalidOperation("Transactions are not supported in snapshot sessions")
+
+        if self.in_transaction:
+            raise InvalidOperation("Transaction already in progress")
+
+        read_concern = self._inherit_option("read_concern", read_concern)
+        write_concern = self._inherit_option("write_concern", write_concern)
+        read_preference = self._inherit_option("read_preference", read_preference)
+        if max_commit_time_ms is None:
+            opts = self.options.default_transaction_options
+            if opts:
+                max_commit_time_ms = opts.max_commit_time_ms
+
+        self._transaction.opts = TransactionOptions(
+            read_concern, write_concern, read_preference, max_commit_time_ms
+        )
+        self._transaction.reset()
+        self._transaction.state = _TxnState.STARTING
+        self._start_retryable_write()
+        return _TransactionContext(self)
+
+    def commit_transaction(self) -> None:
+        """Commit a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
+            # Server transaction was never started, no need to send a command.
+            self._transaction.state = _TxnState.COMMITTED_EMPTY
+            return
+        elif state is _TxnState.ABORTED:
+            raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction")
+        elif state is _TxnState.COMMITTED:
+            # We're explicitly retrying the commit, move the state back to
+            # "in progress" so that in_transaction returns True.
+            self._transaction.state = _TxnState.IN_PROGRESS
+
+        try:
+            self._finish_transaction_with_retry("commitTransaction")
+        except ConnectionFailure as exc:
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern, so set the
+            # unknown commit error label.
+            exc._remove_error_label("TransientTransactionError")
+            _reraise_with_unknown_commit(exc)
+        except WTimeoutError as exc:
+            # We do not know if the commit has satisfied the provided write
+            # concern, so add the unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        except OperationFailure as exc:
+            if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
+                # The server reports errorLabels in this case.
+                raise
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern, so set the
+            # unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        finally:
+            self._transaction.state = _TxnState.COMMITTED
+
+    def abort_transaction(self) -> None:
+        """Abort a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state is _TxnState.STARTING:
+            # Server transaction was never started, no need to send a command.
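+            # Move straight to ABORTED locally; the server never saw this transaction.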
+ self._transaction.state = _TxnState.ABORTED + return + elif state is _TxnState.ABORTED: + raise InvalidOperation("Cannot call abortTransaction twice") + elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") + + try: + self._finish_transaction_with_retry("abortTransaction") + except (OperationFailure, ConnectionFailure): + # The transactions spec says to ignore abortTransaction errors. + pass + finally: + self._transaction.state = _TxnState.ABORTED + self._unpin() + + def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: + """Run commit or abort with one retry after any retryable error. + + :param command_name: Either "commitTransaction" or "abortTransaction". + """ + + def func( + _session: Optional[ClientSession], conn: Connection, _retryable: bool + ) -> dict[str, Any]: + return self._finish_transaction(conn, command_name) + + return self._client._retry_internal( + func, self, None, retryable=True, operation=command_name + ) + + def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: + self._transaction.attempt += 1 + opts = self._transaction.opts + assert opts + wc = opts.write_concern + cmd = {command_name: 1} + if command_name == "commitTransaction": + if opts.max_commit_time_ms and _csot.get_timeout() is None: + cmd["maxTimeMS"] = opts.max_commit_time_ms + + # Transaction spec says that after the initial commit attempt, + # subsequent commitTransaction commands should be upgraded to use + # w:"majority" and set a default value of 10 seconds for wtimeout. + if self._transaction.attempt > 1: + assert wc + wc_doc = wc.document + wc_doc["w"] = "majority" + wc_doc.setdefault("wtimeout", 10000) + wc = WriteConcern(**wc_doc) + + if self._transaction.recovery_token: + cmd["recoveryToken"] = self._transaction.recovery_token + + return self._client.admin._command( + conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) + + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + """Internal cluster time helper.""" + if self._cluster_time is None: + self._cluster_time = cluster_time + elif cluster_time is not None: + if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: + self._cluster_time = cluster_time + + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: + """Update the cluster time for this session. + + :param cluster_time: The + :data:`~pymongo.client_session.ClientSession.cluster_time` from + another `ClientSession` instance. + """ + if not isinstance(cluster_time, _Mapping): + raise TypeError( + f"cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}" + ) + if not isinstance(cluster_time.get("clusterTime"), Timestamp): + raise ValueError("Invalid cluster_time") + self._advance_cluster_time(cluster_time) + + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: + """Internal operation time helper.""" + if self._operation_time is None: + self._operation_time = operation_time + elif operation_time is not None: + if operation_time > self._operation_time: + self._operation_time = operation_time + + def advance_operation_time(self, operation_time: Timestamp) -> None: + """Update the operation time for this session. + + :param operation_time: The + :data:`~pymongo.client_session.ClientSession.operation_time` from + another `ClientSession` instance. 
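+
+        A sketch of carrying causal consistency from one session to another
+        (the database and collection names here are illustrative)::
+
+            with client.start_session() as s1, client.start_session() as s2:
+                s1.client.db.coll.insert_one({"x": 1}, session=s1)
+                # Make s2 causally consistent with s1's last write.
+                s2.advance_operation_time(s1.operation_time)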
+ """ + if not isinstance(operation_time, Timestamp): + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) + self._advance_operation_time(operation_time) + + def _process_response(self, reply: Mapping[str, Any]) -> None: + """Process a response to a command that was run with this session.""" + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct + if self.in_transaction and self._transaction.sharded: + recovery_token = reply.get("recoveryToken") + if recovery_token: + self._transaction.recovery_token = recovery_token + + @property + def has_ended(self) -> bool: + """True if this session is finished.""" + return self._server_session is None + + @property + def in_transaction(self) -> bool: + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ + return self._transaction.active() + + @property + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: + """The mongos address this transaction was created on.""" + if self._transaction.active(): + return self._transaction.pinned_address + return None + + @property + def _pinned_connection(self) -> Optional[Connection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server: Server, conn: Connection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) + + def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: + """Return read preference of this transaction or None.""" + if self.in_transaction: + assert self._transaction.opts + return self._transaction.opts.read_preference + return None + + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: Connection, + ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return + self._check_ended() + self._materialize(conn.logical_session_timeout_minutes) + if self.options.snapshot: + self._update_read_concern(command, conn) + + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id + + if is_retryable: + command["txnNumber"] = self._server_session.transaction_id + return + + if self.in_transaction: + if read_preference != ReadPreference.PRIMARY: + raise InvalidOperation( + f"read preference in a transaction must be primary, not: {read_preference!r}" + ) + + if self._transaction.state == _TxnState.STARTING: + # First command begins a new transaction. 
+ self._transaction.state = _TxnState.IN_PROGRESS + command["startTransaction"] = True + + assert self._transaction.opts + if self._transaction.opts.read_concern: + rc = self._transaction.opts.read_concern.document + if rc: + command["readConcern"] = rc + self._update_read_concern(command, conn) + + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False + + def _start_retryable_write(self) -> None: + self._check_ended() + self._server_session.inc_transaction_id() + + def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: Connection) -> None: + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time + if self.options.snapshot: + if conn.max_wire_version < 13: + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" + if self._snapshot_time is not None: + rc["atClusterTime"] = self._snapshot_time + + def __copy__(self) -> NoReturn: + raise TypeError("A ClientSession cannot be copied, create a new session instead") + + +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self) -> None: + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self) -> None: + self.dirty = True + + def inc_transaction_id(self) -> None: + self.started_retryable_write = True + + +class _ServerSession: + def __init__(self, generation: int): + # Ensure id is type 4, regardless of CodecOptions.uuid_representation. + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} + self.last_use = time.monotonic() + self._transaction_id = 0 + self.dirty = False + self.generation = generation + + def mark_dirty(self) -> None: + """Mark this session as dirty. + + A server session is marked dirty when a command fails with a network + error. Dirty sessions are later discarded from the server session pool. + """ + self.dirty = True + + def timed_out(self, session_timeout_minutes: Optional[int]) -> bool: + if session_timeout_minutes is None: + return False + + idle_seconds = time.monotonic() - self.last_use + + # Timed out if we have less than a minute to live. + return idle_seconds > (session_timeout_minutes - 1) * 60 + + @property + def transaction_id(self) -> Int64: + """Positive 64-bit integer.""" + return Int64(self._transaction_id) + + def inc_transaction_id(self) -> None: + self._transaction_id += 1 + + +class _ServerSessionPool(collections.deque): # type: ignore[type-arg] + """Pool of _ServerSession objects. + + This class is thread-safe. + """ + + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self.generation = 0 + + def reset(self) -> None: + self.generation += 1 + self.clear() + + def pop_all(self) -> list[_ServerSession]: + ids = [] + while True: + try: + ids.append(self.pop().session_id) + except IndexError: + break + return ids + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + # Although the Driver Sessions Spec says we only clear stale sessions + # in return_server_session, PyMongo can't take a lock when returning + # sessions from a __del__ method (like in Cursor.__die), so it can't + # clear stale sessions there. In case many sessions were returned via + # __del__, check for stale sessions here too. + self._clear_stale(session_timeout_minutes) + + # The most recently used sessions are on the left. 
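+        # Hand out the first session that has not timed out; expired ones are discarded.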
+ while True: + try: + s = self.popleft() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + return s + + return _ServerSession(self.generation) + + def return_server_session(self, server_session: _ServerSession) -> None: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.generation == self.generation and not server_session.dirty: + self.appendleft(server_session) + + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: + # Clear stale sessions. The least recently used are on the right. + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) + # The remaining sessions also haven't timed out. + break diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py new file mode 100644 index 0000000000..4e5f7d08fb --- /dev/null +++ b/pymongo/synchronous/collection.py @@ -0,0 +1,3639 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection level utilities for Mongo.""" +from __future__ import annotations + +import warnings +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot, common, helpers_shared, message +from pymongo.collation import validate_collation_or_none +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, + _Op, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.synchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.synchronous.bulk import _Bulk +from pymongo.synchronous.change_stream import CollectionChangeStream +from pymongo.synchronous.command_cursor import ( + CommandCursor, + RawBatchCommandCursor, +) +from pymongo.synchronous.cursor import ( + Cursor, + RawBatchCursor, +) +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline +from pymongo.write_concern 
import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean + +_IS_SYNC = True + +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] + + +class ReturnDocument: + """An enum used with + :meth:`~pymongo.collection.Collection.find_one_and_replace` and + :meth:`~pymongo.collection.Collection.find_one_and_update`. + """ + + BEFORE = False + """Return the original document before it was updated/replaced, or + ``None`` if no document matches the query. + """ + AFTER = True + """Return the updated/replaced or inserted document.""" + + +if TYPE_CHECKING: + import bson + from pymongo.collation import Collation + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.aggregation import _AggregationCommand + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + + +class Collection(common.BaseObject, Generic[_DocumentType]): + """A Mongo collection.""" + + def __init__( + self, + database: Database[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> None: + """Get / create a Mongo collection. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used + as options passed to the create command. See + :meth:`~pymongo.database.Database.create_collection` for valid + options. + + If `create` is ``True``, `collation` is specified, or any additional + keyword arguments are present, a ``create`` command will be + sent, using ``session`` if specified. Otherwise, a ``create`` command + will not be sent and the collection will be created implicitly on first + use. The optional ``session`` argument is *only* used for the ``create`` + command, it is not associated with the collection afterward. + + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: If ``True``, force collection + creation even without options being set. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) database.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) database.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) database.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) database.read_concern is used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. If a collation is provided, + it will be passed to the create collection command. + :param session: A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command. + :param kwargs: Additional keyword arguments will + be passed as options for the create collection command. + + .. 
versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + Removed the uuid_subtype attribute. + :class:`~pymongo.collection.Collection` no longer returns an + instance of :class:`~pymongo.collection.Collection` for attribute + names with leading underscores. You must use dict-style lookups + instead:: + + collection['__my_collection__'] + + Not: + + collection.__my_collection__ + + .. seealso:: The MongoDB documentation on `collections `_. + """ + super().__init__( + codec_options or database.codec_options, + read_preference or database.read_preference, + write_concern or database.write_concern, + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + from pymongo.synchronous.database import Database + + if not isinstance(database, Database): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") + + if not name or ".." in name: + raise InvalidName("collection names cannot be empty") + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) + if name[0] == "." or name[-1] == ".": + raise InvalidName("collection names must not start or end with '.': %r" % name) + if "\x00" in name: + raise InvalidName("collection names must not contain the null character") + + self._database: Database[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + + if create or kwargs: + if _IS_SYNC: + warnings.warn( + "The `create` and `kwargs` arguments to Collection are deprecated and will be removed in PyMongo 5.0", + DeprecationWarning, + stacklevel=2, + ) + self._create(kwargs, session) # type: ignore[unused-coroutine] + else: + raise ValueError("Collection does not support the `create` or `kwargs` arguments.") + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." 
+ ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + return Collection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`Collection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`Collection`.""" + return self._name + + @property + def database(self) -> Database[_DocumentType]: + """The :class:`~pymongo.database.Database` that this + :class:`Collection` is a part of. + """ + return self._database + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType] | Collection[_DocumentTypeArg]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Collection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Collection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Collection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Collection` + is used. 
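+
+        As a further sketch, several options can be changed at once (the
+        write concern shown is illustrative):
+
+        >>> from pymongo.write_concern import WriteConcern
+        >>> coll3 = coll1.with_options(
+        ...     read_preference=ReadPreference.SECONDARY,
+        ...     write_concern=WriteConcern(w="majority"))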
+ """ + return Collection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[ClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Collection' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> CollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.CollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + with db.collection.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.CollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.CollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.collection.Collection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This Collection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. 
+ + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionchanged:: 3.7 + Added the ``start_at_operation_time`` parameter. + + .. versionadded:: 3.6 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. 
_change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = CollectionChangeStream(
+            self,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events,
+        )
+
+        change_stream._initialize_cursor()
+        return change_stream
+
+    def _conn_for_writes(
+        self, session: Optional[ClientSession], operation: str
+    ) -> ContextManager[Connection]:
+        return self._database.client._conn_for_writes(session, operation)
+
+    def _command(
+        self,
+        conn: Connection,
+        command: MutableMapping[str, Any],
+        read_preference: Optional[_ServerMode] = None,
+        codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None,
+        check: bool = True,
+        allowable_errors: Optional[Sequence[Union[str, int]]] = None,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        collation: Optional[_CollationIn] = None,
+        session: Optional[ClientSession] = None,
+        retryable_write: bool = False,
+        user_fields: Optional[Any] = None,
+    ) -> Mapping[str, Any]:
+        """Internal command helper.
+
+        :param conn: A Connection instance.
+        :param command: The command itself, as a :class:`~bson.son.SON` instance.
+        :param read_preference: (optional) The read preference to use.
+        :param codec_options: (optional) An instance of
+            :class:`~bson.codec_options.CodecOptions`.
+        :param check: raise OperationFailure if there are errors
+        :param allowable_errors: errors to ignore if `check` is True
+        :param read_concern: (optional) An instance of
+            :class:`~pymongo.read_concern.ReadConcern`.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`.
+        :param collation: (optional) An instance of
+            :class:`~pymongo.collation.Collation`.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param retryable_write: True if this command is a retryable
+            write.
+        :param user_fields: Response fields that should be decoded
+            using the TypeDecoders from codec_options, passed to
+            bson._decode_all_selective.
+
+        :return: The result document.
+        """
+        with self._database.client._tmp_session(session) as s:
+            return conn.command(
+                self._database.name,
+                command,
+                read_preference or self._read_preference_for(session),
+                codec_options or self.codec_options,
+                check,
+                allowable_errors,
+                read_concern=read_concern,
+                write_concern=write_concern,
+                parse_write_concern_error=True,
+                collation=collation,
+                session=s,
+                client=self._database.client,
+                retryable_write=retryable_write,
+                user_fields=user_fields,
+            )
+
+    def _create_helper(
+        self,
+        name: str,
+        options: MutableMapping[str, Any],
+        collation: Optional[_CollationIn],
+        session: Optional[ClientSession],
+        encrypted_fields: Optional[Mapping[str, Any]] = None,
+        qev2_required: bool = False,
+    ) -> None:
+        """Sends a create command with the given options."""
+        cmd: dict[str, Any] = {"create": name}
+        if encrypted_fields:
+            cmd["encryptedFields"] = encrypted_fields
+
+        if options:
+            if "size" in options:
+                options["size"] = float(options["size"])
+            cmd.update(options)
+        with self._conn_for_writes(session, operation=_Op.CREATE) as conn:
+            if qev2_required and conn.max_wire_version < 21:
+                raise ConfigurationError(
+                    "Driver support of Queryable Encryption is incompatible with server. "
+                    "Upgrade server to use Queryable Encryption. 
" + f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + collation=collation, + session=session, + ) + + def _create( + self, + options: MutableMapping[str, Any], + session: Optional[ClientSession], + ) -> None: + collation = validate_collation_or_none(options.pop("collation", None)) + encrypted_fields = options.pop("encryptedFields", None) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self._create_helper( + _esc_coll_name(encrypted_fields, self._name), + opts, + None, + session, + qev2_required=True, + ) + self._create_helper(_ecoc_coll_name(encrypted_fields, self._name), opts, None, session) + self._create_helper( + self._name, options, collation, session, encrypted_fields=encrypted_fields + ) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self._create_helper(self._name, options, collation, session) + + @_csot.apply + def bulk_write( + self, + requests: Sequence[_WriteOp[_DocumentType]], + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + let: Optional[Mapping[str, Any]] = None, + ) -> BulkWriteResult: + """Send a batch of write operations to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + ... + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), + ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] + >>> result = db.test.bulk_write(requests) + >>> result.inserted_count + 1 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {2: ObjectId('54f62ee28891e756a6e1abd5')} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + + :param requests: A list of write operations (see examples above). + :param ordered: If ``True`` (the default) requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False`` requests + will be performed on the server in arbitrary order, possibly in + parallel, and all operations will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. 
Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + common.validate_list("requests", requests) + + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) + for request in requests: + try: + request._add_to_bulk(blk) + except AttributeError: + raise TypeError(f"{request!r} is not a valid request") from None + + write_concern = self._write_concern_for(session) + bulk_api_result = blk.execute(write_concern, session, _Op.INSERT) + if bulk_api_result is not None: + return BulkWriteResult(bulk_api_result, True) + return BulkWriteResult({}, False) + + def _insert_one( + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: Optional[bool], + session: Optional[ClientSession], + comment: Optional[Any] = None, + ) -> Any: + """Internal helper for inserting a single document.""" + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} + if comment is not None: + command["comment"] = comment + + def _insert_command( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> None: + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + + _check_write_command_response(result) + + self._database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) + + if not isinstance(doc, RawBSONDocument): + return doc.get("_id") + return None + + def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: Optional[bool] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: + """Insert a single document. + + >>> db.test.count_documents({'x': 1}) + 0 + >>> result = db.test.insert_one({'x': 1}) + >>> result.inserted_id + ObjectId('54f112defba522406c9cc208') + >>> db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} + + :param document: The document to insert. Must be a mutable mapping + type. If the document does not have an _id field one will be + added automatically. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. 
versionchanged:: 3.2
+            Added bypass_document_validation support
+
+        .. versionadded:: 3.0
+        """
+        common.validate_is_document_type("document", document)
+        if not (isinstance(document, RawBSONDocument) or "_id" in document):
+            document["_id"] = ObjectId()  # type: ignore[index]
+
+        write_concern = self._write_concern_for(session)
+        return InsertOneResult(
+            self._insert_one(
+                document,
+                ordered=True,
+                write_concern=write_concern,
+                op_id=None,
+                bypass_doc_val=bypass_document_validation,
+                session=session,
+                comment=comment,
+            ),
+            write_concern.acknowledged,
+        )
+
+    @_csot.apply
+    def insert_many(
+        self,
+        documents: Iterable[Union[_DocumentType, RawBSONDocument]],
+        ordered: bool = True,
+        bypass_document_validation: Optional[bool] = None,
+        session: Optional[ClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> InsertManyResult:
+        """Insert an iterable of documents.
+
+        >>> db.test.count_documents({})
+        0
+        >>> result = db.test.insert_many([{'x': i} for i in range(2)])
+        >>> result.inserted_ids
+        [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
+        >>> db.test.count_documents({})
+        2
+
+        :param documents: An iterable of documents to insert.
+        :param ordered: If ``True`` (the default) documents will be
+            inserted on the server serially, in the order provided. If an error
+            occurs all remaining inserts are aborted. If ``False``, documents
+            will be inserted on the server in arbitrary order, possibly in
+            parallel, and all document inserts will be attempted.
+        :param bypass_document_validation: (optional) If ``True``, allows the
+            write to opt-out of document level validation. Default is
+            ``False``.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        :return: An instance of :class:`~pymongo.results.InsertManyResult`.
+
+        .. seealso:: `Writes and ids `_
+
+        .. note:: `bypass_document_validation` requires server version
+            **>= 3.2**
+
+        .. versionchanged:: 4.1
+            Added ``comment`` parameter.
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+
+        .. versionchanged:: 3.2
+            Added bypass_document_validation support
+
+        .. 
versionadded:: 3.0 + """ + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): + raise TypeError("documents must be a non-empty list") + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: + """A generator that validates documents and handles _ids.""" + for document in documents: + common.validate_is_document_type("document", document) + if not isinstance(document, RawBSONDocument): + if "_id" not in document: + document["_id"] = ObjectId() # type: ignore[index] + inserted_ids.append(document["_id"]) + yield (message._INSERT, document) + + write_concern = self._write_concern_for(session) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) + blk.execute(write_concern, session, _Op.INSERT) + return InsertManyResult(inserted_ids, write_concern.acknowledged) + + def _update( + self, + conn: Connection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + validate_boolean("upsert", upsert) + collation = validate_collation_or_none(collation) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + update_doc["collation"] = collation + if array_filters is not None: + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + else: + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Update command. + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + # The command result has to be published for APM unmodified + # so we make a shallow copy here before adding updatedExisting. 
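+        # The copy also lets us safely rewrite the "upserted" field below.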
+ result = ( + conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + ).copy() + _check_write_command_response(result) + # Add the updatedExisting field for compatibility. + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True + else: + result["updatedExisting"] = False + # MongoDB >= 2.6.0 returns the upsert _id in an array + # element. Break it out for backward compatibility. + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] + + if not acknowledged: + return None + return result + + def _update_retryable( + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + operation: str, + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + + def _update( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: + return self._update( + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + sort=sort, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _update, + session, + operation, + ) + + def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Replace a single document matching the filter. + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + >>> result = db.test.replace_one({'x': 1}, {'y': 1}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + + The *upsert* option can be used to insert a new document if a matching + document does not exist. + + >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('54f11e5c8891e756a6e1abd4') + >>> db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. 
+ :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_replace(replacement) + if let is not None: + common.validate_is_mapping("let", let) + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + replacement, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update a single document matching the filter. + + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. + + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + + :param filter: A query that matches the document to update. 
+ :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Added ``bypass_document_validation`` support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update one or more documents that match the filter. + + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 3 + >>> result.modified_count + 3 + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. 
+ :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + multi=True, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def drop( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: + """Alias for :meth:`~pymongo.database.Database.drop_collection`. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. + + The following two calls are equivalent: + + >>> db.foo.drop() + >>> db.drop_collection("foo") + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.7 + :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
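+
+ For example, a call that also attaches a server-side comment (the ``foo``
+ collection name is illustrative)::
+
+ >>> db.foo.drop(comment="remove stale test data")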
+ """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + dbo.drop_collection( + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) + + def _delete( + self, + conn: Connection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + common.validate_is_mapping("filter", criteria) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + delete_doc = {"q": criteria, "limit": int(not multi)} + collation = validate_collation_or_none(collation) + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + delete_doc["hint"] = hint + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + + # Delete command. + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + _check_write_command_response(result) + return result + + def _delete_retryable( + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + + def _delete( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Mapping[str, Any]: + return self._delete( + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, + ) + + def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete a single document matching the filter. 
+ + >>> db.test.count_documents({'x': 1}) + 3 + >>> result = db.test.delete_one({'x': 1}) + >>> result.deleted_count + 1 + >>> db.test.count_documents({'x': 1}) + 2 + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + self._delete_retryable( + filter, + False, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete one or more documents matching the filter. + + >>> db.test.count_documents({'x': 1}) + 3 + >>> result = db.test.delete_many({'x': 1}) + >>> result.deleted_count + 3 + >>> db.test.count_documents({'x': 1}) + 0 + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. 
versionadded:: 3.0
+ """
+ write_concern = self._write_concern_for(session)
+ return DeleteResult(
+ self._delete_retryable(
+ filter,
+ True,
+ write_concern=write_concern,
+ collation=collation,
+ hint=hint,
+ session=session,
+ let=let,
+ comment=comment,
+ ),
+ write_concern.acknowledged,
+ )
+
+ def find_one(
+ self, filter: Optional[Any] = None, *args: Any, **kwargs: Any
+ ) -> Optional[_DocumentType]:
+ """Get a single document from the database.
+
+ All arguments to :meth:`find` are also valid arguments for
+ :meth:`find_one`, although any `limit` argument will be
+ ignored. Returns a single document, or ``None`` if no matching
+ document is found.
+
+ The :meth:`find_one` method obeys the :attr:`read_preference` of
+ this :class:`Collection`.
+
+ :param filter: a dictionary specifying
+ the query to be performed OR any other type to be used as
+ the value for a query for ``"_id"``.
+
+ :param args: any additional positional arguments
+ are the same as the arguments to :meth:`find`.
+
+ :param kwargs: any additional keyword arguments
+ are the same as the arguments to :meth:`find`.
+
+ .. code-block:: python
+
+ >>> collection.find_one(max_time_ms=100)
+
+ """
+ if filter is not None and not isinstance(filter, abc.Mapping):
+ filter = {"_id": filter}
+ cursor = self.find(filter, *args, **kwargs)
+ for result in cursor.limit(-1):
+ return result
+ return None
+
+ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]:
+ """Query the database.
+
+ The `filter` argument is a query document that all results
+ must match. For example:
+
+ >>> db.test.find({"hello": "world"})
+
+ only matches documents that have a key "hello" with value
+ "world". Matches can have other keys *in addition* to
+ "hello". The `projection` argument is used to specify a subset
+ of fields that should be included in the result documents. By
+ limiting results to a certain subset of fields you can cut
+ down on network traffic and decoding time.
+
+ Raises :class:`TypeError` if any of the arguments are of
+ improper type. Returns an instance of
+ :class:`~pymongo.cursor.Cursor` corresponding to this query.
+
+ Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+ If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed,
+ or use the cursor in a with statement::
+
+ with collection.find() as cursor:
+ for doc in cursor:
+ print(doc)
+
+ The :meth:`find` method obeys the :attr:`read_preference` of
+ this :class:`Collection`.
+
+ :param filter: A query document that selects which documents
+ to include in the result set. Can be an empty document to include
+ all documents.
+ :param projection: a list of field names that should be
+ returned in the result set or a dict specifying the fields
+ to include or exclude. If `projection` is a list, "_id" will
+ always be returned. Use a dict to exclude fields from
+ the result (e.g. projection={'_id': False}).
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param skip: the number of documents to omit (from
+ the start of the result set) when returning the results.
+ :param limit: the maximum number of results to
+ return. A limit of 0 (the default) is equivalent to setting no
+ limit.
+ :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param cursor_type: the type of cursor to return. The valid + options are defined by :class:`~pymongo.cursor.CursorType`: + + - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of + this find call will return a standard cursor over the result set. + - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this + find call will be a tailable cursor - tailable cursors are only + for use with capped collections. They are not closed when the + last data is retrieved but are kept open and the cursor location + marks the final document position. If more data is received + iteration of the cursor will continue from the last document + received. For details, see the `tailable cursor documentation + `_. + - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result + of this find call will be a tailable cursor with the await flag + set. The server will wait for a few seconds after returning the + full result set so that it can capture and return additional data + added during the query. + - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this + find call will be an exhaust cursor. MongoDB will stream batched + results to the client without waiting for the client to request + each batch, reducing latency. See notes on compatibility below. + + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + :param allow_partial_results: if True, mongos will return + partial results if some shards are down instead of returning an + error. + :param oplog_replay: **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. + :param batch_size: Limits the number of documents returned in + a single batch. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param return_key: If True, return only the index keys in + each document. + :param show_record_id: If True, adds a field ``$recordId`` in + each document with the storage engine's internal record identifier. + :param snapshot: **DEPRECATED** - If True, prevents the + cursor from returning a document more than once because of an + intervening write operation. + :param hint: An index, in the same format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the + proper index to use for the query. + :param max_time_ms: Specifies a time limit for a query + operation. If the specified time is exceeded, the operation will be + aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass + this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. + :param max_scan: **DEPRECATED** - The maximum number of + documents to scan. Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. + :param min: A list of field, limit pairs specifying the + inclusive lower bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. 
+ :param max: A list of field, limit pairs specifying the
+ exclusive upper bound for all keys of a specific index in order.
+ Pass this as an alternative to calling
+ :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must
+ also be passed to ensure the query utilizes the correct index.
+ :param comment: A string to attach to the query to help
+ interpret and trace the operation in the server logs and in profile
+ data. Pass this as an alternative to calling
+ :meth:`~pymongo.cursor.Cursor.comment` on the cursor.
+ :param allow_disk_use: if True, MongoDB may use temporary
+ disk files to store data exceeding the system memory limit while
+ processing a blocking sort operation. The option has no effect if
+ MongoDB can satisfy the specified sort using an index, or if the
+ blocking sort requires less memory than the 100 MiB limit. This
+ option is only supported on MongoDB 4.4 and above.
+
+ .. note:: There are a number of caveats to using
+ :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
+
+ - The `limit` option cannot be used with an exhaust cursor.
+
+ - Exhaust cursors are not supported by mongos and cannot be
+ used with a sharded cluster.
+
+ - A :class:`~pymongo.cursor.Cursor` instance created with the
+ :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
+ exclusive :class:`~socket.socket` connection to MongoDB. If the
+ :class:`~pymongo.cursor.Cursor` is discarded without being
+ completely iterated, the underlying :class:`~socket.socket`
+ connection will be closed and discarded without being returned to
+ the connection pool.
+
+ .. versionchanged:: 4.0
+ Removed the ``modifiers`` option.
+ Empty projections (e.g. ``{}`` or ``[]``) are passed to the server as-is,
+ rather than the previous behavior which substituted in a
+ projection of ``{"_id": 1}``. This means that an empty projection
+ will now return the entire document, not just the ``"_id"`` field.
+
+ .. versionchanged:: 3.11
+ Added the ``allow_disk_use`` option.
+ Deprecated the ``oplog_replay`` option. Support for this option is
+ deprecated in MongoDB 4.4. The query engine now automatically
+ optimizes queries against the oplog without requiring this
+ option to be set.
+
+ .. versionchanged:: 3.7
+ Deprecated the ``snapshot`` option, which is deprecated in MongoDB
+ 3.6 and removed in MongoDB 4.0.
+ Deprecated the ``max_scan`` option. Support for this option is
+ deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit
+ server-side execution time.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+
+ .. versionchanged:: 3.5
+ Added the options ``return_key``, ``show_record_id``, ``snapshot``,
+ ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and
+ ``comment``.
+ Deprecated the ``modifiers`` option.
+
+ .. versionchanged:: 3.4
+ Added support for the ``collation`` option.
+
+ .. versionchanged:: 3.0
+ Changed the parameter names ``spec``, ``fields``, ``timeout``, and
+ ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``,
+ and ``allow_partial_results`` respectively.
+ Added the ``cursor_type``, ``oplog_replay``, and ``modifiers``
+ options.
+ Removed the ``network_timeout``, ``read_preference``, ``tag_sets``,
+ ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``,
+ ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and
+ ``slave_okay`` parameters.
+ Removed ``compile_re`` option: PyMongo now always
+ represents BSON regular expressions as :class:`~bson.regex.Regex`
+ objects.
Use :meth:`~bson.regex.Regex.try_compile` to attempt to + convert from a BSON regular expression to a Python regular + expression object. + Soft deprecated the ``manipulate`` option. + + .. seealso:: The MongoDB documentation on `find `_. + """ + return Cursor(self, *args, **kwargs) + + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: + """Query the database and retrieve batches of raw BSON. + + Similar to the :meth:`find` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = db.test.find_raw_batches() + >>> for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: find_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("find_raw_batches does not support auto encryption") + return RawBatchCursor(self, *args, **kwargs) + + def _count_cmd( + self, + session: Optional[ClientSession], + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[Collation], + ) -> int: + """Internal count command helper.""" + res = self._command( + conn, + cmd, + read_preference=read_preference, + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + return int(res["n"]) + + def _aggregate_one_result( + self, + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + ) -> Optional[Mapping[str, Any]]: + """Internal helper to run an aggregate that returns a single result.""" + result = self._command( + conn, + cmd, + read_preference, + allowable_errors=[26], # Ignore NamespaceNotFound. + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + # cursor will not be present for NamespaceNotFound errors. + if "cursor" not in result: + return None + batch = result["cursor"]["firstBatch"] + return batch[0] if batch else None + + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: + """Get an estimate of the number of documents in this collection using + collection metadata. + + The :meth:`estimated_document_count` method is **not** supported in a + transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + `versioned API `_. 
Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + + .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ + """ + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"count": self._name} + cmd.update(kwargs) + return self._count_cmd(session, conn, read_preference, cmd, collation=None) + + return self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) + + def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: + """Count the number of documents in this collection. + + .. note:: For a fast count of the total documents in a collection see + :meth:`estimated_document_count`. + + The :meth:`count_documents` method is supported in a transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `skip` (int): The number of matching documents to skip before + returning results. + - `limit` (int): The maximum number of documents to count. Must be + a positive integer. If not provided, no limit is imposed. + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `hint` (string or list of tuples): The index to use. Specify either + the index name as a string or the index specification as a list of + tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). + + The :meth:`count_documents` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + .. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+-------------------------------------+ + | Operator | Replacement | + +=============+=====================================+ + | $where | `$expr`_ | + +-------------+-------------------------------------+ + | $near | `$geoWithin`_ with `$center`_ | + +-------------+-------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | + +-------------+-------------------------------------+ + + :param filter: A query document that selects which documents + to count in the collection. Can be an empty document to count all + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + + .. versionadded:: 3.7 + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. 
_$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ + """ + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + if "hint" in kwargs and not isinstance(kwargs["hint"], str): + kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) + result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) + if not result: + return 0 + return result["n"] + + return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) + + def _retryable_non_cursor_read( + self, + func: Callable[ + [Optional[ClientSession], Server, Connection, Optional[_ServerMode]], + T, + ], + session: Optional[ClientSession], + operation: str, + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self._database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s, operation) + + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. + + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 + + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ + """ + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return self._create_indexes(indexes, session, **kwargs) + + @_csot.apply + def _create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. 
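+
+ .. note:: ``commitQuorum`` requires MongoDB 4.4+ (wire version 9);
+ passing it when connected to an older server raises
+ :class:`~pymongo.errors.ConfigurationError`, as enforced below.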
+ """ + names = [] + with self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: + supports_quorum = conn.max_wire_version >= 9 + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in indexes: + if not isinstance(index, IndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.IndexModel" + ) + document = index.document + names.append(document["name"]) + yield document + + cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + if "commitQuorum" in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + write_concern=self._write_concern_for(session), + session=session, + ) + return names + + def create_index( + self, + keys: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> str: + """Creates an index on this collection. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str` and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + To create a single key ascending index on the key ``'mike'`` we just + use a string argument:: + + >>> my_collection.create_index("mike") + + For a compound index on ``'mike'`` descending and ``'eliot'`` + ascending we need to use a list of tuples:: + + >>> my_collection.create_index([("mike", pymongo.DESCENDING), + ... "eliot"]) + + All optional index creation parameters should be passed as + keyword arguments to this method. For example:: + + >>> my_collection.create_index([("mike", pymongo.DESCENDING)], + ... background=True) + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. 
+
+ See the MongoDB documentation for a full list of supported options by
+ server version.
+
+ .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
+ option is silently ignored by the server and unique index builds
+ using the option will fail if a duplicate value is detected.
+
+ .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
+ this collection is automatically applied to this operation.
+
+ :param keys: a single key or a list of (key, direction)
+ pairs specifying the index to create
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: any additional index creation
+ options (see the above list) should be passed as keyword
+ arguments.
+
+ .. versionchanged:: 4.4
+ Allow passing a list containing (key, direction) pairs
+ or keys for the ``keys`` parameter.
+ .. versionchanged:: 4.1
+ Added ``comment`` parameter.
+ .. versionchanged:: 3.11
+ Added the ``hidden`` option.
+ .. versionchanged:: 3.6
+ Added ``session`` parameter. Added support for passing maxTimeMS
+ in kwargs.
+ .. versionchanged:: 3.4
+ Apply this collection's write concern automatically to this operation
+ when connected to MongoDB >= 3.4. Support the `collation` option.
+ .. versionchanged:: 3.2
+ Added partialFilterExpression to support partial indexes.
+ .. versionchanged:: 3.0
+ Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
+ :meth:`create_index` no longer caches index names. Removed support
+ for the drop_dups and bucket_size aliases.
+
+ .. seealso:: The MongoDB documentation on `indexes `_.
+
+ .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/
+ """
+ cmd_options = {}
+ if "maxTimeMS" in kwargs:
+ cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
+ if comment is not None:
+ cmd_options["comment"] = comment
+ index = IndexModel(keys, **kwargs)
+ return (self._create_indexes([index], session, **cmd_options))[0]
+
+ def drop_indexes(
+ self,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Drops all indexes on this collection.
+
+ Can be used on non-existent collections or collections with no indexes.
+ Raises OperationFailure on an error.
+
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: optional arguments to the dropIndexes
+ command (like maxTimeMS) can be passed as keyword arguments.
+
+ .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
+ this collection is automatically applied to this operation.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter. Added support for arbitrary keyword
+ arguments.
+
+ .. versionchanged:: 3.4
+ Apply this collection's write concern automatically to this operation
+ when connected to MongoDB >= 3.4.
+ """
+ if comment is not None:
+ kwargs["comment"] = comment
+ self._drop_index("*", session=session, **kwargs)
+
+ @_csot.apply
+ def drop_index(
+ self,
+ index_or_name: _IndexKeyHint,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Drops the specified index on this collection.
+
+ Can be used on non-existent collections or collections with no
+ indexes. Raises OperationFailure on an error (e.g. trying to
+ drop an index that does not exist).
`index_or_name`
+ can be either an index name (as returned by `create_index`),
+ or an index specifier (as passed to `create_index`). An index
+ specifier should be a list of (key, direction) pairs. Raises
+ TypeError if `index_or_name` is not an instance of (str, list).
+
+ .. warning::
+
+ If a custom name was used on index creation (by
+ passing the `name` parameter to :meth:`create_index`) the index
+ **must** be dropped by name.
+
+ :param index_or_name: index (or name of index) to drop
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: optional arguments to the dropIndexes
+ command (like maxTimeMS) can be passed as keyword arguments.
+
+ .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
+ this collection is automatically applied to this operation.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter. Added support for arbitrary keyword
+ arguments.
+
+ .. versionchanged:: 3.4
+ Apply this collection's write concern automatically to this operation
+ when connected to MongoDB >= 3.4.
+
+ """
+ self._drop_index(index_or_name, session, comment, **kwargs)
+
+ @_csot.apply
+ def _drop_index(
+ self,
+ index_or_name: _IndexKeyHint,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ name = index_or_name
+ if isinstance(index_or_name, list):
+ name = helpers_shared._gen_index_name(index_or_name)
+
+ if not isinstance(name, str):
+ raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}")
+
+ cmd = {"dropIndexes": self._name, "index": name}
+ cmd.update(kwargs)
+ if comment is not None:
+ cmd["comment"] = comment
+ with self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn:
+ self._command(
+ conn,
+ cmd,
+ read_preference=ReadPreference.PRIMARY,
+ allowable_errors=["ns not found", 26],
+ write_concern=self._write_concern_for(session),
+ session=session,
+ )
+
+ def list_indexes(
+ self,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ ) -> CommandCursor[MutableMapping[str, Any]]:
+ """Get a cursor over the index documents for this collection.
+
+ >>> for index in db.test.list_indexes():
+ ... print(index)
+ ...
+ SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')])
+
+ Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+ If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed,
+ or use the cursor in a with statement::
+
+ with collection.list_indexes() as cursor:
+ for index in cursor:
+ print(index)
+
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+
+ :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`.
+
+ .. versionchanged:: 4.1
+ Added ``comment`` parameter.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+
+ .. 
versionadded:: 3.0 + """ + return self._list_indexes(session, comment) + + def _list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) + coll = cast( + Collection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + cmd = {"listIndexes": self._name, "cursor": {}} + if comment is not None: + cmd["comment"] = comment + + try: + cursor = ( + self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + with self._database.client._tmp_session(session) as s: + return self._database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) + + def index_information( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get information on this collection's indexes. + + Returns a dictionary where the keys are index names (as + returned by create_index()) and the values are dictionaries + containing information about each index. The dictionary is + guaranteed to contain at least a single key, ``"key"`` which + is a list of (key, direction) pairs specifying the index (as + passed to create_index()). It will also contain any other + metadata about the indexes, except for the ``"ns"`` and + ``"name"`` keys, which are cleaned. Example output might look + like this: + + >>> db.test.create_index("x", unique=True) + 'x_1' + >>> db.test.index_information() + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + cursor = self._list_indexes(session=session, comment=comment) + info = {} + for index in cursor: + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 + info[index.pop("name")] = index + return info + + def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. 
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.list_search_indexes() as cursor: + for index in cursor: + print(index) + + :param name: If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, + ) + cmd = _CollectionAggregationCommand( + coll, + CommandCursor, + pipeline, + kwargs, + comment=comment, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, + ) + + def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :param model: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(**model) + return (self._create_search_indexes([model], session, comment, **kwargs))[0] + + def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. 
versionadded:: 4.5
+ """
+ return self._create_search_indexes(models, session, comment, **kwargs)
+
+ def _create_search_indexes(
+ self,
+ models: list[SearchIndexModel],
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> list[str]:
+ if comment is not None:
+ kwargs["comment"] = comment
+
+ def gen_indexes() -> Iterator[Mapping[str, Any]]:
+ for index in models:
+ if not isinstance(index, SearchIndexModel):
+ raise TypeError(
+ f"{index!r} is not an instance of pymongo.operations.SearchIndexModel"
+ )
+ yield index.document
+
+ cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())}
+ cmd.update(kwargs)
+
+ with self._conn_for_writes(session, operation=_Op.CREATE_SEARCH_INDEXES) as conn:
+ resp = self._command(
+ conn,
+ cmd,
+ read_preference=ReadPreference.PRIMARY,
+ codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
+ )
+ return [index["name"] for index in resp["indexesCreated"]]
+
+ def drop_search_index(
+ self,
+ name: str,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Delete a search index by index name.
+
+ :param name: The name of the search index to be deleted.
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: optional arguments to the dropSearchIndex
+ command (like maxTimeMS) can be passed as keyword arguments.
+
+ .. note:: requires a MongoDB server version 7.0+ Atlas cluster.
+
+ .. versionadded:: 4.5
+ """
+ cmd = {"dropSearchIndex": self._name, "name": name}
+ cmd.update(kwargs)
+ if comment is not None:
+ cmd["comment"] = comment
+ with self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn:
+ self._command(
+ conn,
+ cmd,
+ read_preference=ReadPreference.PRIMARY,
+ allowable_errors=["ns not found", 26],
+ codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
+ )
+
+ def update_search_index(
+ self,
+ name: str,
+ definition: Mapping[str, Any],
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Update a search index by replacing the existing index definition with the provided definition.
+
+ :param name: The name of the search index to be updated.
+ :param definition: The new search index definition.
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: optional arguments to the updateSearchIndex
+ command (like maxTimeMS) can be passed as keyword arguments.
+
+ .. note:: requires a MongoDB server version 7.0+ Atlas cluster.
+
+ .. versionadded:: 4.5
+ """
+ cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition}
+ cmd.update(kwargs)
+ if comment is not None:
+ cmd["comment"] = comment
+ with self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn:
+ self._command(
+ conn,
+ cmd,
+ read_preference=ReadPreference.PRIMARY,
+ allowable_errors=["ns not found", 26],
+ codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
+ )
+
+ def options(
+ self,
+ session: Optional[ClientSession] = None,
+ comment: Optional[Any] = None,
+ ) -> MutableMapping[str, Any]:
+ """Get the options set on this collection.
+
+ Returns a dictionary of options and their values; see
+ :meth:`~pymongo.database.Database.create_collection` for more
+ information on the possible options. Returns an empty
+ dictionary if the collection has not been created yet.
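+
+ For example, for a capped collection created with
+ ``db.create_collection("log", capped=True, size=4096)``, the result
+ might look like::
+
+ >>> db.log.options()
+ {'capped': True, 'size': 4096}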
+
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ dbo = self._database.client.get_database(
+ self._database.name,
+ self.codec_options,
+ self.read_preference,
+ self.write_concern,
+ self.read_concern,
+ )
+ cursor = dbo.list_collections(session=session, filter={"name": self._name}, comment=comment)
+
+ result = None
+ for doc in cursor:
+ result = doc
+ break
+
+ if not result:
+ return {}
+
+ options = result.get("options", {})
+ assert options is not None
+ if "create" in options:
+ del options["create"]
+
+ return options
+
+ @_csot.apply
+ def _aggregate(
+ self,
+ aggregation_command: Type[_AggregationCommand],
+ pipeline: _Pipeline,
+ cursor_class: Type[CommandCursor], # type: ignore[type-arg]
+ session: Optional[ClientSession],
+ let: Optional[Mapping[str, Any]] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> CommandCursor[_DocumentType]:
+ if comment is not None:
+ kwargs["comment"] = comment
+ cmd = aggregation_command(
+ self,
+ cursor_class,
+ pipeline,
+ kwargs,
+ let,
+ user_fields={"cursor": {"firstBatch": 1}},
+ )
+
+ return self._database.client._retryable_read(
+ cmd.get_cursor,
+ cmd.get_read_preference(session), # type: ignore[arg-type]
+ session,
+ retryable=not cmd._performs_write,
+ operation=_Op.AGGREGATE,
+ )
+
+ def aggregate(
+ self,
+ pipeline: _Pipeline,
+ session: Optional[ClientSession] = None,
+ let: Optional[Mapping[str, Any]] = None,
+ comment: Optional[Any] = None,
+ **kwargs: Any,
+ ) -> CommandCursor[_DocumentType]:
+ """Perform an aggregation using the aggregation framework on this
+ collection.
+
+ The :meth:`aggregate` method obeys the :attr:`read_preference` of this
+ :class:`Collection`, except when ``$out`` or ``$merge`` are used on
+ MongoDB <5.0, in which case
+ :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used.
+
+ .. note:: This method does not support the 'explain' option. Please
+ use `PyMongoExplain `_
+ instead. An example is included in the `aggregation example `_
+ documentation.
+
+ .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
+ this collection is automatically applied to this operation.
+
+ Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database).
+ If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed,
+ or use the cursor in a with statement::
+
+ with collection.aggregate(pipeline) as cursor:
+ for doc in cursor:
+ print(doc)
+
+ :param pipeline: a list of aggregation pipeline stages
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param let: A dict of parameter names and values. Values must be
+ constant or closed expressions that do not reference document
+ fields. Parameters can then be accessed as variables in an
+ aggregate expression context (e.g. ``"$$var"``). This option is
+ only supported on MongoDB >= 5.0.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: extra `aggregate command`_ parameters.
+
+ All optional `aggregate command`_ parameters should be passed as
+ keyword arguments to this method.
Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `bypassDocumentValidation` (bool): If ``True``, allows the write to opt-out of document level validation. + + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. + .. versionchanged:: 3.9 + Apply this collection's read concern to pipelines containing the + `$out` stage when connected to MongoDB >= 4.2. + Added support for the ``$merge`` pipeline stage. + Aggregations that write always use read preference + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + .. versionchanged:: 3.6 + Added the `session` parameter. Added the `maxAwaitTimeMS` option. + Deprecated the `useCursor` option. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.0 + The :meth:`aggregate` method always returns a CommandCursor. The + pipeline argument must be a list. + + .. seealso:: `Aggregation `_ + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self._database.client._tmp_session(session) as s: + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + let=let, + comment=comment, + **kwargs, + ) + + def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> RawBatchCursor[_DocumentType]: + """Perform an aggregation and retrieve batches of raw BSON. + + Similar to the :meth:`aggregate` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = db.test.aggregate_raw_batches([ + ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) + >>> for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. 
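+ # Raw batches are returned as undecoded BSON, so automatic
+ # decryption cannot be applied to their contents; reject the
+ # combination up front.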
+ if self._database.client._encrypter: + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + with self._database.client._tmp_session(session) as s: + return cast( + RawBatchCursor[_DocumentType], + self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + **kwargs, + ), + ) + + @_csot.apply + def rename( + self, + new_name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: + """Rename this collection. + + If operating in auth mode, client must be authorized as an + admin to perform this operation. Raises :class:`TypeError` if + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` + if `new_name` is not a valid collection name. + + :param new_name: new name for this collection + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional arguments to the rename command + may be passed as keyword arguments to this helper method + (i.e. ``dropTarget=True``) + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + if not isinstance(new_name, str): + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") + + if not new_name or ".." in new_name: + raise InvalidName("collection names cannot be empty") + if new_name[0] == "." or new_name[-1] == ".": + raise InvalidName("collection names must not start or end with '.'") + if "$" in new_name and not new_name.startswith("oplog.$main"): + raise InvalidName("collection names must not contain '$'") + + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + write_concern = self._write_concern_for_cmd(cmd, session) + + with self._conn_for_writes(session, operation=_Op.RENAME) as conn: + with self._database.client._tmp_session(session) as s: + return conn.command( + "admin", + cmd, + write_concern=write_concern, + parse_write_concern_error=True, + session=s, + client=self._database.client, + ) + + def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, + **kwargs: Any, + ) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in this collection. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + All optional distinct parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow the count + command to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + The :meth:`distinct` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param key: name of the field for which we want to get the distinct + values + :param filter: A query document that specifies the documents + from which to retrieve the distinct values. 
+ :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). + :param kwargs: See list of options above. + + .. versionchanged:: 4.12 + Added ``hint`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + """ + if not isinstance(key, str): + raise TypeError(f"key must be an instance of str, not {type(key)}") + if filter is not None: + if "query" in kwargs: + raise ConfigurationError("can't pass both filter and query") + kwargs["query"] = filter + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] + return ( + self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) + )["values"] + + return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) + + def _find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Internal findAndModify helper.""" + common.validate_is_mapping("filter", filter) + if not isinstance(return_document, bool): + raise ValueError( + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + write_concern = self._write_concern_for_cmd(kwargs, session) + + def _find_and_modify_helper( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert + acknowledged = write_concern.acknowledged + if array_filters is not None: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." 
+ ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint + out = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) + _check_write_command_response(out) + + return out.get("value") + + return self._database.client._retryable_write( + write_concern.acknowledged, + _find_and_modify_helper, + session, + operation=_Op.FIND_AND_MODIFY, + ) + + def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and deletes it, returning the document. + + >>> db.test.count_documents({'x': 1}) + 2 + >>> db.test.find_one_and_delete({'x': 1}) + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + >>> db.test.count_documents({'x': 1}) + 1 + + If multiple documents match *filter*, a *sort* can be applied. + + >>> for doc in db.test.find({'x': 1}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> db.test.find_one_and_delete( + ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) + {'x': 1, '_id': 2} + + The *projection* option can be used to limit the fields returned. + + >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + {'x': 1} + + :param filter: A query that matches the document to delete. + :param projection: a list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is deleted. + :param hint: An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.2 + Respects write concern. + + .. 
warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and replaces it, returning either the + original or the replaced document. + + The :meth:`find_one_and_replace` method differs from + :meth:`find_one_and_update` by replacing the document matched by + *filter*, rather than modifying the existing document. + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) + {'x': 1, '_id': 0} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is replaced. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was replaced, or ``None`` + if no document matches. If + :attr:`ReturnDocument.AFTER`, returns the replaced + or inserted document. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. 
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        .. versionchanged:: 3.4
+           Added the ``collation`` option.
+        .. versionchanged:: 3.2
+           Respects write concern.
+
+        .. warning:: Starting in PyMongo 3.2, this command uses the
+           :class:`~pymongo.write_concern.WriteConcern` of this
+           :class:`~pymongo.collection.Collection` when connected to MongoDB >=
+           3.2. Note that using an elevated write concern with this command may
+           be slower compared to using the default write concern.
+
+        .. versionadded:: 3.0
+        """
+        common.validate_ok_for_replace(replacement)
+        kwargs["update"] = replacement
+        if comment is not None:
+            kwargs["comment"] = comment
+        return self._find_and_modify(
+            filter,
+            projection,
+            sort,
+            upsert,
+            return_document,
+            let=let,
+            hint=hint,
+            session=session,
+            **kwargs,
+        )
+
+    def find_one_and_update(
+        self,
+        filter: Mapping[str, Any],
+        update: Union[Mapping[str, Any], _Pipeline],
+        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
+        sort: Optional[_IndexList] = None,
+        upsert: bool = False,
+        return_document: bool = ReturnDocument.BEFORE,
+        array_filters: Optional[Sequence[Mapping[str, Any]]] = None,
+        hint: Optional[_IndexKeyHint] = None,
+        session: Optional[ClientSession] = None,
+        let: Optional[Mapping[str, Any]] = None,
+        comment: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> _DocumentType:
+        """Finds a single document and updates it, returning either the
+        original or the updated document.
+
+        >>> db.test.find_one_and_update(
+        ...    {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
+        {'_id': 665, 'done': False, 'count': 25}
+
+        Returns ``None`` if no document matches the filter.
+
+        >>> db.test.find_one_and_update(
+        ...    {'_exists': False}, {'$inc': {'count': 1}})
+
+        When the filter matches, by default :meth:`find_one_and_update`
+        returns the original version of the document before the update was
+        applied. To return the updated (or inserted in the case of
+        *upsert*) version of the document instead, use the *return_document*
+        option.
+
+        >>> from pymongo import ReturnDocument
+        >>> db.example.find_one_and_update(
+        ...     {'_id': 'userid'},
+        ...     {'$inc': {'seq': 1}},
+        ...     return_document=ReturnDocument.AFTER)
+        {'_id': 'userid', 'seq': 1}
+
+        You can limit the fields returned with the *projection* option.
+
+        >>> db.example.find_one_and_update(
+        ...     {'_id': 'userid'},
+        ...     {'$inc': {'seq': 1}},
+        ...     projection={'seq': True, '_id': False},
+        ...     return_document=ReturnDocument.AFTER)
+        {'seq': 2}
+
+        The *upsert* option can be used to create the document if it doesn't
+        already exist.
+
+        >>> db.example.delete_many({}).deleted_count
+        1
+        >>> db.example.find_one_and_update(
+        ...     {'_id': 'userid'},
+        ...     {'$inc': {'seq': 1}},
+        ...     projection={'seq': True, '_id': False},
+        ...     upsert=True,
+        ...     return_document=ReturnDocument.AFTER)
+        {'seq': 1}
+
+        If multiple documents match *filter*, a *sort* can be applied.
+
+        >>> for doc in db.test.find({'done': True}):
+        ...     print(doc)
+        ...
+        {'_id': 665, 'done': True, 'result': {'count': 26}}
+        {'_id': 701, 'done': True, 'result': {'count': 17}}
+        >>> db.test.find_one_and_update(
+        ...     {'done': True},
+        ...     {'$set': {'final': True}},
+        ...     sort=[('_id', pymongo.DESCENDING)])
+        {'_id': 701, 'done': True, 'result': {'count': 17}}
+
+        :param filter: A query that matches the document to update.
+        :param update: The update operations to apply.
+ :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is updated. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was updated. If + :attr:`ReturnDocument.AFTER`, returns the updated + or inserted document. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` options. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py new file mode 100644 index 0000000000..a09a67efc9 --- /dev/null +++ b/pymongo/synchronous/command_cursor.py @@ -0,0 +1,472 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""CommandCursor class to iterate over command results.""" +from __future__ import annotations + +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterator, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) +from pymongo.response import PinnedResponse +from pymongo.synchronous.cursor import _ConnectionManager +from pymongo.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +class CommandCursor(Generic[_DocumentType]): + """A cursor / iterator over command cursors.""" + + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self._sock_mgr: Any = None + self._collection: Collection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout + self._session = session + if self._session is not None: + self._session._attached_to_cursor = True + self._killed = self._id == 0 + self._comment = comment + if self._killed: + self._end_session() + + if "ns" in cursor_info: # noqa: SIM401 + self._ns = cursor_info["ns"] + else: + self._ns = collection.full_name + + self.batch_size(batch_size) + + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + + def __del__(self) -> None: + self._die_no_lock() + + def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + + :param batch_size: The size of each batch of results requested. 
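+
+        A short illustrative sketch (assumes an existing aggregation
+        ``pipeline``)::
+
+            cursor = db.test.aggregate(pipeline)
+            cursor.batch_size(100)  # each getMore now requests up to 100 docs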
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + + self._batch_size = batch_size == 1 and 2 or batch_size + return self + + def _has_next(self) -> bool: + """Returns `True` if the cursor has documents remaining from the + previous batch. + """ + return len(self._data) > 0 + + @property + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: + """Retrieve the postBatchResumeToken from the response to a + changeStream aggregate or getMore. + """ + return self._postbatchresumetoken + + def _maybe_pin_connection(self, conn: Connection) -> None: + client = self._collection.database.client + if not client._should_pin_cursor(self._session): + return + if not self._sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self._id == 0: + conn_mgr.close() + else: + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]: + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. 
+ cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _die_lock(self) -> None: + """Closes this cursor.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _end_session(self) -> None: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session._end_implicit_session() + self._session = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def _send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" + client = self._collection.database.client + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + # Return the session and pinned connection, if necessary. + self.close() + raise + except ConnectionFailure: + # Don't send killCursors because the cursor is already closed. + self._killed = True + # Return the session and pinned connection, if necessary. + self.close() + raise + except Exception: + self.close() + raise + + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] + else: + documents = response.docs + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + + if self._id == 0: + self.close() + self._data = deque(documents) + + def _refresh(self) -> int: + """Refreshes the cursor with more data from the server. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. + """ + if len(self._data) or self._killed: + return len(self._data) + + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + self._send_message( + self._getmore_class( + dbname, + collname, + self._batch_size, + self._id, + self._collection.codec_options, + read_pref, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + False, + self._comment, + ) + ) + else: # Cursor id is zero nothing else to return + self._die_lock() + + return len(self._data) + + def __iter__(self) -> Iterator[_DocumentType]: + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + # Block until a document is returnable. 
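+        # Each _try_next(True) call runs at most one getMore; a tailable
+        # cursor can return no document while still being alive, hence the
+        # loop rather than a single call.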
+        while self.alive:
+            doc = self._try_next(True)
+            if doc is not None:
+                return doc
+
+        raise StopIteration
+
+    def __next__(self) -> _DocumentType:
+        return self.next()
+
+    def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]:
+        """Advance the cursor blocking for at most one getMore command."""
+        if not len(self._data) and not self._killed and get_more_allowed:
+            self._refresh()
+        if len(self._data):
+            return self._data.popleft()
+        else:
+            return None
+
+    def _next_batch(self, result: list, total: Optional[int] = None) -> bool:  # type: ignore[type-arg]
+        """Get all or some available documents from the cursor."""
+        if not len(self._data) and not self._killed:
+            self._refresh()
+        if len(self._data):
+            if total is None:
+                result.extend(self._data)
+                self._data.clear()
+            else:
+                for _ in range(min(len(self._data), total)):
+                    result.append(self._data.popleft())
+            return True
+        else:
+            return False
+
+    def try_next(self) -> Optional[_DocumentType]:
+        """Advance the cursor without blocking indefinitely.
+
+        This method returns the next document without waiting
+        indefinitely for data.
+
+        If no document is cached locally then this method runs a single
+        getMore command. If the getMore yields any documents, the next
+        document is returned, otherwise, if the getMore returns no documents
+        (because there is no additional data) then ``None`` is returned.
+
+        :return: The next document or ``None`` when no document is available
+            after running a single getMore or when the cursor is closed.
+
+        .. versionadded:: 4.5
+        """
+        return self._try_next(get_more_allowed=True)
+
+    def __enter__(self) -> CommandCursor[_DocumentType]:
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        self.close()
+
+    @_csot.apply
+    def to_list(self, length: Optional[int] = None) -> list[_DocumentType]:
+        """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``.
+
+        To use::
+
+        >>> cursor.to_list()
+
+        Or, to read at most ``n`` items from the cursor::
+
+        >>> cursor.to_list(n)
+
+        If the cursor is empty or has no more results, an empty list will be returned.
+
+        .. versionadded:: 4.9
+        """
+        res: list[_DocumentType] = []
+        remaining = length
+        if isinstance(length, int) and length < 1:
+            raise ValueError("to_list() length must be greater than 0")
+        while self.alive:
+            if not self._next_batch(res, remaining):
+                break
+            if length is not None:
+                remaining = length - len(res)
+            if remaining == 0:
+                break
+        return res
+
+
+class RawBatchCommandCursor(CommandCursor[_DocumentType]):
+    _getmore_class = _RawBatchGetMore
+
+    def __init__(
+        self,
+        collection: Collection[_DocumentType],
+        cursor_info: Mapping[str, Any],
+        address: Optional[_Address],
+        batch_size: int = 0,
+        max_await_time_ms: Optional[int] = None,
+        session: Optional[ClientSession] = None,
+        comment: Any = None,
+    ) -> None:
+        """Create a new cursor / iterator over raw batches of BSON data.
+
+        Should not be called directly by application developers -
+        see :meth:`~pymongo.collection.Collection.aggregate_raw_batches`
+        instead.
+
+        .. seealso:: The MongoDB documentation on `cursors `_.
+ """ + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[dict[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCommandCursor") diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py new file mode 100644 index 0000000000..fcd8ebeb1d --- /dev/null +++ b/pymongo/synchronous/cursor.py @@ -0,0 +1,1370 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + +import copy +import warnings +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams +from bson.code import Code +from bson.son import SON +from pymongo import _csot, helpers_shared +from pymongo.collation import validate_collation_or_none +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, +) +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" + + def __init__(self, conn: Connection, more_to_come: bool): + self.conn: Optional[Connection] = conn + self.more_to_come = more_to_come + self._lock = _create_lock() + + def update_exhaust(self, more_to_come: bool) -> None: + 
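+        # Record whether the server has more exhaust responses in flight;
+        # the connection may only be unpinned once more_to_come is False.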
self.more_to_come = more_to_come + + def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + self.conn.unpin() + self.conn = None + + +class Cursor(Generic[_DocumentType]): + _query_class = _Query + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[ClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None, + ) -> None: + """Create a new cursor. + + Should not be called directly by application developers - see + :meth:`~pymongo.collection.Collection.find` instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + # Initialize all attributes used in __del__ before possibly raising + # an error to avoid attribute errors during garbage collection. + self._collection: Collection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[ClientSession] + + if session: + self._session = session + self._session._attached_to_cursor = True + else: + self._session = None + + spec: Mapping[str, Any] = filter or {} + validate_is_mapping("filter", spec) + if not isinstance(skip, int): + raise TypeError(f"skip must be an instance of int, not {type(skip)}") + if not isinstance(limit, int): + raise TypeError(f"limit must be an instance of int, not {type(limit)}") + validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and self._session and self._session._implicit: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): + raise ValueError("not a valid value for cursor_type") + validate_boolean("allow_partial_results", allow_partial_results) + validate_boolean("oplog_replay", oplog_replay) + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. 
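+        # validate_boolean returns the validated value, so a user-supplied
+        # True/False is kept while an omitted option stays None and is never
+        # sent to the server.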
+ if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) + + if projection is not None: + projection = helpers_shared._fields_list_to_dict(projection, "projection") + + if let is not None: + validate_is_document_type("let", let) + + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers_shared._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) + + # This is ugly. People want to be able to do cursor[5:5] and + # get an empty result set (old behavior was an + # exception). It's hard to do that right, though, because the + # server uses limit(0) to mean 'no limit'. So we set __empty + # in that case and check for it when iterating. We also unset + # it anytime we change __limit. + self._empty = False + + self._data: deque = deque() # type: ignore[type-arg] + self._address: Optional[_Address] = None + self._retrieved = 0 + + self._codec_options = collection.codec_options + # Read preference is set when the initial find is sent. + self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern + + self._query_flags = cursor_type + self._cursor_type = cursor_type + if no_cursor_timeout: + self._query_flags |= _QUERY_OPTIONS["no_timeout"] + if allow_partial_results: + self._query_flags |= _QUERY_OPTIONS["partial"] + if oplog_replay: + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] + + # The namespace to use for find/getMore commands. + self._dbname = collection.database.name + self._collname = collection.name + + # Checking exhaust cursor support requires network IO + if _IS_SYNC: + self._exhaust_checked = True + self._supports_exhaust() # type: ignore[unused-coroutine] + else: + self._exhaust = cursor_type == CursorType.EXHAUST + self._exhaust_checked = False + + def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True + + @property + def collection(self) -> Collection[_DocumentType]: + """The :class:`~pymongo.collection.Collection` that this + :class:`Cursor` is iterating. + """ + return self._collection + + @property + def retrieved(self) -> int: + """The number of documents retrieved so far.""" + return self._retrieved + + def __del__(self) -> None: + self._die_no_lock() + + def clone(self) -> Cursor[_DocumentType]: + """Get a clone of this cursor. + + Returns a new Cursor instance with options matching those that have + been set on the current instance. The clone will be completely + unevaluated, even if the current instance has been partially or + completely evaluated. 
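+
+        An illustrative sketch::
+
+            base = db.test.find({"x": 1}).sort("y")
+            capped = base.clone().limit(10)  # ``base`` itself is unaffected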
+ """ + return self._clone(True) + + def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: # type: ignore[type-arg] + """Internal clone helper.""" + if not base: + if self._session and not self._session._implicit: + base = self._clone_base(self._session) + else: + base = self._clone_base(None) + + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + "cursor_type", + ) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone + } + if deepcopy: + data = self._deepcopy(data) + base.__dict__.update(data) + return base + + def _clone_base(self, session: Optional[ClientSession]) -> Cursor: # type: ignore[type-arg] + """Creates an empty Cursor object for information to be copied into.""" + return self.__class__(self._collection, session=session) + + def _query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: + operators["$explain"] = True + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: + # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot + + if operators: + # Make a shallow copy so we can cleanly rewind or clone. + spec = dict(self._spec) + + # Allow-listed commands must be wrapped in $query. + if "$query" not in spec: + # $query has to come first + spec = {"$query": spec} + + spec.update(operators) + return spec + # Have to wrap with $query if "query" is the first key. + # We can't just use $query anytime "query" is a key as + # that breaks commands like count and find_and_modify. + # Checking spec.keys()[0] covers the case that the spec + # was passed as an instance of SON or OrderedDict. + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} + + return self._spec + + def _check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" + if self._retrieved or self._id is not None: + raise InvalidOperation("cannot set options after executing query") + + def add_option(self, mask: int) -> Cursor[_DocumentType]: + """Set arbitrary query flags using a bitmask. 
+ + To set the tailable flag: + cursor.add_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + self._exhaust = True + + self._query_flags |= mask + return self + + def remove_option(self, mask: int) -> Cursor[_DocumentType]: + """Unset arbitrary query flags using a bitmask. + + To unset the tailable flag: + cursor.remove_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + self._exhaust = False + + self._query_flags &= ~mask + return self + + def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") + self._check_okay_to_chain() + + self._allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> Cursor[_DocumentType]: + """Limits the number of results to be returned by this cursor. + + Raises :exc:`TypeError` if `limit` is not an integer. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. The last `limit` applied to this cursor + takes precedence. A limit of ``0`` is equivalent to no limit. + + :param limit: the number of results to return + + .. seealso:: The MongoDB documentation on `limit `_. + """ + if not isinstance(limit, int): + raise TypeError(f"limit must be an integer, not {type(limit)}") + if self._exhaust: + raise InvalidOperation("Can't use limit and exhaust together.") + self._check_okay_to_chain() + + self._empty = False + self._limit = limit + return self + + def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. The last `batch_size` + applied to this cursor takes precedence. + + :param batch_size: The size of each batch of results requested. 
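+
+        For example (an illustrative sketch; the last applied value wins)::
+
+            cursor = db.test.find().batch_size(5).batch_size(100)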
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + self._check_okay_to_chain() + + self._batch_size = batch_size + return self + + def skip(self, skip: int) -> Cursor[_DocumentType]: + """Skips the first `skip` results of this cursor. + + Raises :exc:`TypeError` if `skip` is not an integer. Raises + :exc:`ValueError` if `skip` is less than ``0``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. The last `skip` applied to this cursor takes + precedence. + + :param skip: the number of results to skip + """ + if not isinstance(skip, int): + raise TypeError(f"skip must be an integer, not {type(skip)}") + if skip < 0: + raise ValueError("skip must be >= 0") + self._check_okay_to_chain() + + self._skip = skip + return self + + def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a query operation. If the specified + time is exceeded, the operation will be aborted and + :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` + is ``None`` no limit is applied. + + Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. + + :param max_time_ms: the time limit after which the operation is aborted + """ + if not isinstance(max_time_ms, int) and max_time_ms is not None: + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") + self._check_okay_to_chain() + + self._max_time_ms = max_time_ms + return self + + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a getMore operation on a + :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other + types of cursor max_await_time_ms is ignored. + + Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or + ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. + + .. note:: `max_await_time_ms` requires server version **>= 3.2** + + :param max_await_time_ms: the time limit after which the operation is + aborted + + .. versionadded:: 3.2 + """ + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + self._check_okay_to_chain() + + # Ignore max_await_time_ms if not tailable or await_data is False. + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms + + return self + + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> Cursor[_DocumentType]: + ... + + def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_DocumentType]]: + """Get a single document or a slice of documents from this cursor. + + .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! 
+            for idx in range(10):
+                print(cursor[idx])
+
+        Raises :class:`~pymongo.errors.InvalidOperation` if this
+        cursor has already been used.
+
+        To get a single document use an integral index, e.g.::
+
+          >>> db.test.find()[50]
+
+        An :class:`IndexError` will be raised if the index is negative
+        or greater than the number of documents in this cursor. Any
+        limit previously applied to this cursor will be ignored.
+
+        To get a slice of documents use a slice index, e.g.::
+
+          >>> db.test.find()[20:25]
+
+        This will return this cursor with a limit of ``5`` and skip of
+        ``20`` applied. Using a slice index will override any prior
+        limits or skips applied to this cursor (including those
+        applied through previous calls to this method). Raises
+        :class:`IndexError` when the slice has a step, a negative
+        start value, or a stop value less than or equal to the start
+        value.
+
+        :param index: An integer or slice index to be applied to this cursor
+        """
+        if _IS_SYNC:
+            self._check_okay_to_chain()
+            self._empty = False
+            if isinstance(index, slice):
+                if index.step is not None:
+                    raise IndexError("Cursor instances do not support slice steps")
+
+                skip = 0
+                if index.start is not None:
+                    if index.start < 0:
+                        raise IndexError("Cursor instances do not support negative indices")
+                    skip = index.start
+
+                if index.stop is not None:
+                    limit = index.stop - skip
+                    if limit < 0:
+                        raise IndexError(
+                            "stop index must be greater than start index for slice %r" % index
+                        )
+                    if limit == 0:
+                        self._empty = True
+                else:
+                    limit = 0
+
+                self._skip = skip
+                self._limit = limit
+                return self
+
+            if isinstance(index, int):
+                if index < 0:
+                    raise IndexError("Cursor instances do not support negative indices")
+                clone = self.clone()
+                clone.skip(index + self._skip)
+                clone.limit(-1)  # use a hard limit
+                clone._query_flags &= ~CursorType.TAILABLE_AWAIT  # PYTHON-1371
+                for doc in clone:  # type: ignore[attr-defined]
+                    return doc
+                raise IndexError("no such item for Cursor instance")
+            raise TypeError("index %r cannot be applied to Cursor instances" % index)
+        else:
+            raise IndexError("Cursor does not support indexing")
+
+    def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]:
+        """**DEPRECATED** - Limit the number of documents to scan when
+        performing the query.
+
+        Raises :class:`~pymongo.errors.InvalidOperation` if this
+        cursor has already been used. Only the last :meth:`max_scan`
+        applied to this cursor has any effect.
+
+        :param max_scan: the maximum number of documents to scan
+
+        .. versionchanged:: 3.7
+          Deprecated :meth:`max_scan`. Support for this option is deprecated in
+          MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server-side
+          execution time.
+        """
+        self._check_okay_to_chain()
+        self._max_scan = max_scan
+        return self
+
+    def max(self, spec: _Sort) -> Cursor[_DocumentType]:
+        """Adds the ``max`` operator, which specifies the upper bound for a
+        specific index.
+
+        When using ``max``, :meth:`~hint` should also be configured to ensure
+        the query uses the expected index, and starting in MongoDB 4.2
+        :meth:`~hint` will be required.
+
+        :param spec: a list of (field, limit) pairs specifying the exclusive
+            upper bound for all keys of a specific index in order.
+
+        .. versionchanged:: 3.8
+           Deprecated cursors that use ``max`` without a :meth:`~hint`.
+
+        .. versionadded:: 2.7
+        """
+        if not isinstance(spec, (list, tuple)):
+            raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}")
+
+        self._check_okay_to_chain()
+        self._max = dict(spec)
+        return self
+
+    def min(self, spec: _Sort) -> Cursor[_DocumentType]:
+        """Adds the ``min`` operator, which specifies the lower bound for a
+        specific index.
+
+        When using ``min``, :meth:`~hint` should also be configured to ensure
+        the query uses the expected index, and starting in MongoDB 4.2
+        :meth:`~hint` will be required.
+
+        :param spec: a list of (field, limit) pairs specifying the inclusive
+            lower bound for all keys of a specific index in order.
+
+        .. versionchanged:: 3.8
+           Deprecated cursors that use ``min`` without a :meth:`~hint`.
+
+        .. versionadded:: 2.7
+        """
+        if not isinstance(spec, (list, tuple)):
+            raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}")
+
+        self._check_okay_to_chain()
+        self._min = dict(spec)
+        return self
+
+    def sort(
+        self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
+    ) -> Cursor[_DocumentType]:
+        """Sorts this cursor's results.
+
+        Pass a field name and a direction, either
+        :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
+
+            for doc in collection.find().sort('field', pymongo.ASCENDING):
+                print(doc)
+
+        To sort by multiple fields, pass a list of (key, direction) pairs.
+        If just a name is given, :data:`~pymongo.ASCENDING` will be inferred::
+
+            for doc in collection.find().sort([
+                    'field1',
+                    ('field2', pymongo.DESCENDING)]):
+                print(doc)
+
+        Text search results can be sorted by relevance::
+
+            cursor = db.test.find(
+                {'$text': {'$search': 'some words'}},
+                {'score': {'$meta': 'textScore'}})
+
+            # Sort by 'score' field.
+            cursor.sort([('score', {'$meta': 'textScore'})])
+
+            for doc in cursor:
+                print(doc)
+
+        For more advanced text search functionality, see MongoDB's
+        `Atlas Search `_.
+
+        Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
+        already been used. Only the last :meth:`sort` applied to this
+        cursor has any effect.
+
+        :param key_or_list: a single key or a list of (key, direction)
+            pairs specifying the keys to sort on
+        :param direction: only used if `key_or_list` is a single
+            key, if not given :data:`~pymongo.ASCENDING` is assumed
+        """
+        self._check_okay_to_chain()
+        keys = helpers_shared._index_list(key_or_list, direction)
+        self._ordering = helpers_shared._index_document(keys)
+        return self
+
+    def explain(self) -> _DocumentType:
+        """Returns an explain plan record for this cursor.
+
+        .. note:: This method uses the default verbosity mode of the
+          `explain command
+          `_,
+          ``allPlansExecution``. To use a different verbosity use
+          :meth:`~pymongo.database.Database.command` to run the explain
+          command directly.
+
+        .. note:: The timeout of this method can be set using :func:`pymongo.timeout`.
+
+        .. seealso:: The MongoDB documentation on `explain `_.
+        """
+        c = self.clone()
+        c._explain = True
+
+        # always use a hard limit for explains
+        if c._limit:
+            c._limit = -abs(c._limit)
+        return next(c)
+
+    def _set_hint(self, index: Optional[_Hint]) -> None:
+        if index is None:
+            self._hint = None
+            return
+
+        if isinstance(index, str):
+            self._hint = index
+        else:
+            self._hint = helpers_shared._index_document(index)
+
+    def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]:
+        """Adds a 'hint', telling Mongo the proper index to use for the query.
+
+        Judicious use of hints can greatly improve query
+        performance.
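+        For example, assuming a hypothetical index on field ``a`` (the
+        collection and index names here are illustrative)::
+
+            db.test.find({'a': 1, 'b': 2}).hint([('a', pymongo.ASCENDING)])
+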
When doing a query on multiple fields (at least + one of which is indexed) pass the indexed field as a hint to + the query. Raises :class:`~pymongo.errors.OperationFailure` if the + provided hint requires an index that does not exist on this collection, + and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. + + `index` should be an index as passed to + :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``) or the name of the index. + If `index` is ``None`` any existing hint for this query is + cleared. The last hint applied to this cursor takes precedence + over all others. + + :param index: index to hint on (as an index specifier) + """ + self._check_okay_to_chain() + self._set_hint(index) + return self + + def comment(self, comment: Any) -> Cursor[_DocumentType]: + """Adds a 'comment' to the cursor. + + http://mongodb.com/docs/manual/reference/operator/comment/ + + :param comment: A string to attach to the query to help interpret and + trace the operation in the server logs and in profile data. + + .. versionadded:: 2.7 + """ + self._check_okay_to_chain() + self._comment = comment + return self + + def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) + + Raises :class:`TypeError` if `code` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. Only the last call to + :meth:`where` applied to a :class:`Cursor` has any effect. + + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + + :param code: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ + """ + self._check_okay_to_chain() + if not isinstance(code, Code): + code = Code(code) + + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self._has_filter: + spec = dict(self._spec) + else: + spec = cast(dict, self._spec) # type: ignore[type-arg] + spec["$where"] = code + self._spec = spec + return self + + def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: + """Adds a :class:`~pymongo.collation.Collation` to this query. + + Raises :exc:`TypeError` if `collation` is not an instance of + :class:`~pymongo.collation.Collation` or a ``dict``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. Only the last collation applied to this cursor has + any effect. + + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
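+
+        A minimal sketch (the locale and strength shown are illustrative)::
+
+            from pymongo.collation import Collation
+
+            # strength=1 compares base characters only (case- and
+            # accent-insensitive).
+            db.test.find({'name': 'cafe'}).collation(Collation(locale='fr', strength=1))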
+ """ + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) + return self + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, # type: ignore[type-arg] + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: + # Save the read preference for getMore commands. + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + This is mostly useful with `tailable cursors + `_ + since they will stop iterating even though they *may* return more + results in the future. + + With regular cursors, simply use a for loop instead of :attr:`alive`:: + + for doc in collection.find(): + print(doc) + + .. note:: Even if :attr:`alive` is True, :meth:`next` can raise + :exc:`StopIteration`. :attr:`alive` can also be True while iterating + a cursor from a failed server. In this case :attr:`alive` will + return False after :meth:`next` fails to retrieve the next batch + of results from the server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> Optional[int]: + """Returns the id of the cursor + + .. versionadded:: 2.2 + """ + return self._id + + @property + def address(self) -> Optional[tuple[str, Any]]: + """The (host, port) of the server used, or None. + + .. versionchanged:: 3.0 + Renamed from "conn_id". + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def __copy__(self) -> Cursor[_DocumentType]: + """Support function for `copy.copy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=False) + + def __deepcopy__(self, memo: Any) -> Any: + """Support function for `copy.deepcopy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=True) + + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] + ... + + @overload + def _deepcopy( + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] + ... + + def _deepcopy( + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: + """Deepcopy helper for the data dictionary or list. + + Regular expressions cannot be deep copied but as they are immutable we + don't have to copy them when cloning. 
+ """ + y: Union[list[Any], dict[str, Any]] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): + y, is_list, iterator = [], True, enumerate(x) + else: + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] + if memo is None: + memo = {} + val_id = id(x) + if val_id in memo: + return memo[val_id] + memo[val_id] = y + + for key, value in iterator: + if isinstance(value, (dict, list)) and not isinstance(value, SON): + value = self._deepcopy(value, memo) # noqa: PLW2901 + elif not isinstance(value, RE_TYPE): + value = copy.deepcopy(value, memo) # noqa: PLW2901 + + if is_list: + y.append(value) # type: ignore[union-attr] + else: + if not isinstance(key, RE_TYPE): + key = copy.deepcopy(key, memo) # noqa: PLW2901 + y[key] = value # type:ignore[index] + return y + + def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _die_lock(self) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def distinct(self, key: str) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.collection.Collection.read_preference` of the + :class:`~pymongo.collection.Collection` instance on which + :meth:`~pymongo.collection.Collection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.collection.Collection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return self._collection.distinct(key, session=self._session, **options) + + def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. 
+ + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures Cursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + self.close() + raise + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + self.close() + + def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
+ """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + self._send_message(g) + + return len(self._data) + + def rewind(self) -> Cursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + raise StopIteration + if len(self._data) or self._refresh(): + return self._data.popleft() + else: + raise StopIteration + + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some documents from the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + return False + if len(self._data) or self._refresh(): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + def __next__(self) -> _DocumentType: + return self.next() + + def __iter__(self) -> Cursor[_DocumentType]: + return self + + def __enter__(self) -> Cursor[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + @_csot.apply + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. + + To use:: + + >>> cursor.to_list() + + Or, to read at most n items from the cursor:: + + >>> cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class RawBatchCursor(Cursor, Generic[_DocumentType]): # type: ignore[type-arg] + """A cursor / iterator over raw batches of BSON data from a query result.""" + + _query_class = _RawBatchQuery + _getmore_class = _RawBatchGetMore + + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.collection.Collection.find_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + clone = self._clone(deepcopy=True, base=Cursor(self.collection)) + return clone.explain() + + def __getitem__(self, index: Any) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py new file mode 100644 index 0000000000..0d129ba972 --- /dev/null +++ b/pymongo/synchronous/database.py @@ -0,0 +1,1462 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Database level operations.""" +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.dbref import DBRef +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.aggregation import _DatabaseAggregationCommand +from pymongo.synchronous.change_stream import DatabaseChangeStream +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class Database(common.BaseObject, Generic[_DocumentType]): + def __init__( + self, + client: MongoClient[_DocumentType], + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: + """Get a database by client and name. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. + + :param client: A :class:`~pymongo.mongo_client.MongoClient` instance. + :param name: The database name. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) client.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) client.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) client.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) client.read_concern is used. + + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + :class:`~pymongo.database.Database` no longer returns an instance + of :class:`~pymongo.collection.Collection` for attribute names + with leading underscores. 
You must use dict-style lookups instead::
+
+               db['__my_collection__']
+
+           Not::
+
+               db.__my_collection__
+        """
+        super().__init__(
+            codec_options or client.codec_options,
+            read_preference or client.read_preference,
+            write_concern or client.write_concern,
+            read_concern or client.read_concern,
+        )
+
+        from pymongo.synchronous.mongo_client import MongoClient
+
+        if not isinstance(name, str):
+            raise TypeError(f"name must be an instance of str, not {type(name)}")
+
+        if not isinstance(client, MongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
+            if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__):
+                raise TypeError(f"MongoClient required but given {type(client).__name__}")
+
+        if name != "$external":
+            _check_name(name)
+
+        self._name = name
+        self._client: MongoClient[_DocumentType] = client
+        self._timeout = client.options.timeout
+
+    @property
+    def client(self) -> MongoClient[_DocumentType]:
+        """The client instance for this :class:`Database`."""
+        return self._client
+
+    @property
+    def name(self) -> str:
+        """The name of this :class:`Database`."""
+        return self._name
+
+    @overload
+    def with_options(
+        self,
+        codec_options: None = None,
+        read_preference: Optional[_ServerMode] = ...,
+        write_concern: Optional[WriteConcern] = ...,
+        read_concern: Optional[ReadConcern] = ...,
+    ) -> Database[_DocumentType]:
+        ...
+
+    @overload
+    def with_options(
+        self,
+        codec_options: bson.CodecOptions[_DocumentTypeArg],
+        read_preference: Optional[_ServerMode] = ...,
+        write_concern: Optional[WriteConcern] = ...,
+        read_concern: Optional[ReadConcern] = ...,
+    ) -> Database[_DocumentTypeArg]:
+        ...
+
+    def with_options(
+        self,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> Database[_DocumentType] | Database[_DocumentTypeArg]:
+        """Get a clone of this database changing the specified settings.
+
+          >>> db1.read_preference
+          Primary()
+          >>> from pymongo.read_preferences import Secondary
+          >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}]))
+          >>> db1.read_preference
+          Primary()
+          >>> db2.read_preference
+          Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None)
+
+        :param codec_options: An instance of
+            :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
+            default) the :attr:`codec_options` of this :class:`Database`
+            is used.
+        :param read_preference: The read preference to use. If
+            ``None`` (the default) the :attr:`read_preference` of this
+            :class:`Database` is used. See :mod:`~pymongo.read_preferences`
+            for options.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
+            default) the :attr:`write_concern` of this :class:`Database`
+            is used.
+        :param read_concern: An instance of
+            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
+            default) the :attr:`read_concern` of this :class:`Database`
+            is used.
+
+        ..
versionadded:: 3.8 + """ + return Database( + self._client, + self._name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Database): + return self._client == other.client and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._client, self._name)) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._client!r}, {self._name!r})" + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + return Collection(self, name) + + def get_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: + """Get a :class:`~pymongo.collection.Collection` with the given name + and options. + + Useful for creating a :class:`~pymongo.collection.Collection` with + different codec options, read preference, and/or write concern from + this :class:`Database`. + + >>> db.read_preference + Primary() + >>> coll1 = db.test + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = db.get_collection( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the collection - a string. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` is + used. 
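+
+        A further sketch, assuming a majority write concern is wanted (the
+        option shown is illustrative, not a recommendation)::
+
+            >>> from pymongo import WriteConcern
+            >>> coll3 = db.get_collection(
+            ...     'test', write_concern=WriteConcern(w='majority'))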
+ """ + return Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Database' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> DatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with db.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. 
+ :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = DatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + @_csot.apply + def create_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> Collection[_DocumentType]: + """Create a new :class:`~pymongo.collection.Collection` in this + database. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.CollectionInvalid` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. 
If ``None`` (the
+            default) the :attr:`codec_options` of this :class:`Database` is
+            used.
+        :param read_preference: The read preference to use. If
+            ``None`` (the default) the :attr:`read_preference` of this
+            :class:`Database` is used.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
+            default) the :attr:`write_concern` of this :class:`Database` is
+            used.
+        :param read_concern: An instance of
+            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
+            default) the :attr:`read_concern` of this :class:`Database` is
+            used.
+        :param collation: An instance of
+            :class:`~pymongo.collation.Collation`.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param check_exists: if True (the default), send a listCollections command to
+            check if the collection already exists before creation.
+        :param kwargs: additional keyword arguments will
+            be passed as options for the `create collection command`_
+
+        All optional `create collection command`_ parameters should be passed
+        as keyword arguments to this method. Valid options include, but are not
+        limited to:
+
+        - ``size`` (int): desired initial size for the collection (in
+          bytes). For capped collections this size is the max
+          size of the collection.
+        - ``capped`` (bool): if True, this is a capped collection
+        - ``max`` (int): maximum number of objects if capped (optional)
+        - ``timeseries`` (dict): a document specifying configuration options for
+          timeseries collections
+        - ``expireAfterSeconds`` (int): the number of seconds after which a
+          document in a timeseries collection expires
+        - ``validator`` (dict): a document specifying validation rules or expressions
+          for the collection
+        - ``validationLevel`` (str): how strictly to apply the
+          validation rules to existing documents during an update. The default level
+          is "strict"
+        - ``validationAction`` (str): whether to "error" on invalid documents
+          (the default) or just "warn" about the violations but allow invalid
+          documents to be inserted
+        - ``indexOptionDefaults`` (dict): a document specifying a default configuration
+          for indexes when creating a collection
+        - ``viewOn`` (str): the name of the source collection or view from which
+          to create the view
+        - ``pipeline`` (list): a list of aggregation pipeline stages
+        - ``comment`` (str): a user-provided comment to attach to this command.
+          This option is only supported on MongoDB >= 4.4.
+        - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for
+          Queryable Encryption. For example::
+
+              {
+                  "escCollection": "enxcol_.encryptedCollection.esc",
+                  "ecocCollection": "enxcol_.encryptedCollection.ecoc",
+                  "fields": [
+                      {
+                          "path": "firstName",
+                          "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
+                          "bsonType": "string",
+                          "queries": {"queryType": "equality"}
+                      },
+                      {
+                          "path": "ssn",
+                          "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
+                          "bsonType": "string"
+                      }
+                  ]
+              }
+        - ``clusteredIndex`` (dict): Document that specifies the clustered index
+          configuration. It must have the following form::
+
+              {
+                  // key pattern must be {_id: 1}
+                  key: <key pattern>, // required
+                  unique: <bool>, // required, must be `true`
+                  name: <string>, // optional, otherwise automatically generated
+                  v: <int>, // optional, must be `2` if provided
+              }
+        - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for
+          enabling pre- and post-images.
+
+        ..
versionchanged:: 4.2 + Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. + + .. versionchanged:: 3.11 + This method is now supported inside multi-document transactions + with MongoDB 4.4+. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Added the collation option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + """ + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) + if encrypted_fields: + common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields + + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) + + with self._client._tmp_session(session) as s: + if s and not s.in_transaction: + s._leave_alive = True + # Skip this check in a transaction where listCollections is not + # supported. + if ( + check_exists + and (not s or not s.in_transaction) + and name in self._list_collection_names(filter={"name": name}, session=s) + ): + raise CollectionInvalid("collection %s already exists" % name) + if s: + s._leave_alive = False + coll = Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + coll._create(kwargs, s) + + return coll + + def aggregate( + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any + ) -> CommandCursor[_DocumentType]: + """Perform a database-level aggregation. + + See the `aggregation pipeline`_ documentation for a list of stages + that are supported. + + .. code-block:: python + + # Lists all operations currently running on the server. + with client.admin.aggregate([{"$currentOp": {}}]) as cursor: + for operation in cursor: + print(operation) + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Database`, except when ``$out`` or ``$merge`` are used, in + which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` + is used. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement. + + .. note:: This method does not support the 'explain' option. Please + use :meth:`~pymongo.database.Database.command` instead. + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this collection is automatically applied to this operation. + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. 
+ - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionadded:: 3.9 + + .. _aggregation pipeline: + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self.client._tmp_session(session) as s: + cmd = _DatabaseAggregationCommand( + self, + CommandCursor, + pipeline, + kwargs, + user_fields={"cursor": {"firstBatch": 1}}, + ) + return self.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... 
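+
+    # The two overloads above exist for static typing only: they let type
+    # checkers infer ``dict[str, Any]`` results under the default codec
+    # options and ``_CodecDocumentType`` results when custom codec options
+    # are supplied; the implementation below handles both cases.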
+ + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Internal command helper.""" + if isinstance(command, str): + command = {command: value} + + command.update(kwargs) + with self._client._tmp_session(session) as s: + return conn.command( + self._name, + command, + read_preference, + codec_options, # type: ignore[arg-type] + check, + allowable_errors, + write_concern=write_concern, + parse_write_concern_error=parse_write_concern_error, + session=s, + client=self._client, + ) + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Issue a MongoDB command. + + Send command `command` to the database and return the + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of + :class:`dict` and will be sent as is. + + Any additional keyword arguments will be added to the final + command document before it is sent. + + For example, a command like ``{buildinfo: 1}`` can be sent + using: + + >>> db.command("buildinfo") + OR + >>> db.command({"buildinfo": 1}) + + For a command where the value matters, like ``{count: + collection_name}`` we can do: + + >>> db.command("count", collection_name) + OR + >>> db.command({"count": collection_name}) + + For commands that take additional arguments we can use + kwargs. So ``{count: collection_name, query: query}`` becomes: + + >>> db.command("count", collection_name, query=query) + OR + >>> db.command({"count": collection_name, "query": query}) + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. 
`findandmodify`)
+               should be done with this in mind.
+
+        :param value: value to use for the command verb when
+            `command` is passed as a string
+        :param check: check the response for errors, raising
+            :class:`~pymongo.errors.OperationFailure` if there are any
+        :param allowable_errors: if `check` is ``True``, error messages
+            in this list will be ignored by error-checking
+        :param read_preference: The read preference for this
+            operation. See :mod:`~pymongo.read_preferences` for options.
+            If the provided `session` is in a transaction, defaults to the
+            read preference configured for the transaction.
+            Otherwise, defaults to
+            :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
+        :param codec_options: A :class:`~bson.codec_options.CodecOptions`
+            instance.
+        :param session: A
+            :class:`~pymongo.client_session.ClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: additional keyword arguments will
+            be added to the command document before it is sent
+
+        .. note:: :meth:`command` does **not** obey this Database's
+           :attr:`read_preference` or :attr:`codec_options`. You must use the
+           ``read_preference`` and ``codec_options`` parameters instead.
+
+        .. note:: :meth:`command` does **not** apply any custom TypeDecoders
+           when decoding the command response.
+
+        .. note:: If this client has been configured to use MongoDB Stable
+           API (see `versioned API `_), then :meth:`command` will
+           automatically add API versioning options to the given command.
+           Explicitly adding API versioning options in the command and
+           declaring an API version on the client is not supported.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. versionchanged:: 3.0
+           Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
+           and `secondary_acceptable_latency_ms` options.
+           Removed `compile_re` option: PyMongo now always represents BSON
+           regular expressions as :class:`~bson.regex.Regex` objects. Use
+           :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
+           BSON regular expression to a Python regular expression object.
+           Added the ``codec_options`` parameter.
+
+        .. seealso:: The MongoDB documentation on `commands `_.
+        """
+        opts = codec_options or DEFAULT_CODEC_OPTIONS
+        if comment is not None:
+            kwargs["comment"] = comment
+
+        if isinstance(command, str):
+            command_name = command
+        else:
+            command_name = next(iter(command))
+
+        if read_preference is None:
+            read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY
+        with self._client._conn_for_reads(read_preference, session, operation=command_name) as (
+            connection,
+            read_preference,
+        ):
+            return self._command(
+                connection,
+                command,
+                value,
+                check,
+                allowable_errors,
+                read_preference,
+                opts,  # type: ignore[arg-type]
+                session=session,
+                **kwargs,
+            )
+
+    @_csot.apply
+    def cursor_command(
+        self,
+        command: Union[str, MutableMapping[str, Any]],
+        value: Any = 1,
+        read_preference: Optional[_ServerMode] = None,
+        codec_options: Optional[CodecOptions[_CodecDocumentType]] = None,
+        session: Optional[ClientSession] = None,
+        comment: Optional[Any] = None,
+        max_await_time_ms: Optional[int] = None,
+        **kwargs: Any,
+    ) -> CommandCursor[_DocumentType]:
+        """Issue a MongoDB command and parse the response as a cursor.
+
+        If the response from the server does not include a cursor field, an error will be raised.
+
+        Otherwise, behaves identically to issuing a normal MongoDB command.
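+
+        A minimal sketch, assuming a ``test`` collection exists (the command
+        and filter shown are illustrative)::
+
+            with db.cursor_command("find", "test", filter={"x": 1}) as cursor:
+                for doc in cursor:
+                    print(doc)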
+ + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. 
+ """ + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + with self._client._tmp_session(session) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self._client._conn_for_reads(read_preference, tmp_session, command_name) as ( + conn, + read_preference, + ): + response = self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + operation: str, + session: Optional[ClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return self._client._retryable_read(_cmd, read_preference, session, operation) + + def _list_collections( + self, + conn: Connection, + session: Optional[ClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + Collection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = {"listCollections": 1, "cursor": {}} + cmd.update(kwargs) + with self._client._tmp_session(session) as tmp_session: + cursor = ( + self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + def _list_collections_helper( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. 
versionadded:: 3.6 + """ + if filter is not None: + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + return self._list_collections(conn, session, read_preference=read_preference, **kwargs) + + return self._client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) + + def list_collections( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it is closed automatically only when it is garbage collected, which can leave resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`~pymongo.command_cursor.CommandCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with database.list_collections() as cursor: + for collection in cursor: + print(collection) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + return self._list_collections_helper(session, filter, comment, **kwargs) + + def _list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + if filter is None: + kwargs["nameOnly"] = True + + else: + # The enumerate collections spec states that "drivers MUST NOT set + # nameOnly if a filter specifies any keys other than name." + common.validate_is_mapping("filter", filter) + kwargs["filter"] = filter + if not filter or (len(filter) == 1 and "name" in filter): + kwargs["nameOnly"] = True + + return [ + result["name"] for result in self._list_collections_helper(session=session, **kwargs) + ] + + def list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. 
The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return self._list_collection_names(session, filter, comment, **kwargs) + + def _drop_helper( + self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None + ) -> dict[str, Any]: + command = {"drop": name} + if comment is not None: + command["comment"] = comment + + with self._client._conn_for_writes(session, operation=_Op.DROP) as connection: + return self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + def drop_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: + """Drop a collection. + + :param name_or_collection: the name of a collection to drop or the + collection object itself + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this database's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) + + def validate_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[ClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: + """Validate a collection. + + Returns a dict of validation info. Raises CollectionInvalid if + validation fails. + + See also the MongoDB documentation on the `validate command`_. + + :param name_or_collection: A Collection object or the name of a + collection to validate. 
+ :param scandata: Do extra checks beyond checking the overall + structure of the collection. + :param full: Have the server do a more thorough scan of the + collection. Use with `scandata` for a thorough scan + of the structure of the collection and the individual + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param background: A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError( + f"name_or_collection must be an instance of str or Collection, not {type(name)}" + ) + cmd = {"validate": name, "scandata": scandata, "full": full} + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background + + result = self.command(cmd, session=session) + + valid = True + # Pre 1.9 results + if "result" in result: + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + # Sharded results + elif "raw" in result: + for _, res in result["raw"].items(): + if "result" in res: + info = res["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + elif not res.get("valid", False): + valid = False + break + # Post 1.9 non-sharded results. + elif not result.get("valid", False): + valid = False + + if not valid: + raise CollectionInvalid(f"{name} invalid: {result!r}") + + return result + + def dereference( + self, + dbref: DBRef, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: + """Dereference a :class:`~bson.dbref.DBRef`, getting the + document it points to. + + Raises :class:`TypeError` if `dbref` is not an instance of + :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if + the reference does not point to a valid document. Raises + :class:`ValueError` if `dbref` has a database specified that + is different from the current database. + + :param dbref: the reference + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional keyword arguments + are the same as the arguments to + :meth:`~pymongo.collection.Collection.find`. + + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. 
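+
+        For example, a minimal sketch (the collection name and the
+        ``person_id`` value are illustrative)::
+
+            from bson.dbref import DBRef
+
+            ref = DBRef("people", person_id)
+            person = db.dereference(ref)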
+ """ + if not isinstance(dbref, DBRef): + raise TypeError("cannot dereference a %s" % type(dbref)) + if dbref.database is not None and dbref.database != self._name: + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self._name!r})" + ) + return self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py new file mode 100644 index 0000000000..2d666b9763 --- /dev/null +++ b/pymongo/synchronous/encryption.py @@ -0,0 +1,1276 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for explicit client-side field level encryption.""" +from __future__ import annotations + +import contextlib +import enum +import socket +import time as time # noqa: PLC0414 # needed in sync version +import uuid +import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generator, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +try: + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + from pymongocrypt.synchronous.auto_encrypter import AutoEncrypter # type:ignore[import] + from pymongocrypt.synchronous.explicit_encrypter import ( # type:ignore[import] + ExplicitEncrypter, + ) + from pymongocrypt.synchronous.state_machine import ( # type:ignore[import] + MongoCryptCallback, + ) + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False + MongoCryptCallback = object + +from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from pymongo import _csot +from pymongo.common import CONNECT_TIMEOUT +from pymongo.daemon import _spawn_daemon +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + NetworkTimeout, + ServerSelectionTimeoutError, +) +from pymongo.helpers_shared import _get_timeout_details +from pymongo.network_layer import sendall +from pymongo.operations import UpdateOne +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _configured_socket, + _raise_connection_failure, +) +from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from 
pymongo.typings import _DocumentType, _DocumentTypeArg +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext + + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + + +_IS_SYNC = True + +_HTTPS_PORT = 443 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD +) +# Use RawBSONDocument codec options to avoid needlessly decoding +# documents from the key vault. +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) + + +def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return _configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + +@contextlib.contextmanager +def _wrap_encryption_errors() -> Iterator[None]: + """Context manager to wrap encryption related errors.""" + try: + yield + except BSONError: + # BSON encoding/decoding errors are unrelated to encryption so + # we should propagate them unchanged. + raise + except Exception as exc: + raise EncryptionError(exc) from exc + + +class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[MongoClient[_DocumentTypeArg]], + key_vault_coll: Collection[_DocumentTypeArg], + mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): + """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any + # Use a weak ref to break reference cycle. + if client is not None: + self.client_ref = weakref.ref(client) + else: + self.client_ref = None + self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast( + Collection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) + self.mongocryptd_client = mongocryptd_client + self.opts = opts + self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) + + def kms_request(self, kms_context: MongoCryptKmsContext) -> None: + """Complete a KMS request. + + :param kms_context: A :class:`MongoCryptKmsContext`. + + :return: None + """ + endpoint = kms_context.endpoint + message = kms_context.message + provider = kms_context.kms_provider + ctx = self._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + _IS_SYNC, + ) + # CSOT: set timeout for socket creation. 
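+        # _csot.clamp_remaining returns min(_KMS_CONNECT_TIMEOUT, the time
+        # left on the CSOT deadline), or _KMS_CONNECT_TIMEOUT when no
+        # deadline is set; max(..., 0.001) below avoids handing the socket
+        # a zero or negative timeout once the budget is nearly exhausted.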
+ connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + time.sleep(sleep_sec) + try: + conn = _connect_kms(address, opts) + try: + sendall(conn, message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + receive_data_socket, + ) + + data = receive_data_socket(conn, kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) + finally: + conn.close() + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err + + def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: + """Get the collection info for a namespace. + + The returned collection info is passed to libmongocrypt which reads + the JSON schema. + + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. + + :return: All documents from the listCollections command response as BSON. + """ + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) for doc in cursor] + + def spawn(self) -> None: + """Spawn mongocryptd. + + Note this method is thread safe; at most one mongocryptd will start + successfully. + """ + self._spawned = True + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] + args.extend(self.opts._mongocryptd_spawn_args) + _spawn_daemon(args) + + def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: + """Mark a command for encryption. + + :param database: The database on which to run this command. + :param cmd: The BSON command to run. + + :return: The marked command response from mongocryptd. + """ + if not self._spawned and not self.opts._mongocryptd_bypass_spawn: + self.spawn() + # Database.command only supports mutable mappings so we need to decode + # the raw BSON command first. 
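+        # (RawBSONDocument is read-only; inflating with
+        # DEFAULT_RAW_BSON_OPTIONS decodes only the top-level document and
+        # keeps embedded documents as raw BSON.)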
+ inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None + try: + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + except ServerSelectionTimeoutError: + if self.opts._mongocryptd_bypass_spawn: + raise + self.spawn() + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + return res.raw + + def fetch_keys(self, filter: bytes) -> Generator[bytes | memoryview, None]: + """Yields one or more keys from the key vault. + + :param filter: The filter to pass to find. + + :return: A generator which yields the requested keys from the key vault. + """ + assert self.key_vault_coll is not None + with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: + for key in cursor: + yield key.raw + + def insert_data_key(self, data_key: bytes) -> Binary: + """Insert a data key into the key vault. + + :param data_key: The data key document to insert. + + :return: The _id of the inserted data key document. + """ + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) + + assert self.key_vault_coll is not None + self.key_vault_coll.insert_one(raw_doc) + return data_key_id + + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: + """Encode a document to BSON. + + A document can be any mapping type (like :class:`dict`). + + :param doc: mapping type representing a document + + :return: The encoded BSON bytes. + """ + return encode(doc) + + def close(self) -> None: + """Release resources. + + Note it is not safe to call this method from __del__ or any GC hooks. + """ + self.client_ref = None + self.key_vault_coll = None + if self.mongocryptd_client: + self.mongocryptd_client.close() + self.mongocryptd_client = None + + +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. + + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. + + :param client: The encrypted MongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. 
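+
+        For context, an encrypted client that owns a _Encrypter is typically
+        constructed like this (a minimal sketch; the key vault namespace and
+        the 96-byte ``local_master_key`` are illustrative)::
+
+            opts = AutoEncryptionOpts(
+                {"local": {"key": local_master_key}}, "keyvault.datakeys")
+            client = MongoClient(auto_encryption_opts=opts)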
+ """ + if opts._schema_map is None: + schema_map = None + else: + schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] + ) -> MongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: MongoClient[Mapping[str, Any]] = MongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( # type:ignore[misc] + metadata_client, + key_vault_coll, # type:ignore[arg-type] + mongocryptd_client, + opts, + ) + self._auto_encrypter = AutoEncrypter( + io_callbacks, + _create_mongocrypt_options( + kms_providers=opts._kms_providers, + schema_map=schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, + ), + ) + self._closed = False + + def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> dict[str, Any]: + """Encrypt a MongoDB command. + + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. + + :return: The encrypted command to execute. + """ + self._check_closed() + encoded_cmd = _dict_to_bson(cmd, False, codec_options) + with _wrap_encryption_errors(): + encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) + # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + + def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: + """Decrypt a MongoDB command response. + + :param response: A MongoDB command response as BSON. + + :return: The decrypted command response. 
+ """ + self._check_closed() + with _wrap_encryption_errors(): + return cast(bytes, self._auto_encrypter.decrypt(response)) + + def _check_closed(self) -> None: + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + + def close(self) -> None: + """Cleanup resources.""" + self._closed = True + self._auto_encrypter.close() + if self._internal_client: + self._internal_client.close() + self._internal_client = None + + +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" + + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + RANGE = "Range" + """Range. + + .. versionadded:: 4.9 + """ + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - RangePreview. + + .. note:: Support for RangePreview is deprecated. Use :attr:`Algorithm.RANGE` instead. + + .. versionadded:: 4.4 + """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" + + RANGE = "range" + """Used to encrypt a value for a range query. + + .. versionadded:: 4.9 + """ + + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - Used to encrypt a value for a rangePreview query. + + .. note:: Support for RangePreview is deprecated. Use :attr:`QueryType.RANGE` instead. + + .. versionadded:: 4.4 + """ + + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + + +def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) + + +class ClientEncryption(Generic[_DocumentType]): + """Explicit client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, + ) -> None: + """Explicit client-side field level encryption. + + The ClientEncryption class encapsulates explicit operations on a key + vault collection that cannot be done directly on a MongoClient. Similar + to configuring auto encryption on a MongoClient, it is constructed with + a MongoClient (to a MongoDB cluster containing the key vault + collection), KMS provider configuration, and keyVaultNamespace. It + provides an API for explicitly encrypting and decrypting values, and + creating data keys. 
It does not provide an API to query keys from the + key vault collection, as this can be done directly on the MongoClient. + + See `explicit client-side encryption `_ for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support `CSFLE on-demand credentials `_. + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: A MongoClient connected to a MongoDB cluster + containing the `key_vault_namespace` collection. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` to use when encoding a + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + MongoClient, Database, or Collection used to access application + data. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. 
versionadded:: 3.9 + """ + if not _HAVE_PYMONGOCRYPT: + raise ConfigurationError( + "client-side field level encryption requires the pymongocrypt " + "library: install a compatible version with: " + "python -m pip install --upgrade 'pymongo[encryption]'" + ) + + check_min_pymongocrypt() + + if not isinstance(codec_options, CodecOptions): + raise TypeError( + f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" + ) + + if not isinstance(key_vault_client, MongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "MongoClient" for cls in type(key_vault_client).__mro__): + raise TypeError(f"MongoClient required but given {type(key_vault_client).__name__}") + + self._kms_providers = kms_providers + self._key_vault_namespace = key_vault_namespace + self._key_vault_client = key_vault_client + self._codec_options = codec_options + + db, coll = key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + opts = AutoEncryptionOpts( + kms_providers, + key_vault_namespace, + kms_tls_options=kms_tls_options, + key_expiration_ms=key_expiration_ms, + ) + self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) + self._encryption = ExplicitEncrypter( + self._io_callbacks, + _create_mongocrypt_options( + kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms + ), + ) + # Use the same key vault collection as the callback. + assert self._io_callbacks.key_vault_coll is not None + self._key_vault_coll = self._io_callbacks.key_vault_coll + + def create_encrypted_collection( + self, + database: Database[_DocumentTypeArg], + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> tuple[Collection[_DocumentTypeArg], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :param database: the database to create the collection + :param name: the name of the collection to create + :param encrypted_fields: Document that describes the encrypted fields for + Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: + + .. code-block:: python + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + :param kms_provider: the KMS provider to be used + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. 
+ :param kwargs: additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or collection creation fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + if not isinstance(database, Database): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") + + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: + """Create and insert a new data key into the key vault collection. + + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + + If the `kms_provider` type is "aws" it is required and has the + following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer master key (CMK). + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". + + If the `kms_provider` type is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` type is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` type is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. 
`keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. + + :param key_alt_names: An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. The following example shows creating + and referring to a data key by alternate name:: + + client_encryption.create_data_key("local", key_alt_names=["name1"]) + # reference the key with the alternate name + client_encryption.encrypt("457-55-5462", key_alt_name="name1", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + :param key_material: Sets the custom key material to be used + by the data key for encryption and decryption. + + :return: The ``_id`` of the created data key document as a + :class:`~bson.binary.Binary` with subtype + :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. + """ + self._check_closed() + with _wrap_encryption_errors(): + return cast( + Binary, + self._encryption.create_data_key( + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ), + ) + + def _encrypt_helper( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + is_expression: bool = False, + text_opts: Optional[TextOpts] = None, + ) -> Any: + self._check_closed() + if isinstance(key_id, uuid.UUID): + key_id = Binary.from_uuid(key_id) + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + range_opts_bytes = None + if range_opts: + range_opts_bytes = encode( + range_opts.document, + codec_options=self._codec_options, + ) + text_opts_bytes = None + if text_opts: + text_opts_bytes = encode( + text_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts_bytes, + is_expression=is_expression, + # For compatibility with pymongocrypt < 1.16: + **{"text_opts": text_opts_bytes} if text_opts_bytes else {}, + ) + return decode(encrypted_doc)["v"] + + def encrypt( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + text_opts: Optional[TextOpts] = None, + ) -> Binary: + """Encrypt a BSON value with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param value: The BSON value to encrypt. + :param algorithm: The encryption algorithm to use. 
See + :class:`Algorithm` for some valid options. + :param key_id: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type: The query type to execute. See :class:`QueryType` for valid options. + :param contention_factor: The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + :param range_opts: Index options for `range` queries. See + :class:`RangeOpts` for some valid options. + :param text_opts: Index options for `textPreview` queries. See + :class:`TextOpts` for some valid options. + + :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.15 + Added the `text_opts` parameter. + + .. versionchanged:: 4.9 + Added the `range_opts` parameter. + + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. + + .. versionchanged:: 4.2 + Added the `query_type` and `contention_factor` parameters. + """ + return cast( + Binary, + self._encrypt_helper( + value=value, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=False, + text_opts=text_opts, + ), + ) + + def encrypt_expression( + self, + expression: Mapping[str, Any], + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> RawBSONDocument: + """Encrypt a BSON expression with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param expression: The BSON aggregate or match expression to encrypt. + :param algorithm: The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + :param key_id: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type: The query type to execute. See + :class:`QueryType` for valid options. + :param contention_factor: The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + :param range_opts: Index options for `range` queries. See + :class:`RangeOpts` for some valid options. + + :return: The encrypted expression, a :class:`~bson.RawBSONDocument`. + + .. versionchanged:: 4.9 + Added the `range_opts` parameter. + + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. + + .. versionadded:: 4.4 + """ + return cast( + RawBSONDocument, + self._encrypt_helper( + value=expression, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=True, + ), + ) + + def decrypt(self, value: Binary) -> Any: + """Decrypt an encrypted value. + + :param value: The encrypted value, a + :class:`~bson.binary.Binary` with subtype 6. + + :return: The decrypted BSON value. 
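+
+        For example, a round trip through explicit encryption (a minimal
+        sketch; the key alternate name and value mirror the example in
+        :meth:`create_data_key`)::
+
+            encrypted = client_encryption.encrypt(
+                "457-55-5462",
+                Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
+                key_alt_name="name1")
+            assert client_encryption.decrypt(encrypted) == "457-55-5462"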
+ """ + self._check_closed() + if not (isinstance(value, Binary) and value.subtype == 6): + raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") + + with _wrap_encryption_errors(): + doc = encode({"v": value}) + decrypted_doc = self._encryption.decrypt(doc) + return decode(decrypted_doc, codec_options=self._codec_options)["v"] + + def get_key(self, id: Binary) -> Optional[RawBSONDocument]: + """Get a data key by id. + + :param id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :return: The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"_id": id}) + + def get_keys(self) -> Cursor[RawBSONDocument]: + """Get all of the data keys. + + :return: An instance of :class:`~pymongo.cursor.Cursor` over the data key + documents. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find({}) + + def delete_key(self, id: Binary) -> DeleteResult: + """Delete a key document in the key vault collection that has the given ``key_id``. + + :param id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :return: The delete result. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.delete_one({"_id": id}) + + def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: + """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. + + :param id: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: The key alternate name to add. + + :return: The previous version of the key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + update = {"$addToSet": {"keyAltNames": key_alt_name}} + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, update) + + def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: + """Get a key document in the key vault collection that has the given ``key_alt_name``. + + :param key_alt_name: (str): The key alternate name of the key to get. + + :return: The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) + + def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: + """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. + + Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. + + :param id: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: The key alternate name to remove. + + :return: Returns the previous version of the key document. + + .. 
versionadded:: 4.2 + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :param filter: A document used to filter the data keys. + :param provider: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + :param master_key: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :return: A :class:`RewrapManyDataKeyResult`. + + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + + .. versionadded:: 4.2 + """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + assert self._key_vault_coll is not None + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + + def __enter__(self) -> ClientEncryption[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def _check_closed(self) -> None: + if self._encryption is None: + raise InvalidOperation("Cannot use closed ClientEncryption") + + def close(self) -> None: + """Release resources. + + Note that using this class in a with-statement will automatically call + :meth:`close`:: + + with ClientEncryption(...) as client_encryption: + encrypted = client_encryption.encrypt(value, ...) + decrypted = client_encryption.decrypt(encrypted) + + """ + if self._io_callbacks: + self._io_callbacks.close() + self._encryption.close() + self._io_callbacks = None + self._encryption = None diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py new file mode 100644 index 0000000000..c1b75a3c95 --- /dev/null +++ b/pymongo/synchronous/helpers.py @@ -0,0 +1,86 @@ +# Copyright 2024-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Miscellaneous pieces that need to be synchronized.""" +from __future__ import annotations + +import asyncio +import socket +from typing import ( + Any, + Callable, + TypeVar, + cast, +) + +from pymongo.errors import ( + OperationFailure, +) +from pymongo.helpers_shared import _REAUTHENTICATION_REQUIRED_CODE + +_IS_SYNC = True + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + def inner(*args: Any, **kwargs: Any) -> Any: + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.message import _BulkWriteContext + from pymongo.synchronous.pool import Connection + + try: + return func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a Connection + # or has a connection attribute, so we can trigger + # a reauth. + conn = None + for arg in args: + if isinstance(arg, Connection): + conn = arg + break + if isinstance(arg, _BulkWriteContext): + conn = arg.conn # type: ignore[assignment] + break + if conn: + conn.authenticate(reauthenticate=True) + else: + raise + return func(*args, **kwargs) + raise + + return cast(F, inner) + + +def _getaddrinfo( + host: Any, port: Any, **kwargs: Any +) -> list[ + tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] +]: + if not _IS_SYNC: + loop = asyncio.get_running_loop() + return loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] + else: + return socket.getaddrinfo(host, port, **kwargs) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py new file mode 100644 index 0000000000..6e716402f4 --- /dev/null +++ b/pymongo/synchronous/mongo_client.py @@ -0,0 +1,2968 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for connecting to MongoDB. + +.. seealso:: `Read and Write Settings `_ for examples of connecting + to replica sets or sets of mongos servers. + +To get a :class:`~pymongo.database.Database` instance from a +:class:`MongoClient` use either dictionary-style or attribute-style +access: + +.. 
doctest:: + + >>> from pymongo import MongoClient + >>> c = MongoClient() + >>> c.test_database + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') +""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import warnings +import weakref +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + ContextManager, + FrozenSet, + Generator, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +from bson.timestamp import Timestamp +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _create_lock, + _release_locks, +) +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) +from pymongo.message import _CursorAddress, _GetMore, _Query +from pymongo.monitoring import ConnectionClosedReason, _EventListeners +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import client_session, database, uri_parser +from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream +from pymongo.synchronous.client_bulk import _ClientBulk +from pymongo.synchronous.client_session import _EmptyServerSession +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) +from pymongo.uri_parser_shared import ( + SRV_SCHEME, + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, + _validate_uri, + split_hosts, +) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.read_concern import ReadConcern + from pymongo.response import Response + from pymongo.server_selectors import Selection + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_session import ClientSession, _ServerSession + from pymongo.synchronous.cursor import _ConnectionManager + from pymongo.synchronous.encryption import _Encrypter + from pymongo.synchronous.pool import Connection + from 
pymongo.synchronous.server import Server + + +T = TypeVar("T") + +_WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] +_ReadCall = Callable[ + [Optional["ClientSession"], "Server", "Connection", _ServerMode], + T, +] + +_IS_SYNC = True + +_WriteOp = Union[ + InsertOne, # type: ignore[type-arg] + DeleteOne, + DeleteMany, + ReplaceOne, # type: ignore[type-arg] + UpdateOne, + UpdateMany, +] + + +class MongoClient(common.BaseObject, Generic[_DocumentType]): + HOST = "localhost" + PORT = 27017 + # Define order to retrieve options from ClientOptions for __repr__. + # No host/port; these are retrieved from TopologySettings. + _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # type: ignore[type-arg] + + def __init__( + self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Optional[Type[_DocumentType]] = None, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: + """Client for a MongoDB instance, a replica set, or a set of mongoses. + + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + + The client object is thread-safe and has connection-pooling built in. + If an operation fails because of a network error, + :class:`~pymongo.errors.ConnectionFailure` is raised and the client + reconnects in the background. Application code should handle this + exception (recognizing that the operation failed) and then continue to + execute. + + Best practice is to call :meth:`MongoClient.close` when the client is no longer needed, + or use the client in a with statement:: + + with MongoClient(url) as client: + # Use client here. + + The `host` parameter can be a full `mongodb URI + `_, in addition to + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter. For username and + passwords reserved characters like ':', '/', '+' and '@' must be + percent encoded following RFC 2396:: + + from urllib.parse import quote_plus + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), host) + client = MongoClient(uri) + + Unix domain sockets are also supported. The socket path must be percent + encoded in the URI:: + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), quote_plus(socket_path)) + client = MongoClient(uri) + + But not when passed as a simple hostname:: + + client = MongoClient('/tmp/mongodb-27017.sock') + + Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The + URI must include one, and only one, hostname. The hostname will be + resolved to one or more DNS `SRV records + `_ which will be used + as the seed list for connecting to the MongoDB deployment. When using + SRV URIs, the `authSource` and `replicaSet` configuration options can + be specified using `TXT records + `_. See the + `Initial DNS Seedlist Discovery spec + `_ + for more details. Note that the use of SRV URIs implicitly enables + TLS support. Pass tls=false in the URI to override. + + .. note:: MongoClient creation will block waiting for answers from + DNS when mongodb+srv:// URIs are used. + + .. 
note:: Starting with version 3.0 the :class:`MongoClient` + constructor no longer blocks while connecting to the server or + servers, and it no longer raises + :class:`~pymongo.errors.ConnectionFailure` if they are + unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. You can check if the server is available + like this:: + + from pymongo.errors import ConnectionFailure + client = MongoClient() + try: + # The ping command is cheap and does not require auth. + client.admin.command('ping') + except ConnectionFailure: + print("Server not available") + + .. warning:: When using PyMongo in a multiprocessing context, please + read `PyMongo multiprocessing `_ first. + + .. note:: Many of the following options can be passed using a MongoDB + URI or keyword parameters. If the same option is passed in a URI and + as a keyword parameter the keyword parameter takes precedence. + + :param host: hostname or IP address or Unix domain socket + path of a single mongod or mongos instance to connect to, or a + mongodb URI, or a list of hostnames (but no more than one mongodb + URI). If `host` is an IPv6 literal it must be enclosed in '[' + and ']' characters + following the RFC2732 URL syntax (e.g. '[::1]' for localhost). + Multihomed and round robin DNS addresses are **not** supported. + :param port: port number on which to connect + :param document_class: default class to use for + documents returned from queries on this client + :param tz_aware: if ``True``, + :class:`~datetime.datetime` instances returned as values + in a document by this :class:`MongoClient` will be timezone + aware (otherwise they will be naive) + :param connect: If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment. + :param type_registry: instance of + :class:`~bson.codec_options.TypeRegistry` to enable encoding + and decoding of custom types. + :param kwargs: **Additional optional parameters available as keyword arguments:** + + - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + `handling out of range datetimes `_ for details. + - `directConnection` (optional): if ``True``, forces this client to + connect directly to the specified MongoDB host as a standalone. + If ``false``, the client connects to the entire replica set of + which the given MongoDB host(s) is a part. If this is ``True`` + and a mongodb+srv:// URI or a URI containing multiple seeds is + provided, an exception will be raised. + - `maxPoolSize` (optional): The maximum allowable number of + concurrent connections to each connected server. Requests to a + server will block if there are `maxPoolSize` outstanding + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. 
+ - `minPoolSize` (optional): The minimum required number of concurrent + connections that the pool will maintain to each connected server. + Default is 0. + - `maxIdleTimeMS` (optional): The maximum number of milliseconds that + a connection can remain idle in the pool before being removed and + replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. + - `socketTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait for a response after sending an + ordinary (non-monitoring) database operation before concluding that + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). + - `connectTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait during server monitoring when + connecting a new socket to a server before concluding the server + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). + - `server_selector`: (callable or None) Optional, user-provided + function that augments server selection rules. The function should + accept as an argument a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + - `serverSelectionTimeoutMS`: (integer) Controls how long (in + milliseconds) the driver will wait to find an available, + appropriate server to carry out a database operation; while it is + waiting, multiple server monitoring operations may be carried out, + each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 + seconds). + - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) + a thread will wait for a socket from the pool if the pool has no + free sockets. Defaults to ``None`` (no timeout). + - `heartbeatFrequencyMS`: (optional) The number of milliseconds + between periodic server checks, or None to accept the default + frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". + - `appname`: (string or None) The name of the application that + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. + - `driver`: (pair or None) A driver implemented on top of PyMongo can + pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, + version, and platform to the message printed in the server log when + establishing a connection. + - `event_listeners`: a list or tuple of event listeners. See + :mod:`~pymongo.monitoring` for details. + - `retryWrites`: (boolean) Whether supported write operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported write operations are: + + - :meth:`~pymongo.collection.Collection.bulk_write`, as long as + :class:`~pymongo.operations.UpdateMany` or + :class:`~pymongo.operations.DeleteMany` are not included. 
+ - :meth:`~pymongo.collection.Collection.delete_one` + - :meth:`~pymongo.collection.Collection.insert_one` + - :meth:`~pymongo.collection.Collection.insert_many` + - :meth:`~pymongo.collection.Collection.replace_one` + - :meth:`~pymongo.collection.Collection.update_one` + - :meth:`~pymongo.collection.Collection.find_one_and_delete` + - :meth:`~pymongo.collection.Collection.find_one_and_replace` + - :meth:`~pymongo.collection.Collection.find_one_and_update` + + Unsupported write operations include, but are not limited to, + :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` + pipeline operator and any operation with an unacknowledged write + concern (e.g. {w: 0})). See + https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md + - `retryReads`: (boolean) Whether supported read operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported read operations are: + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + :meth:`~pymongo.collection.Collection.aggregate` without ``$out``, + :meth:`~pymongo.collection.Collection.distinct`, + :meth:`~pymongo.collection.Collection.count`, + :meth:`~pymongo.collection.Collection.estimated_document_count`, + :meth:`~pymongo.collection.Collection.count_documents`, + :meth:`pymongo.collection.Collection.watch`, + :meth:`~pymongo.collection.Collection.list_indexes`, + :meth:`pymongo.database.Database.watch`, + :meth:`~pymongo.database.Database.list_collections`, + :meth:`pymongo.mongo_client.MongoClient.watch`, + and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. + + Unsupported read operations include, but are not limited to + :meth:`~pymongo.database.Database.command` and any getMore + operation on a cursor. + + Enabling retryable reads makes applications more resilient to + transient errors such as network failures, database upgrades, and + replica set failovers. For an exact definition of which errors + trigger a retry, see the `retryable reads specification + `_. + + - `compressors`: Comma separated list of compressors for wire + protocol compression. The list is used to negotiate a compressor + with the server. Currently supported options are "snappy", "zlib" + and "zstd". Support for snappy requires the + `python-snappy `_ package. + zlib support requires the Python standard library zlib module. zstd + requires the `zstandard `_ + package. By default no compression is used. Compression support + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. + See `compress network traffic `_ for details. + - `zlibCompressionLevel`: (int) The zlib compression level to use + when zlib is used as the wire protocol compressor. Supported values + are -1 through 9. -1 tells the zlib library to use its default + compression level (usually 6). 0 means no compression. 1 is best + speed. 9 is best compression. Defaults to -1. + - `uuidRepresentation`: The BSON representation to use when encoding + from and decoding to instances of :class:`~uuid.UUID`. Valid + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See `handling UUID data `_ for details. 
+          - `unicode_decode_error_handler`: The error handler to apply when
+            a Unicode-related error occurs during BSON decoding that would
+            otherwise raise :exc:`UnicodeDecodeError`. Valid options include
+            'strict', 'replace', 'backslashreplace', 'surrogateescape', and
+            'ignore'. Defaults to 'strict'.
+          - `srvServiceName`: (string) The SRV service name to use for
+            "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so::
+
+                MongoClient("mongodb+srv://example.com/?srvServiceName=customname")
+          - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will
+            connect to. More specifically, when a "mongodb+srv://" connection string
+            resolves to more than srvMaxHosts number of hosts, the client will randomly
+            choose an srvMaxHosts sized subset of hosts.
+
+
+          | **Write Concern options:**
+          | (Only set if passed. No default values.)
+
+          - `w`: (integer or string) If this is a replica set, write operations
+            will block until they have been replicated to the specified number
+            or tagged set of servers. `w=<int>` always includes the replica set
+            primary (e.g. w=3 means write to the primary and wait until
+            replicated to **two** secondaries). Passing w=0 **disables write
+            acknowledgement** and all other write concern options.
+          - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`.
+            Specify a value in milliseconds to control how long to wait for write propagation
+            to complete. If replication does not complete in the given
+            timeframe, a timeout exception is raised. Passing wTimeoutMS=0
+            will cause **write operations to wait indefinitely**.
+          - `journal`: If ``True`` block until write operations have been
+            committed to the journal. Cannot be used in combination with
+            `fsync`. Write operations will fail with an exception if this
+            option is used when the server is running without journaling.
+          - `fsync`: If ``True`` and the server is running without journaling,
+            blocks until the server has synced all data files to disk. If the
+            server is running with journaling, this acts the same as the `j`
+            option, blocking until write operations have been committed to the
+            journal. Cannot be used in combination with `j`.
+
+          | **Replica set keyword arguments for connecting with a replica set
+            - either directly or via a mongos:**
+
+          - `replicaSet`: (string or None) The name of the replica set to
+            connect to. The driver will verify that all servers it connects to
+            match this name. Implies that the hosts specified are a seed list
+            and the driver should attempt to find all members of the set.
+            Defaults to ``None``.
+
+          | **Read Preference:**
+
+          - `readPreference`: The replica set read preference for this client.
+            One of ``primary``, ``primaryPreferred``, ``secondary``,
+            ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``.
+          - `readPreferenceTags`: Specifies a tag set as a comma-separated list
+            of colon-separated key-value pairs. For example ``dc:ny,rack:1``.
+            Defaults to ``None``.
+          - `maxStalenessSeconds`: (integer) The maximum estimated
+            length of time a replica set secondary can fall behind the primary
+            in replication before it will no longer be selected for operations.
+            Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds
+            is set, it must be a positive integer greater than or equal to
+            90 seconds.
+
+          .. seealso:: `Customize Server Selection `_
+
+          | **Authentication:**
+
+          - `username`: A string.
+          - `password`: A string.
+
+            Although username and password must be percent-escaped in a MongoDB
+            URI, they must not be percent-escaped when passed as parameters. In
+            this example, both the space and slash special characters are passed
+            as-is::
+
+                MongoClient(username="user name", password="pass/word")
+
+          - `authSource`: The database to authenticate on. Defaults to the
+            database specified in the URI, if provided, or to "admin".
+          - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options.
+            If no mechanism is specified, PyMongo automatically negotiates the
+            mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server.
+          - `authMechanismProperties`: Used to specify authentication mechanism
+            specific options. To specify the service name for GSSAPI
+            authentication pass authMechanismProperties='SERVICE_NAME:<service name>'.
+            To specify the session token for MONGODB-AWS authentication pass
+            ``authMechanismProperties='AWS_SESSION_TOKEN:<session token>'``.
+
+          .. seealso:: `Authentication `_
+
+          | **TLS/SSL configuration:**
+
+          - `tls`: (boolean) If ``True``, create the connection to the server
+            using transport layer security. Defaults to ``False``.
+          - `tlsInsecure`: (boolean) Specify whether TLS constraints should be
+            relaxed as much as possible. Setting ``tlsInsecure=True`` implies
+            ``tlsAllowInvalidCertificates=True`` and
+            ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think
+            very carefully before setting this to ``True`` as it dramatically
+            reduces the security of TLS.
+          - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues
+            the TLS handshake regardless of the outcome of the certificate
+            verification process. If this is ``False``, and a value is not
+            provided for ``tlsCAFile``, PyMongo will attempt to load system
+            provided CA certificates. If the python version in use does not
+            support loading system CA certificates then the ``tlsCAFile``
+            parameter must point to a file of CA certificates.
+            ``tlsAllowInvalidCertificates=False`` implies ``tls=True``.
+            Defaults to ``False``. Think very carefully before setting this
+            to ``True`` as that could make your application vulnerable to
+            on-path attackers.
+          - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS
+            hostname verification. ``tlsAllowInvalidHostnames=False`` implies
+            ``tls=True``. Defaults to ``False``. Think very carefully before
+            setting this to ``True`` as that could make your application
+            vulnerable to on-path attackers.
+          - `tlsCAFile`: A file containing a single or a bundle of
+            "certification authority" certificates, which are used to validate
+            certificates passed from the other end of the connection.
+            Implies ``tls=True``. Defaults to ``None``.
+          - `tlsCertificateKeyFile`: A file containing the client certificate
+            and private key. Implies ``tls=True``. Defaults to ``None``.
+          - `tlsCRLFile`: A file containing a PEM or DER formatted
+            certificate revocation list. Implies ``tls=True``. Defaults to
+            ``None``.
+          - `tlsCertificateKeyFilePassword`: The password or passphrase for
+            decrypting the private key in ``tlsCertificateKeyFile``. Only
+            necessary if the private key is encrypted. Defaults to ``None``.
+          - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables
+            certificate revocation status checking via the OCSP responder
+            specified on the server certificate.
+            ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``.
+            Defaults to ``False``.
+          - `ssl`: (boolean) Alias for ``tls``.
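+
+            As a minimal sketch combining these options (the host name and
+            file paths are illustrative placeholders, not defaults), a
+            TLS-enabled client using a custom CA bundle and a client
+            certificate could be constructed like this::
+
+                client = MongoClient(
+                    "mongodb://db.example.com:27017",
+                    tls=True,
+                    tlsCAFile="/path/to/ca.pem",
+                    tlsCertificateKeyFile="/path/to/client.pem",
+                )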
+
+          | **Read Concern options:**
+          | (If not set explicitly, this will use the server default)
+
+          - `readConcernLevel`: (string) The read concern level specifies the
+            level of isolation for read operations. For example, a read
+            operation using a read concern level of ``majority`` will only
+            return data that has been written to a majority of nodes. If the
+            level is left unspecified, the server default will be used.
+
+          | **Client side encryption options:**
+          | (If not set explicitly, client side encryption will not be enabled.)
+
+          - `auto_encryption_opts`: A
+            :class:`~pymongo.encryption_options.AutoEncryptionOpts` which
+            configures this client to automatically encrypt collection commands
+            and automatically decrypt results. See
+            `client-side field level encryption `_ for an example.
+            If a :class:`MongoClient` is configured with
+            ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a
+            separate internal ``MongoClient`` is created if any of the
+            following are true:
+
+              - A ``key_vault_client`` is not passed to
+                :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+              - ``bypass_auto_encryption=False`` is passed to
+                :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+
+          | **Stable API options:**
+          | (If not set explicitly, Stable API will not be enabled.)
+
+          - `server_api`: A
+            :class:`~pymongo.server_api.ServerApi` which configures this
+            client to use Stable API. See `versioned API `_ for
+            details.
+
+        .. seealso:: The MongoDB documentation on `connections `_.
+
+        .. versionchanged:: 4.5
+           Added the ``serverMonitoringMode`` keyword argument.
+
+        .. versionchanged:: 4.2
+           Added the ``timeoutMS`` keyword argument.
+
+        .. versionchanged:: 4.0
+
+             - Removed the fsync, unlock, is_locked, database_names, and
+               close_cursor methods.
+               See the :ref:`pymongo4-migration-guide`.
+             - Removed the ``waitQueueMultiple`` and ``socketKeepAlive``
+               keyword arguments.
+             - The default for `uuidRepresentation` was changed from
+               ``pythonLegacy`` to ``unspecified``.
+             - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and
+               keyword arguments.
+
+        .. versionchanged:: 3.12
+           Added the ``server_api`` keyword argument.
+           The following keyword arguments were deprecated:
+
+             - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor
+               of ``tlsCertificateKeyFile``.
+
+        .. versionchanged:: 3.11
+           Added the following keyword arguments and URI options:
+
+             - ``tlsDisableOCSPEndpointCheck``
+             - ``directConnection``
+
+        .. versionchanged:: 3.9
+           Added the ``retryReads`` keyword argument and URI option.
+           Added the ``tlsInsecure`` keyword argument and URI option.
+           The following keyword arguments and URI options were deprecated:
+
+             - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``.
+             - ``j`` was deprecated in favor of ``journal``.
+             - ``ssl_cert_reqs`` was deprecated in favor of
+               ``tlsAllowInvalidCertificates``.
+             - ``ssl_match_hostname`` was deprecated in favor of
+               ``tlsAllowInvalidHostnames``.
+             - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``.
+             - ``ssl_certfile`` was deprecated in favor of
+               ``tlsCertificateKeyFile``.
+             - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``.
+             - ``ssl_pem_passphrase`` was deprecated in favor of
+               ``tlsCertificateKeyFilePassword``.
+
+        .. versionchanged:: 3.9
+           ``retryWrites`` now defaults to ``True``.
+
+        .. versionchanged:: 3.8
+           Added the ``server_selector`` keyword argument.
+           Added the ``type_registry`` keyword argument.
+
+        .. versionchanged:: 3.7
+           Added the ``driver`` keyword argument.
+
+        ..
versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + Added the ``retryWrites`` keyword argument and URI option. + + .. versionchanged:: 3.5 + Add ``username`` and ``password`` options. Document the + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` + options. + Deprecated the ``socketKeepAlive`` keyword argument and URI option. + ``socketKeepAlive`` now defaults to ``True``. + + .. versionchanged:: 3.0 + :class:`~pymongo.mongo_client.MongoClient` is now the one and only + client class for a standalone server, mongos, or replica set. + It includes the functionality that had been split into + :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect + to a replica set, discover all its members, and monitor the set for + stepdowns, elections, and reconfigs. + + The :class:`~pymongo.mongo_client.MongoClient` constructor no + longer blocks while connecting to the server or servers, and it no + longer raises :class:`~pymongo.errors.ConnectionFailure` if they + are unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. + + Therefore the ``alive`` method is removed since it no longer + provides meaningful information; even if the client is disconnected, + it may discover a server in time to fulfill the next operation. + + In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of + standalone MongoDB servers and used the first it could connect to:: + + MongoClient(['host1.com:27017', 'host2.com:27017']) + + A list of multiple standalones is no longer supported; if multiple + servers are listed they must be members of the same replica set, or + mongoses in the same sharded cluster. + + The behavior for a list of mongoses is changed from "high + availability" to "load balancing". Before, the client connected to + the lowest-latency mongos in the list, and used it until a network + error prompted it to re-evaluate all mongoses' latencies and + reconnect to one of them. In PyMongo 3, the client monitors its + network latency to all the mongoses continuously, and distributes + operations evenly among those with the lowest latency. See + `load balancing `_ for more information. + + The ``connect`` option is added. + + The ``start_request``, ``in_request``, and ``end_request`` methods + are removed, as well as the ``auto_start_request`` option. + + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. + + The :meth:`MongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.MongoClient.close`. + + :class:`~pymongo.mongo_client.MongoClient` no longer returns an + instance of :class:`~pymongo.database.Database` for attribute names + with leading underscores. You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. 
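+
+        Because keyword parameters take precedence over URI options, the
+        second client below ends up with retryable writes disabled even
+        though its URI enables them (a minimal sketch of option precedence;
+        the URI is a placeholder)::
+
+            c1 = MongoClient("mongodb://localhost:27017/?retryWrites=true")
+            # The keyword argument overrides the URI option.
+            c2 = MongoClient(
+                "mongodb://localhost:27017/?retryWrites=true", retryWrites=False
+            )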
+ """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} + + self._seeds = set() + is_srv = False + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in self._host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + res = _validate_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + srv_max_hosts=srv_max_hosts, + ) + is_srv = entity.startswith(SRV_SCHEME) + self._seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in self._seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, self._seeds) + + # Username and password passed as kwargs override user info in URI. 
+ username = opts.get("username", username) + password = opts.get("password", password) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + + self._default_database_name = dbase + self._lock = _create_lock() + self._kill_cursors_queue: list = [] # type: ignore[type-arg] + + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": self._seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } + ) + + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, + ) + + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) + + self._opened = False + self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None + if not is_srv: + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. 
+ username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, + ) + if self._options.auto_encryption_opts: + from pymongo.synchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts + + def _connect(self) -> None: + """Explicitly connect to MongoDB synchronously instead of on the first operation.""" + self._get_topology() + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + # Seed the topology with the old one's pid so we can detect clients + # that are opened before a fork and used after. + self._topology._pid = old_pid + + def target() -> bool: + client = self_ref() + if client is None: + return False # Stop the executor. 
+ MongoClient._process_periodic_tasks(client) + return True + + executor = periodic_executor.PeriodicExecutor( + interval=common.KILL_CURSOR_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_kill_cursors_thread", + ) + + # We strongly reference the executor and it weakly references us via + # this closure. When the client is freed, stop the executor soon. + self_ref: Any = weakref.ref(self, executor.close) + self._kill_cursors_executor = executor + self._opened = False + + def append_metadata(self, driver_info: DriverInfo) -> None: + """Appends the given metadata to existing driver metadata. + + :param driver_info: a :class:`~pymongo.driver_info.DriverInfo` + + .. versionadded:: 4.14 + """ + + if not isinstance(driver_info, DriverInfo): + raise TypeError( + f"driver_info must be an instance of DriverInfo, not {type(driver_info)}" + ) + self._options.pool_options._update_metadata(driver_info) + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self._options.load_balanced and not (session and session.in_transaction) + + def _after_fork(self) -> None: + """Resets topology in a child after successfully forking.""" + self._init_background(self._topology._pid) + # Reset the session pool to avoid duplicate sessions in the child process. + self._topology._session_pool.reset() + + def _duplicate(self, **kwargs: Any) -> MongoClient: # type: ignore[type-arg] + args = self._init_kwargs.copy() + args.update(kwargs) + return MongoClient(**args) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> ChangeStream[_DocumentType]: + """Watch changes on this cluster. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.ClusterChangeStream` cursor which + iterates over changes on all databases on this cluster. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with client.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.ClusterChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.ClusterChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. 
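+
+        As a minimal sketch of persisting progress across restarts
+        (``load_token`` and ``save_token`` are hypothetical application
+        helpers, not part of PyMongo), the stream's resume token can be
+        saved and later passed back as ``resume_after``:
+
+        .. code-block:: python
+
+            token = load_token()  # None on the first run
+            with client.watch(resume_after=token) as stream:
+                for change in stream:
+                    save_token(stream.resume_token)  # persist for later resumption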
+
+        :param pipeline: A list of aggregation pipeline stages to
+            append to an initial ``$changeStream`` stage. Not all
+            pipeline stages are valid after a ``$changeStream`` stage, see the
+            MongoDB documentation on change streams for the supported stages.
+        :param full_document: The fullDocument to pass as an option
+            to the ``$changeStream`` stage. Allowed values: 'updateLookup',
+            'whenAvailable', 'required'. When set to 'updateLookup', the
+            change notification for partial updates will include both a delta
+            describing the changes to the document, as well as a copy of the
+            entire document that was changed from some time after the change
+            occurred.
+        :param full_document_before_change: Allowed values: 'whenAvailable'
+            and 'required'. Change events may now result in a
+            'fullDocumentBeforeChange' response field.
+        :param resume_after: A resume token. If provided, the
+            change stream will start returning changes that occur directly
+            after the operation specified in the resume token. A resume token
+            is the _id value of a change document.
+        :param max_await_time_ms: The maximum time in milliseconds
+            for the server to wait for changes before responding to a getMore
+            operation.
+        :param batch_size: The maximum number of documents to return
+            per batch.
+        :param collation: The :class:`~pymongo.collation.Collation`
+            to use for the aggregation.
+        :param start_at_operation_time: If provided, the resulting
+            change stream will only return changes that occurred at or after
+            the specified :class:`~bson.timestamp.Timestamp`. Requires
+            MongoDB >= 4.0.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param start_after: The same as `resume_after` except that
+            `start_after` can resume notifications after an invalidate event.
+            This option and `resume_after` are mutually exclusive.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+        :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor.
+
+        .. versionchanged:: 4.3
+           Added `show_expanded_events` parameter.
+
+        .. versionchanged:: 4.2
+           Added ``full_document_before_change`` parameter.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.9
+           Added the ``start_after`` parameter.
+
+        .. versionadded:: 3.7
+
+        .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+        .. _change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = ClusterChangeStream(
+            self.admin,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events=show_expanded_events,
+        )
+
+        change_stream._initialize_cursor()
+        return change_stream
+
+    @property
+    def topology_description(self) -> TopologyDescription:
+        """The description of the connected MongoDB deployment.
+
+        >>> client.topology_description
+        <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ...>, <ServerDescription ...>, <ServerDescription ...>]>
+        >>> client.topology_description.topology_type_name
+        'ReplicaSetWithPrimary'
+
+        Note that the description is periodically updated in the background
+        but the returned object itself is immutable. Access this property again
+        to get a more recent
+        :class:`~pymongo.topology_description.TopologyDescription`.
+
+        :return: An instance of
+            :class:`~pymongo.topology_description.TopologyDescription`.
+
+        ..
versionadded:: 4.0 + """ + if self._topology is None: + servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds} + return TopologyDescription( + TOPOLOGY_TYPE.Unknown, + servers, + None, + None, + None, + self._topology_settings, + ) + return self._topology.description + + @property + def nodes(self) -> FrozenSet[_Address]: + """Set of all currently connected servers. + + .. warning:: When connected to a replica set the value of :attr:`nodes` + can change over time as :class:`MongoClient`'s view of the replica + set changes. :attr:`nodes` can also be an empty set when + :class:`MongoClient` is first instantiated and hasn't yet connected + to any servers, or a network partition causes it to lose connection + to all servers. + """ + if self._topology is None: + return frozenset() + description = self._topology.description + return frozenset(s.address for s in description.known_servers) + + @property + def options(self) -> ClientOptions: + """The configuration options for this client. + + :return: An instance of :class:`~pymongo.client_options.ClientOptions`. + + .. versionadded:: 4.0 + """ + return self._options + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + return ( + tuple(sorted(self._resolve_srv_info["seeds"])), + self._options.replica_set_name, + self._resolve_srv_info["fqdn"], + self._resolve_srv_info["srv_service_name"], + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.eq_props()) + + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: + """Fix options whose __repr__ isn't usable in a constructor.""" + if option == "document_class": + if value is dict: + return "document_class=dict" + else: + return f"document_class={value.__module__}.{value.__name__}" + if option in common.TIMEOUT_OPTIONS and value is not None: + return f"{option}={int(value * 1000)}" + + return f"{option}={value!r}" + + # Host first... + if self._topology is None: + options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"] + else: + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] + # ... then everything in self._constructor_args... + options.extend( + option_repr(key, self._options._options[key]) for key in self._constructor_args + ) + # ... then everything else. + options.extend( + option_repr(key, self._options._options[key]) + for key in self._options._options + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._repr_helper()})" + + def __getattr__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. 
+ + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.Database(self, name) + + def __del__(self) -> None: + """Check that this MongoClient has been closed and issue a warning if not.""" + try: + if self._opened and not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + ) + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. + pass + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.client_session.SessionOptions`. See the + :mod:`~pymongo.client_session` module for details and examples. + + A :class:`~pymongo.client_session.ClientSession` may only be used with + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.client_session.ClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. 
return None
+
+    def _send_cluster_time(
+        self, command: MutableMapping[str, Any], session: Optional[ClientSession]
+    ) -> None:
+        topology_time = self._topology.max_cluster_time()
+        session_time = session.cluster_time if session else None
+        if topology_time and session_time:
+            if topology_time["clusterTime"] > session_time["clusterTime"]:
+                cluster_time: Optional[ClusterTime] = topology_time
+            else:
+                cluster_time = session_time
+        else:
+            cluster_time = topology_time or session_time
+        if cluster_time:
+            command["$clusterTime"] = cluster_time
+
+    def get_default_database(
+        self,
+        default: Optional[str] = None,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> database.Database[_DocumentType]:
+        """Get the database named in the MongoDB connection URI.
+
+        >>> uri = 'mongodb://host/my_database'
+        >>> client = MongoClient(uri)
+        >>> db = client.get_default_database()
+        >>> assert db.name == 'my_database'
+        >>> db = client.get_database()
+        >>> assert db.name == 'my_database'
+
+        Useful in scripts where you want to choose which database to use
+        based only on the URI in a configuration file.
+
+        :param default: the database name to use if no database name
+            was provided in the URI.
+        :param codec_options: An instance of
+            :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
+            default) the :attr:`codec_options` of this :class:`MongoClient` is
+            used.
+        :param read_preference: The read preference to use. If
+            ``None`` (the default) the :attr:`read_preference` of this
+            :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
+            for options.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
+            default) the :attr:`write_concern` of this :class:`MongoClient` is
+            used.
+        :param read_concern: An instance of
+            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
+            default) the :attr:`read_concern` of this :class:`MongoClient` is
+            used.
+
+        .. versionchanged:: 3.8
+           Undeprecated. Added the ``default``, ``codec_options``,
+           ``read_preference``, ``write_concern`` and ``read_concern``
+           parameters.
+
+        .. versionchanged:: 3.5
+           Deprecated, use :meth:`get_database` instead.
+        """
+        if self._default_database_name is None and default is None:
+            raise ConfigurationError("No default database name defined or provided.")
+
+        name = cast(str, self._default_database_name or default)
+        return database.Database(
+            self, name, codec_options, read_preference, write_concern, read_concern
+        )
+
+    def get_database(
+        self,
+        name: Optional[str] = None,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> database.Database[_DocumentType]:
+        """Get a :class:`~pymongo.database.Database` with the given name and
+        options.
+
+        Useful for creating a :class:`~pymongo.database.Database` with
+        different codec options, read preference, and/or write concern from
+        this :class:`MongoClient`.
+
+        >>> client.read_preference
+        Primary()
+        >>> db1 = client.test
+        >>> db1.read_preference
+        Primary()
+        >>> from pymongo import ReadPreference
+        >>> db2 = client.get_database(
+        ...
'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.Database: # type: ignore[type-arg] + """Get a Database instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + def __enter__(self) -> MongoClient[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") + + next = __next__ + + def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = (self._get_topology()).select_server(writable_server_selector, _Op.TEST) + + return getattr(server.description, attr_name) + + @property + def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. versionadded:: 3.0 + """ + if self._topology is None: + self._get_topology() + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' 
+ ) + return self._server_property("address") + + @property + def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_primary() # type: ignore[return-value] + + @property + def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_secondaries() + + @property + def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_arbiters() + + @property + def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return self._server_property("is_writable") + + @property + def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return self._server_property("server_type") == SERVER_TYPE.Mongos + + def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use Connection.command directly to avoid implicitly creating + # another session. + with self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. + + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + if self._topology is None: + return + session_ids = self._topology.pop_all_sessions() + if session_ids: + self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. 
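+ # (Closing the executor first means no new periodic pass can start
+ # while the remaining queued requests are flushed synchronously.)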
+ self._kill_cursors_executor.close() + self._process_kill_cursors() + self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + self._encrypter.close() + self._closed = True + if not _IS_SYNC: + asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) + + if not _IS_SYNC: + # Add support for contextlib.closing. + close = close + + def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use MongoClient in different event loop. MongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) + if not self._opened: + if self._resolve_srv_info["is_srv"]: + self._resolve_srv() + self._init_background() + self._topology.open() + with self._lock: + self._kill_cursors_executor.open() + self._opened = True + return self._topology + + @contextlib.contextmanager + def _checkout( + self, server: Server, session: Optional[ClientSession] + ) -> Generator[Connection, None]: + in_txn = session and session.in_transaction + with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + with server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The ClientSession for the next operation, or None. May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = self._get_topology() + if session and not session.in_transaction: + session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. 
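+ # select_server_by_address returns None if the address is no
+ # longer part of the topology; that case is surfaced as
+ # AutoReconnect below.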
+ server = topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + session._unpin() + raise + + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + server = self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.contextmanager + def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] + ) -> Generator[tuple[Connection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single + with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[ClientSession], + operation: str, + ) -> ContextManager[tuple[Connection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + server = self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, # type: ignore[type-arg] + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. 
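+
+ :return: A Response containing the server's reply.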
+ """
+ if operation.conn_mgr:
+ server = self._select_server(
+ operation.read_preference,
+ operation.session, # type: ignore[arg-type]
+ operation.name,
+ address=address,
+ )
+
+ with operation.conn_mgr._lock:
+ with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type]
+ err_handler.contribute_socket(operation.conn_mgr.conn)
+ return server.run_operation(
+ operation.conn_mgr.conn,
+ operation,
+ operation.read_preference,
+ self._event_listeners,
+ unpack_res,
+ self,
+ )
+
+ def _cmd(
+ _session: Optional[ClientSession],
+ server: Server,
+ conn: Connection,
+ read_preference: _ServerMode,
+ ) -> Response:
+ operation.reset() # Reset op in case of retry.
+ return server.run_operation(
+ conn,
+ operation,
+ read_preference,
+ self._event_listeners,
+ unpack_res,
+ self,
+ )
+
+ return self._retryable_read(
+ _cmd,
+ operation.read_preference,
+ operation.session, # type: ignore[arg-type]
+ address=address,
+ retryable=isinstance(operation, _Query),
+ operation=operation.name,
+ )
+
+ def _retry_with_session(
+ self,
+ retryable: bool,
+ func: _WriteCall[T],
+ session: Optional[ClientSession],
+ bulk: Optional[Union[_Bulk, _ClientBulk]],
+ operation: str,
+ operation_id: Optional[int] = None,
+ ) -> T:
+ """Execute an operation with at most one consecutive retry
+
+ Returns func()'s return value on success. On error retries the same
+ command.
+
+ Re-raises any exception thrown by func().
+ """
+ # Ensure that the options support retry_writes and there is a valid session not in
+ # transaction, otherwise, we will not support retry behavior for this txn.
+ retryable = bool(
+ retryable and self.options.retry_writes and session and not session.in_transaction
+ )
+ return self._retry_internal(
+ func=func,
+ session=session,
+ bulk=bulk,
+ operation=operation,
+ retryable=retryable,
+ operation_id=operation_id,
+ )
+
+ @_csot.apply
+ def _retry_internal(
+ self,
+ func: _WriteCall[T] | _ReadCall[T],
+ session: Optional[ClientSession],
+ bulk: Optional[Union[_Bulk, _ClientBulk]],
+ operation: str,
+ is_read: bool = False,
+ address: Optional[_Address] = None,
+ read_pref: Optional[_ServerMode] = None,
+ retryable: bool = False,
+ operation_id: Optional[int] = None,
+ ) -> T:
+ """Internal retryable helper for all client transactions.
+
+ :param func: Callback function we want to retry
+ :param session: Client Session on which the transaction should occur
+ :param bulk: Abstraction to handle bulk write operations
+ :param operation: The name of the operation that the server is being selected for
+ :param is_read: If this is an exclusive read transaction, defaults to False
+ :param address: Server Address, defaults to None
+ :param read_pref: The read preference to use for the operation, defaults to None
+ :param retryable: If the operation should be retried once, defaults to False
+
+ :return: Output of the calling func()
+ """
+ return _ClientConnectionRetryable(
+ mongo_client=self,
+ func=func,
+ bulk=bulk,
+ operation=operation,
+ is_read=is_read,
+ session=session,
+ read_pref=read_pref,
+ address=address,
+ retryable=retryable,
+ operation_id=operation_id,
+ ).run()
+
+ def _retryable_read(
+ self,
+ func: _ReadCall[T],
+ read_pref: _ServerMode,
+ session: Optional[ClientSession],
+ operation: str,
+ address: Optional[_Address] = None,
+ retryable: bool = True,
+ operation_id: Optional[int] = None,
+ ) -> T:
+ """Execute an operation with consecutive retries if possible
+
+ Returns func()'s return value on success. On error retries the same
+ command.
+
+ Re-raises any exception thrown by func().
+
+ :param func: Read call we want to execute
+ :param read_pref: Desired read preference for the read operation
+ :param session: Client session we should use to execute operation
+ :param operation: The name of the operation that the server is being selected for
+ :param address: Optional address when sending a message, defaults to None
+ :param retryable: if we should attempt retries
+ (may not always be supported even if supplied), defaults to True
+ """
+
+ # Ensure that the client supports retrying on reads and there is no session in
+ # transaction, otherwise, we will not support retry behavior for this call.
+ retryable = bool(
+ retryable and self.options.retry_reads and not (session and session.in_transaction)
+ )
+ with self._tmp_session(session) as s:
+ return self._retry_internal(
+ func,
+ s,
+ None,
+ operation,
+ is_read=True,
+ address=address,
+ read_pref=read_pref,
+ retryable=retryable,
+ operation_id=operation_id,
+ )
+
+ def _retryable_write(
+ self,
+ retryable: bool,
+ func: _WriteCall[T],
+ session: Optional[ClientSession],
+ operation: str,
+ bulk: Optional[Union[_Bulk, _ClientBulk]] = None,
+ operation_id: Optional[int] = None,
+ ) -> T:
+ """Execute an operation with consecutive retries if possible
+
+ Returns func()'s return value on success. On error retries the same
+ command.
+
+ Re-raises any exception thrown by func().
+
+ :param retryable: if we should attempt retries (may not always be supported)
+ :param func: write call we want to execute during a session
+ :param session: Client session we will use to execute write operation
+ :param operation: The name of the operation that the server is being selected for
+ :param bulk: bulk abstraction to execute operations in bulk, defaults to None
+ """
+ with self._tmp_session(session) as s:
+ return self._retry_with_session(retryable, func, s, bulk, operation, operation_id)
+
+ def _cleanup_cursor_no_lock(
+ self,
+ cursor_id: int,
+ address: Optional[_CursorAddress],
+ conn_mgr: _ConnectionManager,
+ session: Optional[ClientSession],
+ ) -> None:
+ """Cleanup a cursor from __del__ without locking.
+
+ This method handles cleanup for Cursors/CommandCursors including any
+ pinned connection attached at the time the cursor
+ was garbage collected.
+
+ :param cursor_id: The cursor id which may be 0.
+ :param address: The _CursorAddress.
+ :param conn_mgr: The _ConnectionManager for the pinned connection or None.
+ :param session: The cursor's session.
+ """
+ # The cursor will be closed later in a different session.
+ if cursor_id or conn_mgr:
+ self._close_cursor_soon(cursor_id, address, conn_mgr)
+ if session and session._implicit and not session._leave_alive:
+ session._end_implicit_session()
+
+ def _cleanup_cursor_lock(
+ self,
+ cursor_id: int,
+ address: Optional[_CursorAddress],
+ conn_mgr: _ConnectionManager,
+ session: Optional[ClientSession],
+ ) -> None:
+ """Cleanup a cursor from cursor.close() using a lock.
+
+ This method handles cleanup for Cursors/CommandCursors including any
+ pinned connection or implicit session attached at the time the cursor
+ was closed or garbage collected.
+
+ :param cursor_id: The cursor id which may be 0.
+ :param address: The _CursorAddress.
+ :param conn_mgr: The _ConnectionManager for the pinned connection or None.
+ :param session: The cursor's session.
+ """ + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + conn_mgr.close() + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[ClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") + + try: + if conn_mgr: + with conn_mgr._lock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + self._kill_cursors([cursor_id], address, self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[ClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. + server = topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + with self._checkout(server, session) as conn: + assert address is not None + self._kill_cursor_impl(cursor_ids, address, session, conn) + + def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[ClientSession], + conn: Connection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + conn.command(db, spec, session=session, client=self) + + def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + _log_client_error() + + # Don't re-open topology if it's closed and there's no pending cursors. 
+ if address_to_cursor_ids: + topology = self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + _log_client_error() + + # This method is run periodically by a background thread. + def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + self._process_kill_cursors() + self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + _log_client_error() + + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession] + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return self._topology.return_server_session(server_session) + + @contextlib.contextmanager + def _tmp_session( + self, session: Optional[client_session.ClientSession] + ) -> Generator[Optional[client_session.ClientSession], None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError( + f"'session' argument must be a ClientSession or None, not {type(session)}" + ) + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + s.end_session() + raise + finally: + # Call end_session when we exit this scope. + if not s._attached_to_cursor: + s.end_session() + else: + yield None + + def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: + self._topology.receive_cluster_time(reply.get("$clusterTime")) + if session is not None: + session._process_response(reply) + + def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: + """Get information about the MongoDB server we're connected to. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return cast( # type: ignore[redundant-cast] + dict[str, Any], + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + def _list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) + + def list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + """Get a cursor over the databases of the connected server. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). 
+ If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time.
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed,
+ or use the cursor in a with statement::
+
+ with client.list_databases() as cursor:
+ for database in cursor:
+ print(database)
+
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param kwargs: Optional parameters of the ``listDatabases`` command
+ can be passed as keyword arguments to this method. The supported
+ options differ by server version.
+
+ :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`.
+
+ .. versionadded:: 3.6
+ """
+ return self._list_databases(session, comment, **kwargs)
+
+ def list_database_names(
+ self,
+ session: Optional[client_session.ClientSession] = None,
+ comment: Optional[Any] = None,
+ ) -> list[str]:
+ """Get a list of the names of all databases on the connected server.
+
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+
+ .. versionchanged:: 4.1
+ Added ``comment`` parameter.
+
+ .. versionadded:: 3.6
+ """
+ res = self._list_databases(session, nameOnly=True, comment=comment)
+ return [doc["name"] for doc in res]
+
+ @_csot.apply
+ def drop_database(
+ self,
+ name_or_database: Union[str, database.Database[_DocumentTypeArg]],
+ session: Optional[client_session.ClientSession] = None,
+ comment: Optional[Any] = None,
+ ) -> None:
+ """Drop a database.
+
+ Raises :class:`TypeError` if `name_or_database` is not an instance of
+ :class:`str` or :class:`~pymongo.database.Database`.
+
+ :param name_or_database: the name of a database to drop, or a
+ :class:`~pymongo.database.Database` instance representing the
+ database to drop
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param comment: A user-provided comment to attach to this
+ command.
+
+ .. versionchanged:: 4.1
+ Added ``comment`` parameter.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+
+ .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of
+ this client is automatically applied to this operation.
+
+ .. versionchanged:: 3.4
+ Apply this client's write concern automatically to this operation
+ when connected to MongoDB >= 3.4.
+
+ """
+ name = name_or_database
+ if isinstance(name, database.Database):
+ name = name.name
+
+ if not isinstance(name, str):
+ raise TypeError(
+ f"name_or_database must be an instance of str or a Database, not {type(name)}"
+ )
+
+ with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn:
+ self[name]._command(
+ conn,
+ {"dropDatabase": 1, "comment": comment},
+ read_preference=ReadPreference.PRIMARY,
+ write_concern=self._write_concern_for(session),
+ parse_write_concern_error=True,
+ session=session,
+ )
+
+ @_csot.apply
+ def bulk_write(
+ self,
+ models: Sequence[_WriteOp],
+ session: Optional[ClientSession] = None,
+ ordered: bool = True,
+ verbose_results: bool = False,
+ bypass_document_validation: Optional[bool] = None,
+ comment: Optional[Any] = None,
+ let: Optional[Mapping[str, Any]] = None,
+ write_concern: Optional[WriteConcern] = None,
+ ) -> ClientBulkWriteResult:
+ """Send a batch of write operations, potentially across multiple namespaces, to the server.
+
+ Requests are passed as a list of write operation instances (
+ :class:`~pymongo.operations.InsertOne`,
+ :class:`~pymongo.operations.UpdateOne`,
+ :class:`~pymongo.operations.UpdateMany`,
+ :class:`~pymongo.operations.ReplaceOne`,
+ :class:`~pymongo.operations.DeleteOne`, or
+ :class:`~pymongo.operations.DeleteMany`).
+
+ >>> for doc in db.test.find({}):
+ ... print(doc)
+ ...
+ {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')}
+ {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+ ...
+ >>> for doc in db.coll.find({}):
+ ... print(doc)
+ ...
+ {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+ ...
+ >>> # DeleteMany, UpdateOne, and UpdateMany are also available.
+ >>> from pymongo import InsertOne, DeleteOne, ReplaceOne
+ >>> models = [InsertOne(namespace="db.test", document={'y': 1}),
+ ... DeleteOne(namespace="db.test", filter={'x': 1}),
+ ... InsertOne(namespace="db.coll", document={'y': 2}),
+ ... ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)]
+ >>> result = client.bulk_write(models=models)
+ >>> result.inserted_count
+ 2
+ >>> result.deleted_count
+ 1
+ >>> result.modified_count
+ 0
+ >>> result.upserted_count
+ 1
+ >>> for doc in db.test.find({}):
+ ... print(doc)
+ ...
+ {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+ {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')}
+ {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')}
+ ...
+ >>> for doc in db.coll.find({}):
+ ... print(doc)
+ ...
+ {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+ {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')}
+
+ :param models: A list of write operation instances.
+ :param session: (optional) An instance of
+ :class:`~pymongo.client_session.ClientSession`.
+ :param ordered: If ``True`` (the default), requests will be
+ performed on the server serially, in the order provided. If an error
+ occurs all remaining operations are aborted. If ``False``, requests
+ will still be performed on the server serially, in the order provided,
+ but all operations will be attempted even if any errors occur.
+ :param verbose_results: If ``True``, detailed results for each
+ successful operation will be included in the returned
+ :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``.
+ :param bypass_document_validation: (optional) If ``True``, allows the
+ write to opt-out of document level validation. Default is ``False``.
+ :param comment: (optional) A user-provided comment to attach to this
+ command.
+ :param let: (optional) Map of parameter names and values. Values must be
+ constant or closed expressions that do not reference document
+ fields. Parameters can then be accessed as variables in an
+ aggregate expression context (e.g. "$$var").
+ :param write_concern: (optional) The write concern to use for this bulk write.
+
+ :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`.
+
+ .. seealso:: For more info, see the Client Bulk Write specification.
+
+ .. seealso:: *Writes and ids* in the PyMongo documentation.
+
+ .. note:: Requires MongoDB server version 8.0+.
+
+ .. versionadded:: 4.9
+ """
+ if self._options.auto_encryption_opts:
+ raise InvalidOperation(
+ "MongoClient.bulk_write does not currently support automatic encryption"
+ )
+
+ if session and session.in_transaction:
+ # Inherit the transaction write concern.
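+ # An explicit write_concern would conflict with the transaction's
+ # own write concern, so reject it instead of silently ignoring it.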
+ if write_concern: + raise InvalidOperation("Cannot set write concern after starting a transaction") + write_concern = session._transaction.opts.write_concern # type: ignore[union-attr] + else: + # Inherit the client's write concern if none is provided. + if not write_concern: + write_concern = self.write_concern + + if write_concern and not write_concern.acknowledged and verbose_results: + raise InvalidOperation( + "Cannot request unacknowledged write concern and verbose results" + ) + elif write_concern and not write_concern.acknowledged and ordered: + raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes") + + common.validate_list("models", models) + + blk = _ClientBulk( + self, + write_concern=write_concern, # type: ignore[arg-type] + ordered=ordered, + bypass_document_validation=bypass_document_validation, + comment=comment, + let=let, + verbose_results=verbose_results, + ) + for model in models: + try: + model._add_to_client_bulk(blk) + except AttributeError: + raise TypeError(f"{model!r} is not a valid request") from None + + return blk.execute(session, _Op.BULK_WRITE) + + +def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: + """Return the server response from PyMongo exception or None.""" + if isinstance(exc, (BulkWriteError, ClientBulkWriteException)): + # Check the last writeConcernError to determine if this + # BulkWriteError is retryable. + wces = exc.details["writeConcernErrors"] + return wces[-1] if wces else None + if isinstance(exc, (NotPrimaryError, OperationFailure)): + return cast(Mapping[str, Any], exc.details) + return None + + +def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: + doc = _retryable_error_doc(exc) + if doc: + code = doc.get("code", 0) + # retryWrites on MMAPv1 should raise an actionable error. + if code == 20 and str(exc).startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, exc.details) # type: ignore[attr-defined] + if max_wire_version >= 9: + # In MongoDB 4.4+, the server reports the error labels. + for label in doc.get("errorLabels", []): + exc._add_error_label(label) + else: + # Do not consult writeConcernError for pre-4.4 mongos. + if isinstance(exc, WriteConcernError) and is_mongos: + pass + elif code in helpers_shared._RETRYABLE_ERROR_CODES: + exc._add_error_label("RetryableWriteError") + + # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is + # handled above. + if isinstance(exc, ClientBulkWriteException): + exc_to_check = exc.error + else: + exc_to_check = exc + if isinstance(exc_to_check, ConnectionFailure) and not isinstance( + exc_to_check, (NotPrimaryError, WaitQueueTimeoutError) + ): + exc_to_check._add_error_label("RetryableWriteError") + + +class _MongoClientErrorHandler: + """Handle errors raised when executing an operation.""" + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) + + def __init__( + self, + client: MongoClient, # type: ignore[type-arg] + server: Server, + session: Optional[ClientSession], + ): + if not isinstance(client, MongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. 
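+ # Checking class names along the MRO accepts any type that claims
+ # MongoClient ancestry, even when isinstance() fails (e.g. mocks).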
+ if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__): + raise TypeError(f"MongoClient required but given {type(client).__name__}") + + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + self.session._unpin() + err_ctx = _ErrorContext( + exc_val, # type: ignore[arg-type] + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + assert self.client._topology is not None + self.client._topology.handle_error(self.server_address, err_ctx) + + def __enter__(self) -> _MongoClientErrorHandler: + return self + + def __exit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return self.handle(exc_type, exc_val) + + +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" + + def __init__( + self, + mongo_client: MongoClient, # type: ignore[type-arg] + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[Union[_Bulk, _ClientBulk]], + operation: str, + is_read: bool = False, + session: Optional[ClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id + self._attempt_number = 0 + + def run(self) -> T: + """Runs the supplied func() and attempts a retry + + :raises: self._last_error: Last exception raised + + :return: 
Result of the func() call
+ """
+ # Increment the transaction id up front to ensure any retry attempt
+ # will use the proper txnNumber, even if server or socket selection
+ # fails before the command can be sent.
+ if self._is_session_state_retryable() and self._retryable and not self._is_read:
+ self._session._start_retryable_write() # type: ignore
+ if self._bulk:
+ self._bulk.started_retryable_write = True
+
+ while True:
+ self._check_last_error(check_csot=True)
+ try:
+ return self._read() if self._is_read else self._write()
+ except ServerSelectionTimeoutError:
+ # The application may think the write was never attempted
+ # if we raise ServerSelectionTimeoutError on the retry
+ # attempt. Raise the original exception instead.
+ self._check_last_error()
+ # A ServerSelectionTimeoutError indicates that there may
+ # be a persistent outage. Attempting to retry in this case will
+ # most likely be a waste of time.
+ raise
+ except PyMongoError as exc:
+ # Execute specialized catch on read
+ if self._is_read:
+ if isinstance(exc, (ConnectionFailure, OperationFailure)):
+ # ConnectionFailures do not supply a code property
+ exc_code = getattr(exc, "code", None)
+ if self._is_not_eligible_for_retry() or (
+ isinstance(exc, OperationFailure)
+ and exc_code not in helpers_shared._RETRYABLE_ERROR_CODES
+ ):
+ raise
+ self._retrying = True
+ self._last_error = exc
+ self._attempt_number += 1
+ else:
+ raise
+
+ # Specialized catch on write operation
+ if not self._is_read:
+ if not self._retryable:
+ raise
+ if isinstance(exc, ClientBulkWriteException) and exc.error:
+ retryable_write_error_exc = isinstance(
+ exc.error, PyMongoError
+ ) and exc.error.has_error_label("RetryableWriteError")
+ else:
+ retryable_write_error_exc = exc.has_error_label("RetryableWriteError")
+ if retryable_write_error_exc:
+ assert self._session
+ self._session._unpin()
+ if not retryable_write_error_exc or self._is_not_eligible_for_retry():
+ if exc.has_error_label("NoWritesPerformed") and self._last_error:
+ raise self._last_error from exc
+ else:
+ raise
+ self._attempt_number += 1
+ if self._bulk:
+ self._bulk.retrying = True
+ else:
+ self._retrying = True
+ if not exc.has_error_label("NoWritesPerformed"):
+ self._last_error = exc
+ if self._last_error is None:
+ self._last_error = exc
+
+ if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded:
+ self._deprioritized_servers.append(self._server)
+
+ def _is_not_eligible_for_retry(self) -> bool:
+ """Checks if the exchange is not eligible for retry"""
+ return not self._retryable or (self._is_retrying() and not self._multiple_retries)
+
+ def _is_retrying(self) -> bool:
+ """Checks if the exchange is currently undergoing a retry"""
+ return self._bulk.retrying if self._bulk else self._retrying
+
+ def _is_session_state_retryable(self) -> bool:
+ """Checks if provided session is eligible for retry
+
+ reads: Make sure there is no ongoing transaction (if provided a session)
+ writes: Make sure there is a session without an active transaction
+ """
+ if self._is_read:
+ return not (self._session and self._session.in_transaction)
+ return bool(self._session and not self._session.in_transaction)
+
+ def _check_last_error(self, check_csot: bool = False) -> None:
+ """Checks if the ongoing client exchange experienced an exception previously.
+
+ If so, raise the last error.
+
+ :param check_csot: Check CSOT to ensure there is time remaining to retry, defaults to False
+ """
+ if self._is_retrying():
+ remaining = _csot.remaining()
+ if not check_csot or (remaining is not None and remaining <= 0):
+ assert self._last_error is not None
+ raise self._last_error
+
+ def _get_server(self) -> Server:
+ """Retrieves a server object based on the operation's context
+
+ :return: The selected Server
+ """
+ return self._client._select_server(
+ self._server_selector,
+ self._session,
+ self._operation,
+ address=self._address,
+ deprioritized_servers=self._deprioritized_servers,
+ operation_id=self._operation_id,
+ )
+
+ def _write(self) -> T:
+ """Wrapper method for write-type retryable client executions
+
+ :return: Output for func()'s call
+ """
+ try:
+ max_wire_version = 0
+ is_mongos = False
+ self._server = self._get_server()
+ with self._client._checkout(self._server, self._session) as conn:
+ max_wire_version = conn.max_wire_version
+ sessions_supported = (
+ self._session
+ and self._server.description.retryable_writes_supported
+ and conn.supports_sessions
+ )
+ is_mongos = conn.is_mongos
+ if not sessions_supported:
+ # A retry is not possible because this server does
+ # not support sessions; raise the last error.
+ self._check_last_error()
+ self._retryable = False
+ if self._retrying:
+ _debug_log(
+ _COMMAND_LOGGER,
+ message=f"Retrying write attempt number {self._attempt_number}",
+ clientId=self._client._topology_settings._topology_id,
+ commandName=self._operation,
+ operationId=self._operation_id,
+ )
+ return self._func(self._session, conn, self._retryable) # type: ignore
+ except PyMongoError as exc:
+ if not self._retryable:
+ raise
+ # Add the RetryableWriteError label, if applicable.
+ _add_retryable_write_error(exc, max_wire_version, is_mongos)
+ raise
+
+ def _read(self) -> T:
+ """Wrapper method for read-type retryable client executions
+
+ :return: Output for func()'s call
+ """
+ self._server = self._get_server()
+ assert self._read_pref is not None, "Read Preference required on read calls"
+ with self._client._conn_from_server(self._read_pref, self._server, self._session) as (
+ conn,
+ read_pref,
+ ):
+ if self._retrying and not self._retryable:
+ self._check_last_error()
+ if self._retrying:
+ _debug_log(
+ _COMMAND_LOGGER,
+ message=f"Retrying read attempt number {self._attempt_number}",
+ clientId=self._client._topology_settings._topology_id,
+ commandName=self._operation,
+ operationId=self._operation_id,
+ )
+ return self._func(self._session, self._server, conn, read_pref) # type: ignore
+
+
+def _after_fork_child() -> None:
+ """Releases the locks in child process and resets the
+ topologies in all MongoClients.
+ """
+ # Reinitialize locks
+ _release_locks()
+
+ # Perform cleanup in clients (i.e. get rid of topology)
+ for _, client in MongoClient._clients.items():
+ client._after_fork()
+
+
+def _detect_external_db(entity: str) -> bool:
+ """Detects external database hosts and logs an informational message at the INFO level."""
+ entity = entity.lower()
+ cosmos_db_hosts = [".cosmos.azure.com"]
+ document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"]
+
+ for host in cosmos_db_hosts:
+ if entity.endswith(host):
+ _log_or_warn(
+ _CLIENT_LOGGER,
+ "You appear to be connected to a CosmosDB cluster. 
For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py new file mode 100644 index 0000000000..f395588814 --- /dev/null +++ b/pymongo/synchronous/monitor.py @@ -0,0 +1,543 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Class to monitor a MongoDB server on a background thread.""" + +from __future__ import annotations + +import asyncio +import atexit +import logging +import time +import weakref +from typing import TYPE_CHECKING, Any, Optional + +from pymongo import common, periodic_executor +from pymongo._csot import MovingMinimum +from pymongo.errors import NetworkTimeout, _OperationCancelled +from pymongo.hello import Hello +from pymongo.lock import _create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors +from pymongo.pool_options import _is_faas +from pymongo.read_preferences import MovingAverage +from pymongo.server_description import ServerDescription +from pymongo.synchronous.srv_resolver import _SrvResolver + +if TYPE_CHECKING: + from pymongo.synchronous.pool import ( # type: ignore[attr-defined] + Connection, + Pool, + _CancellationContext, + ) + from pymongo.synchronous.settings import TopologySettings + from pymongo.synchronous.topology import Topology + +_IS_SYNC = True + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. 
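+ # target() returning False tells the PeriodicExecutor to stop.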
+ def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + monitor._run() # type:ignore[attr-defined] + return True + + executor = periodic_executor.PeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) + + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + self._executor.open() + + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + def close(self) -> None: + """Close and stop monitoring. + + open() restarts the monitor after closing. + """ + self.gc_safe_close() + + def join(self) -> None: + """Wait for the monitor to stop.""" + self._executor.join() + + def request_check(self) -> None: + """If the monitor is sleeping, wake it soon.""" + self._executor.wake() + + +class Monitor(MonitorBase): + def __init__( + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): + """Class to monitor a MongoDB server on a background thread. + + Pass an initial ServerDescription, a Topology, a Pool, and + TopologySettings. + + The Topology is weakly referenced. The Pool must be exclusive to this + Monitor. + """ + super().__init__( + topology, + "pymongo_server_monitor_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + self._server_description = server_description + self._pool = pool + self._settings = topology_settings + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() + + def cancel_check(self) -> None: + """Cancel any concurrent hello check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. 
+ self._rtt_monitor.open() + if self._executor._stopped: + self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + + def join(self) -> None: + asyncio.gather(self._executor.join(), self._rtt_monitor.join(), return_exceptions=True) # type: ignore[func-returns-value] + + def close(self) -> None: + self.gc_safe_close() + self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + self._reset_connection() + + def _reset_connection(self) -> None: + # Clear our pooled connection. + self._pool.reset() + + def _run(self) -> None: + try: + prev_sd = self._server_description + try: + self._server_description = self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + self._topology.on_change( + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() + except ReferenceError: + # Topology was garbage-collected. + self.close() + finally: + if self._executor._stopped: + self._rtt_monitor.close() + + def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. + + Returns a ServerDescription. + """ + self._conn_id = None + start = time.monotonic() + try: + return self._check_once() + except ReferenceError: + raise + except Exception as error: + _sanitize(error) + sd = self._server_description + address = sd.address + duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + driverConnectionId=self._conn_id, + ) + self._reset_connection() + if isinstance(error, _OperationCancelled): + raise + self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) + + def _check_once(self) -> ServerDescription: + """A single attempt to call hello. + + Returns a ServerDescription, or raises an exception. + """ + address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. 
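+ # "awaited" is True only when the streaming protocol is in use: we
+ # already hold a pooled connection and the previous response
+ # included a topologyVersion.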
+ awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + self._reset_connection() + with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + ) + + self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id + response, round_trip_time = self._check_with_socket(conn) + if not response.awaitable: + self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_succeeded( + address, round_trip_time, response, response.awaitable + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + ) + return sd + + def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: # type: ignore[type-arg] + """Return (Hello, round_trip_time). + + Can raise ConnectionFailure or OperationFailure. + """ + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = conn._hello( + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). + response = conn._hello(None, None) + duration = _monotonic_duration(start) + return response, duration + + +class SrvMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings): + """Class to poll SRV records on a background thread. + + Pass a Topology and a TopologySettings. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) + self._settings = topology_settings + self._seedlist = self._settings._seeds + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() + + def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return + seedlist = self._get_seedlist() + if seedlist: + self._seedlist = seedlist + try: + self._topology.on_srv_update(self._seedlist) + except ReferenceError: + # Topology was garbage-collected. + self.close() + + def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + """Poll SRV records for a seedlist. 
+ + Returns a list of ServerDescriptions. + """ + try: + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) + seedlist, ttl = resolver.get_hosts_and_min_ttl() + if len(seedlist) == 0: + # As per the spec: this should be treated as a failure. + raise Exception + except Exception as exc: + # As per the spec, upon encountering an error: + # - An error must not be raised + # - SRV records must be rescanned every heartbeatFrequencyMS + # - Topology must be left unchanged + self.request_check() + _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc)) + return None + else: + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_server_rtt_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + + self._pool = pool + self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() + self._lock = _create_lock() + + def close(self) -> None: + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + self._pool.reset() + + def add_sample(self, sample: float) -> None: + """Add a RTT sample.""" + with self._lock: + self._moving_average.add_sample(sample) + self._moving_min.add_sample(sample) + + def get(self) -> tuple[Optional[float], float]: + """Get the calculated average, or None if no samples yet and the min.""" + with self._lock: + return self._moving_average.get(), self._moving_min.get() + + def reset(self) -> None: + """Reset the average RTT.""" + with self._lock: + self._moving_average.reset() + self._moving_min.reset() + + def _run(self) -> None: + try: + # NOTE: This thread is only run when using the streaming + # heartbeat protocol (MongoDB 4.4+). + # XXX: Skip check if the server is unknown? + rtt = self._ping() + self.add_sample(rtt) + except ReferenceError: + # Topology was garbage-collected. + self.close() + except Exception: + self._pool.reset() + + def _ping(self) -> float: + """Run a "hello" command and return the RTT.""" + with self._pool.checkout() as conn: + if self._executor._stopped: + raise Exception("_RttMonitor closed") + start = time.monotonic() + conn.hello() + return _monotonic_duration(start) + + +# Close monitors to cancel any in progress streaming checks before joining +# executor threads. For an explanation of how this works see the comment +# about _EXECUTORS in periodic_executor.py. +_MONITORS = set() + + +def _register(monitor: MonitorBase) -> None: + ref = weakref.ref(monitor, _unregister) + _MONITORS.add(ref) + + +def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None: + _MONITORS.remove(monitor_ref) + + +def _shutdown_monitors() -> None: + if _MONITORS is None: + return + + # Copy the set. Closing monitors removes them. + monitors = list(_MONITORS) + + # Close all monitors. + for ref in monitors: + monitor = ref() + if monitor: + monitor.gc_safe_close() + + monitor = None + + +def _shutdown_resources() -> None: + # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. 
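+    # Sketch of the hazard (illustrative): during interpreter teardown,
+    # module globals can be cleared (set to None) before atexit callbacks
+    # finish running, so a bare call may fail:
+    #
+    #   _shutdown_monitors()  # may be None at shutdown -> TypeError
+    #
+    # Copying the global into a local and truth-testing it first keeps this
+    # callback safe at any point of shutdown.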
+ shutdown = _shutdown_monitors + if shutdown: # type:ignore[truthy-function] + shutdown() + shutdown = _shutdown_executors + if shutdown: # type:ignore[truthy-function] + shutdown() + + +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py new file mode 100644 index 0000000000..7d9bca4d58 --- /dev/null +++ b/pymongo/synchronous/network.py @@ -0,0 +1,298 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal network layer helper methods.""" +from __future__ import annotations + +import datetime +import logging +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from bson import _decode_all_selective +from pymongo import _csot, helpers_shared, message +from pymongo.compression_support import _NO_COMPRESSION +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, +) +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import _OpMsg +from pymongo.monitoring import _is_speculative_authenticate +from pymongo.network_layer import ( + receive_message, + sendall, +) + +if TYPE_CHECKING: + from bson import CodecOptions + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.monitoring import _EventListeners + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +def command( + conn: Connection, + dbname: str, + spec: MutableMapping[str, Any], + is_mongos: bool, + read_preference: Optional[_ServerMode], + codec_options: CodecOptions[_DocumentType], + session: Optional[ClientSession], + client: Optional[MongoClient[Any]], + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + address: Optional[_Address] = None, + listeners: Optional[_EventListeners] = None, + max_bson_size: Optional[int] = None, + read_concern: Optional[ReadConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + compression_ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, + use_op_msg: bool = False, + unacknowledged: bool = False, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + write_concern: Optional[WriteConcern] = None, +) -> _DocumentType: + """Execute a command over the socket, or raise socket.error. + + :param conn: a Connection instance + :param dbname: name of the database on which to run the command + :param spec: a command document as an ordered dict type, eg SON. + :param is_mongos: are we connected to a mongos? 
+ :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param session: optional ClientSession instance. + :param client: optional MongoClient instance for updating $clusterTime. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param address: the (host, port) of `conn` + :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners` + :param max_bson_size: The maximum encoded bson size for this server + :param read_concern: The read concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` + field in the command response. + :param collation: The collation for this command. + :param compression_ctx: optional compression Context. + :param use_op_msg: True if we should use OP_MSG. + :param unacknowledged: True if this is an unacknowledged command. + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. + """ + name = next(iter(spec)) + ns = dbname + ".$cmd" + speculative_hello = False + + # Publish the original command document, perhaps with lsid and $clusterTime. + orig = spec + if is_mongos and not use_op_msg: + assert read_preference is not None + spec = message._maybe_add_read_preference(spec, read_preference) + if read_concern and not (session and session.in_transaction): + if read_concern.level: + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) + if collation is not None: + spec["collation"] = collation + + publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() + if publish: + speculative_hello = _is_speculative_authenticate(name, spec) + + if compression_ctx and name.lower() in _NO_COMPRESSION: + compression_ctx = None + + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) + + if use_op_msg: + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 + request_id, msg, size, max_doc_size = message._op_msg( + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) + # If this is an unacknowledged write then make sure the encoded doc(s) + # are small enough, otherwise rely on the server to return an error. 
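+        # Illustrative example (sizes are hypothetical): an acknowledged
+        # write of an oversized document is rejected by the server's reply,
+        # but a MORE_TO_COME message gets no reply at all, so without this
+        # local check a 17 MiB document sent unacknowledged to a server whose
+        # max_bson_size is 16 MiB would be dropped silently rather than
+        # raising a document-too-large error.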
+ if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: + message._raise_document_too_large(name, size, max_bson_size) + else: + request_id, msg, size = message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) + + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + sendall(conn.conn.get_conn, msg) + if use_op_msg and unacknowledged: + # Unacknowledged, fake a successful command response. + reply = None + response_doc: _DocumentOut = {"ok": 1} + else: + reply = receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response( + codec_options=codec_options, user_fields=user_fields + ) + + response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time + if client: + client._process_response(response_doc, session) + if check: + helpers_shared._check_command_response( + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) + except Exception as exc: + duration = datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_failure( + duration, + failure, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbname, + ) + raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + 
serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_success( + duration, + response_doc, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) + + if client and client._encrypter and reply: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py new file mode 100644 index 0000000000..66258fda18 --- /dev/null +++ b/pymongo/synchronous/pool.py @@ -0,0 +1,1475 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import asyncio +import collections +import contextlib +import logging +import os +import sys +import time +import weakref +from typing import ( + TYPE_CHECKING, + Any, + Generator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import _csot, helpers_shared +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_MESSAGE_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + ORDERED_TYPES, +) +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) +from pymongo.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, +) +from pymongo.network_layer import NetworkingInterface, receive_message, sendall +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + SSLErrors, + _CancellationContext, + _configured_socket_interface, + _raise_connection_failure, +) +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker +from pymongo.synchronous.client_session import _validate_session_write_concern +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.network import command + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.compression_support import ( + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.message import _OpMsg, _OpReply + from pymongo.read_concern 
import ReadConcern
+    from pymongo.read_preferences import _ServerMode
+    from pymongo.synchronous.auth import _AuthContext
+    from pymongo.synchronous.client_session import ClientSession
+    from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler
+    from pymongo.typings import _Address, _CollationIn
+    from pymongo.write_concern import WriteConcern
+
+try:
+    from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl
+
+    def _set_non_inheritable_non_atomic(fd: int) -> None:
+        """Set the close-on-exec flag on the given file descriptor."""
+        flags = fcntl(fd, F_GETFD)
+        fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
+
+except ImportError:
+    # Windows, various platforms we don't claim to support
+    # (Jython, IronPython, ..), systems that don't provide
+    # everything we need from fcntl, etc.
+    def _set_non_inheritable_non_atomic(fd: int) -> None:  # noqa: ARG001
+        """Dummy function for platforms that don't provide fcntl."""
+
+
+_IS_SYNC = True
+
+
+class Connection:
+    """Store a connection with some metadata.
+
+    :param conn: a raw connection object
+    :param pool: a Pool instance
+    :param address: the server's (host, port)
+    :param id: the id of this socket in its pool
+    :param is_sdam: SDAM connections do not call hello on creation
+    """
+
+    def __init__(
+        self,
+        conn: NetworkingInterface,
+        pool: Pool,
+        address: tuple[str, int],
+        id: int,
+        is_sdam: bool,
+    ):
+        self.pool_ref = weakref.ref(pool)
+        self.conn = conn
+        self.address = address
+        self.id = id
+        self.is_sdam = is_sdam
+        self.closed = False
+        self.last_checkin_time = time.monotonic()
+        self.performed_handshake = False
+        self.is_writable: bool = False
+        self.max_wire_version = MAX_WIRE_VERSION
+        self.max_bson_size = MAX_BSON_SIZE
+        self.max_message_size = MAX_MESSAGE_SIZE
+        self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
+        self.supports_sessions = False
+        self.hello_ok: bool = False
+        self.is_mongos = False
+        self.op_msg_enabled = False
+        self.listeners = pool.opts._event_listeners
+        self.enabled_for_cmap = pool.enabled_for_cmap
+        self.enabled_for_logging = pool.enabled_for_logging
+        self.compression_settings = pool.opts._compression_settings
+        self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None
+        self.socket_checker: SocketChecker = SocketChecker()
+        self.oidc_token_gen_id: Optional[int] = None
+        # Support for mechanism negotiation on the initial handshake.
+        self.negotiated_mechs: Optional[list[str]] = None
+        self.auth_ctx: Optional[_AuthContext] = None
+
+        # The pool's generation changes with each reset() so we can close
+        # sockets created before the last reset.
+        self.pool_gen = pool.gen
+        self.generation = self.pool_gen.get_overall()
+        self.ready = False
+        self.cancel_context: _CancellationContext = _CancellationContext()
+        self.opts = pool.opts
+        self.more_to_come: bool = False
+        # For load balancer support.
+        self.service_id: Optional[ObjectId] = None
+        self.server_connection_id: Optional[int] = None
+        # When executing a transaction in load balancing mode, this flag is
+        # set to true to indicate that the session now owns the connection.
+        self.pinned_txn = False
+        self.pinned_cursor = False
+        self.active = False
+        self.last_timeout = self.opts.socket_timeout
+        self.connect_rtt = 0.0
+        self._client_id = pool._client_id
+        self.creation_time = time.monotonic()
+        # For gossiping $clusterTime from the connection handshake to the client.
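+        # Illustrative flow (the call below appears in Pool.connect in this
+        # diff): after the handshake, the pool forwards this value via
+        #
+        #   handler.client._topology.receive_cluster_time(conn._cluster_time)
+        #
+        # so the client's causally consistent sessions can advance their
+        # cluster time.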
+ self._cluster_time = None + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + def unpin(self) -> None: + pool = self.pool_ref() + if pool: + pool.checkin(self) + else: + self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + def hello(self) -> Hello[dict[str, Any]]: + return self._hello(None, None) + + def _hello( + self, + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + from pymongo.synchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + def _next_reply(self) -> dict[str, Any]: + reply = self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers_shared._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + client: Optional[MongoClient[Any]] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. 
+ + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional ClientSession instance. + :param client: optional MongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, # type: ignore[arg-type] + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. + except BaseException as error: + self._raise_connection_failure(error) + + def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + sendall(self.conn.get_conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + self._raise_connection_failure(error) + + def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. + """ + try: + return receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
+ except BaseException as error: + self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + self.send_message(msg, max_doc_size) + + def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + self.send_message(msg, 0) + reply = self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers_shared._check_command_response(result, self.max_wire_version) + return result + + def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.synchronous import auth + + auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + duration = time.monotonic() - self.creation_time + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_ready(self.address, self.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[MongoClient[Any]], session: Optional[ClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. 
+ """ + if session: + if session._client is not client: + raise InvalidOperation("Can only use session with the MongoClient that started it") + + def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[ClientSession], + client: Optional[MongoClient[Any]], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. 
+ if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "Connection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# https://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + is_sdam: bool = False, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param is_sdam: whether to call hello for each new Connection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque[Connection] = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _create_lock() + self._max_connecting_cond = _create_condition(self.lock) + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.is_sdam = is_sdam + # Don't publish events or logs in Monitor pools. 
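+        # (Illustrative consequence of the flags below: a monitor pool
+        # created with is_sdam=True emits no CMAP events and no connection
+        # logs, so heartbeat connections never appear in connection-pool
+        # monitoring.)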
+ self.enabled_for_cmap = ( + not self.is_sdam + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + self.enabled_for_logging = not self.is_sdam + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = _create_condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = _create_condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[Connection] = set() + self.ncursors = 0 + self.ntxns = 0 + + def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # PoolClosedEvent but that reset() SHOULD close 
sockets *after* + # publishing the PoolClearedEvent. + if close: + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) + + def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] + + def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) + + def reset_without_pause(self) -> None: + self._reset(close=False, pause=False) + + def close(self) -> None: + self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + close_conns = [] + with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + close_conns.append(self.conns.pop()) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in close_conns: + conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + with self.size_cond: + # There are enough sockets in the pool. 
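+                # Illustrative check (names from this method): with
+                # minPoolSize=3, two idle conns and one checked-out conn,
+                # len(self.conns) + self.active_sockets == 3 >= 3, so the
+                # loop exits without opening another socket.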
+ if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = self.connect() + close_conn = False + with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + conn.close_conn(ConnectionClosedReason.STALE) + return + finally: + if incremented: + # Notify after adding the socket to the pool. + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Connect to Mongo and return a new Connection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + networking_interface = _configured_socket_interface(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
+ except BaseException as error: + with self.lock: + self.active_contexts.discard(tmp_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn_id, ConnectionClosedReason.ERROR + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + + raise + + conn = Connection(networking_interface, self, self.address, conn_id, self.is_sdam) # type: ignore[arg-type] + with self.lock: + self.active_contexts.add(conn.cancel_context) + self.active_contexts.discard(tmp_context) + if tmp_context.cancelled: + conn.cancel_context.cancel() + try: + if not self.is_sdam: + conn.hello() + self.is_writable = conn.is_writable + if handler: + handler.contribute_socket(conn, completed_handshake=False) + + conn.authenticate() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + with self.lock: + self.active_contexts.discard(conn.cancel_context) + conn.close_conn(ConnectionClosedReason.ERROR) + raise + + if handler: + handler.client._topology.receive_cluster_time(conn._cluster_time) + + return conn + + @contextlib.contextmanager + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> Generator[Connection, None]: + """Get a connection from the pool. Use with a "with" statement. + + Returns a :class:`Connection` object wrapping a connected + :class:`socket.socket`. + + This method should always be used in a with-statement:: + + with pool.get_conn() as connection: + connection.send_message(msg) + data = connection.receive_message(op_code, request_id) + + Can raise ConnectionFailure or OperationFailure. + + :param handler: A _MongoClientErrorHandler. + """ + listeners = self.opts._event_listeners + checkout_started_time = time.monotonic() + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_started(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + conn = self._get_conn(checkout_started_time, handler=handler) + + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_out(self.address, conn.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) + try: + with self.lock: + self.active_contexts.add(conn.cancel_context) + yield conn + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + # Exception in caller. Ensure the connection gets returned. 
+ # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + handler.handle(exc_type, exc_val) + if not pinned and conn.active: + self.checkin(conn) + raise + if conn.pinned_txn: + with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + self.checkin(conn) + + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: + if self.state != PoolState.READY: + if emit_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> Connection: + """Get or create a Connection. Can raise ConnectionFailure.""" + # We use the pid here to avoid issues with fork / multiprocessing. + # See test.test_client:TestClient.test_fork for an example of + # what could go wrong otherwise + if self.pid != os.getpid(): + self.reset_without_pause() + + if self.closed: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) + raise _PoolClosedError( + "Attempted to check out a connection from closed connection pool" + ) + + with self.lock: + self.operation_count += 1 + + # Get a free socket or create one. + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + with self.size_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=True) + while not (self.requests < self.max_pool_size): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self.size_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. 
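+                    # Sketch of the lost-wakeup hazard being handled here:
+                    # this waiter can time out at the same moment a checkin()
+                    # notifies size_cond; if capacity actually freed up, the
+                    # re-notify below passes that wakeup on to the next
+                    # waiter instead of swallowing it with our timeout.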
+ if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self._max_connecting_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = self.connect(handler=handler) + finally: + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + if conn: + # We checked out a socket but authentication failed. + conn.close_conn(ConnectionClosedReason.ERROR) + with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if not emitted_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + def checkin(self, conn: Connection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. 
+        """
+        txn = conn.pinned_txn
+        cursor = conn.pinned_cursor
+        conn.active = False
+        conn.pinned_txn = False
+        conn.pinned_cursor = False
+        self.__pinned_sockets.discard(conn)
+        listeners = self.opts._event_listeners
+        with self.lock:
+            self.active_contexts.discard(conn.cancel_context)
+        if self.enabled_for_cmap:
+            assert listeners is not None
+            listeners.publish_connection_checked_in(self.address, conn.id)
+        if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _CONNECTION_LOGGER,
+                message=_ConnectionStatusMessage.CHECKEDIN,
+                clientId=self._client_id,
+                serverHost=self.address[0],
+                serverPort=self.address[1],
+                driverConnectionId=conn.id,
+            )
+        if self.pid != os.getpid():
+            self.reset_without_pause()
+        else:
+            if self.closed:
+                conn.close_conn(ConnectionClosedReason.POOL_CLOSED)
+            elif conn.closed:
+                # CMAP requires the closed event be emitted after the check in.
+                if self.enabled_for_cmap:
+                    assert listeners is not None
+                    listeners.publish_connection_closed(
+                        self.address, conn.id, ConnectionClosedReason.ERROR
+                    )
+                if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+                    _debug_log(
+                        _CONNECTION_LOGGER,
+                        message=_ConnectionStatusMessage.CONN_CLOSED,
+                        clientId=self._client_id,
+                        serverHost=self.address[0],
+                        serverPort=self.address[1],
+                        driverConnectionId=conn.id,
+                        reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR),
+                        error=ConnectionClosedReason.ERROR,
+                    )
+            else:
+                close_conn = False
+                with self.lock:
+                    # Hold the lock to ensure this section does not race with
+                    # Pool.reset().
+                    if self.stale_generation(conn.generation, conn.service_id):
+                        close_conn = True
+                    else:
+                        conn.update_last_checkin_time()
+                        conn.update_is_writable(bool(self.is_writable))
+                        self.conns.appendleft(conn)
+                        # Notify any threads waiting to create a connection.
+                        self._max_connecting_cond.notify()
+                if close_conn:
+                    conn.close_conn(ConnectionClosedReason.STALE)
+
+        with self.size_cond:
+            if txn:
+                self.ntxns -= 1
+            elif cursor:
+                self.ncursors -= 1
+            self.requests -= 1
+            self.active_sockets -= 1
+            self.operation_count -= 1
+            self.size_cond.notify()
+
+    def _perished(self, conn: Connection) -> bool:
+        """Return True and close the connection if it is "perished".
+
+        This side-effecty function checks if this socket has been idle for
+        longer than the max idle time, or if the socket has been closed by
+        some external network error, or if the socket's generation is outdated.
+
+        Checking sockets lets us avoid seeing *some*
+        :class:`~pymongo.errors.AutoReconnect` exceptions on server
+        hiccups, etc. We only check if the socket was closed by an external
+        error if it has been > 1 second since the socket was checked into the
+        pool, to keep performance reasonable - we can't avoid AutoReconnects
+        completely anyway.
+        """
+        idle_time_seconds = conn.idle_time_seconds()
+        # If socket is idle, open a new one.
+ if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py new file mode 100644 index 0000000000..f57420918b --- /dev/null +++ b/pymongo/synchronous/server.py @@ -0,0 +1,383 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
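Editor's note on the pool internals above: `_get_conn` enforces two CMAP limits at once (`maxPoolSize` via `size_cond`, `maxConnecting` via `_max_connecting_cond`), and `_perished` decides at checkout time whether a pooled socket is still usable. A minimal standalone sketch of that perished-check, using hypothetical field names rather than pymongo's `Connection` API:

    import time
    from dataclasses import dataclass

    @dataclass
    class PooledConn:
        last_checkin: float  # time.monotonic() at last checkin
        generation: int      # pool generation when the socket was created
        closed: bool         # whether the OS socket is known to be closed

    def perished(conn: PooledConn, pool_generation: int,
                 max_idle: float | None, check_interval: float | None) -> bool:
        """Mirror the three checks: max-idle, external close, stale generation."""
        idle = time.monotonic() - conn.last_checkin
        if max_idle is not None and idle > max_idle:
            return True  # exceeded maxIdleTimeMS
        if check_interval is not None and (check_interval == 0 or idle > check_interval):
            if conn.closed:
                return True  # closed by an external network error
        if conn.generation != pool_generation:
            return True  # pool was reset since this socket was created
        return False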
+ +"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Optional, + Union, +) + +from bson import _decode_all_selective +from pymongo.errors import NotPrimaryError, OperationFailure +from pymongo.helpers_shared import _check_command_response +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.response import PinnedResponse, Response +from pymongo.synchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.monitoring import _EventListeners + from pymongo.read_preferences import _ServerMode + from pymongo.server_description import ServerDescription + from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler + from pymongo.synchronous.monitor import Monitor + from pymongo.synchronous.pool import Connection, Pool + from pymongo.typings import _DocumentOut + +_IS_SYNC = True + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, + ) -> None: + """Represent one MongoDB server.""" + self._description = server_description + self._pool = pool + self._monitor = monitor + self._topology_id = topology_id + self._publish = listeners is not None and listeners.enabled_for_server + self._listener = listeners + self._events = None + if self._publish: + self._events = events() # type: ignore[misc] + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + if not self._pool.opts.load_balanced: + self._monitor.open() + + def reset(self, service_id: Optional[ObjectId] = None) -> None: + """Clear the connection pool.""" + self.pool.reset(service_id) + + def close(self) -> None: + """Clear the connection pool and stop the monitor. + + Reconnect with open(). 
+ """ + if self._publish: + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + ) + + self._monitor.close() + self._pool.close() + + def request_check(self) -> None: + """Check the server's state soon.""" + self._monitor.request_check() + + def operation_to_command( + self, operation: Union[_Query, _GetMore], conn: Connection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + cmd, db = operation.as_command(conn, apply_timeout) + # Support auto encryption + if operation.client._encrypter and not operation.client._encrypter._bypass_auto_encryption: + cmd = operation.client._encrypter.encrypt( # type: ignore[misc, assignment] + operation.db, cmd, operation.codec_options + ) + operation.update_command(cmd) + + return cmd, db + + @_handle_reauth + def run_operation( + self, + conn: Connection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + client: MongoClient[Any], + ) -> Response: + """Run a _Query or _GetMore operation and return a Response object. + + This method is used only to run _Query/_GetMore operations from + cursors. + Can raise ConnectionFailure, OperationFailure, etc. + + :param conn: A Connection instance. + :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. + :param client: A MongoClient instance. + """ + assert listeners is not None + publish = listeners.enabled_for_commands + start = datetime.now() + + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + cmd, dbn = self.operation_to_command(operation, conn, use_cmd) + if more_to_come: + request_id = 0 + else: + message = operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) + + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + + if publish: + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None + listeners.publish_command_start( + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + if more_to_come: + reply = conn.receive_message(None) + else: + conn.send_message(data, max_doc_size) + reply = conn.receive_message(request_id) + + # Unpack and check for command errors. 
+ if use_cmd: + user_fields = _CURSOR_DOC_FIELDS + legacy_response = False + else: + user_fields = None + legacy_response = True + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) + if use_cmd: + first = docs[0] + operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] + except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + listeners.publish_command_failure( + duration, + failure, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + raise + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. + if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs + else: + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + listeners.publish_command_success( + duration, + res, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + + # Decrypt response. + client = operation.client # type: ignore[assignment] + if client and client._encrypter: + if use_cmd: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) + + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: # type: ignore[arg-type] + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. 
+ more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( + data=reply, + address=self._description.address, + conn=conn, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + more_to_come=more_to_come, + ) + else: + response = Response( + data=reply, + address=self._description.address, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + ) + + return response + + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> ContextManager[Connection]: + return self.pool.checkout(handler) + + @property + def description(self) -> ServerDescription: + return self._description + + @description.setter + def description(self, server_description: ServerDescription) -> None: + assert server_description.address == self._description.address + self._description = server_description + + @property + def pool(self) -> Pool: + return self._pool + + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: + """Return request_id, data, max_doc_size. + + :param message: (request_id, data, max_doc_size) or (request_id, data) + """ + if len(message) == 3: + return message # type: ignore[return-value] + else: + # get_more and kill_cursors messages don't include BSON documents. + request_id, data = message # type: ignore[misc] + return request_id, data, 0 + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/synchronous/settings.py b/pymongo/synchronous/settings.py new file mode 100644 index 0000000000..61b86fa18d --- /dev/null +++ b/pymongo/synchronous/settings.py @@ -0,0 +1,175 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
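Editor's note on `run_operation` above: when a legacy OP_REPLY comes back, the result is re-shaped into the find/getMore command-response format before it is logged or published, so monitoring consumers see a single shape. A rough standalone restatement of that rule (the function name and parameters here are illustrative, not pymongo API):

    from typing import Any

    def shape_legacy_reply(docs: list[dict[str, Any]], cursor_id: int,
                           namespace: str, operation_name: str) -> dict[str, Any]:
        """Present a legacy reply in find/getMore command-response form."""
        res: dict[str, Any] = {"cursor": {"id": cursor_id, "ns": namespace}, "ok": 1}
        # find results land in firstBatch; getMore results in nextBatch.
        key = "firstBatch" if operation_name == "find" else "nextBatch"
        res["cursor"][key] = docs
        return res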
+ +"""Represent MongoClient's configuration.""" +from __future__ import annotations + +import threading +import traceback +from typing import Any, Collection, Optional, Type, Union + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.errors import ConfigurationError +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.synchronous import monitor, pool +from pymongo.synchronous.pool import Pool +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector + +_IS_SYNC = True + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, + ): + """Represent MongoClient's configuration. + + Take a list of (host, port) pairs and optional replica set name. + """ + if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: + raise ConfigurationError( + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) + + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] + self._replica_set_name = replica_set_name + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition + self._local_threshold_ms = local_threshold_ms + self._server_selection_timeout = server_selection_timeout + self._server_selector = server_selector + self._fqdn = fqdn + self._heartbeat_frequency = heartbeat_frequency + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. 
+ self._stack = "".join(traceback.format_stack()[:-2]) + + @property + def seeds(self) -> Collection[tuple[str, int]]: + """List of server addresses.""" + return self._seeds + + @property + def replica_set_name(self) -> Optional[str]: + return self._replica_set_name + + @property + def pool_class(self) -> Type[Pool]: + return self._pool_class + + @property + def pool_options(self) -> PoolOptions: + return self._pool_options + + @property + def monitor_class(self) -> Type[monitor.Monitor]: + return self._monitor_class + + @property + def condition_class(self) -> Type[threading.Condition]: + return self._condition_class + + @property + def local_threshold_ms(self) -> int: + return self._local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + return self._server_selection_timeout + + @property + def server_selector(self) -> Optional[_ServerSelector]: + return self._server_selector + + @property + def heartbeat_frequency(self) -> int: + return self._heartbeat_frequency + + @property + def fqdn(self) -> Optional[str]: + return self._fqdn + + @property + def direct(self) -> Optional[bool]: + """Connect directly to a single server, or use a set of servers? + + True if there is one seed and no replica_set_name. + """ + return self._direct + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: + return TOPOLOGY_TYPE.Single + elif self.replica_set_name is not None: + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + return TOPOLOGY_TYPE.Unknown + + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: + """Initial dict of (address, ServerDescription) for all seeds.""" + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py new file mode 100644 index 0000000000..4802310698 --- /dev/null +++ b/pymongo/synchronous/srv_resolver.py @@ -0,0 +1,155 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
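Editor's note on `TopologySettings.get_topology_type` above: the precedence is load-balanced, then direct connection, then replica set, then Unknown. A small usage sketch against the class as defined in this patch; `TopologySettings` is internal API, so treat this as illustrative only:

    from pymongo.synchronous.settings import TopologySettings
    from pymongo.topology_description import TOPOLOGY_TYPE

    # A replica-set name alone yields ReplicaSetNoPrimary until discovery runs.
    rs = TopologySettings(seeds=[("localhost", 27017)], replica_set_name="rs0")
    assert rs.get_topology_type() == TOPOLOGY_TYPE.ReplicaSetNoPrimary

    # load_balanced takes precedence over everything else.
    lb = TopologySettings(seeds=[("localhost", 27017)], load_balanced=True)
    assert lb.get_topology_type() == TOPOLOGY_TYPE.LoadBalanced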
+ +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = True + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if _IS_SYNC: + from dns import resolver + + return resolver.resolve(*args, **kwargs) + else: + from dns import asyncresolver + + return asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) + + def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from exc + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] + + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. 
+ raise ConfigurationError(str(exc)) from exc + return results + + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results + ] + + # Validate hosts + for node in nodes: + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) + try: + nlist = srv_host.split(".")[1:][-self.__slen :] + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = self._get_srv_response_and_hosts(True) + return nodes + + def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py new file mode 100644 index 0000000000..a4ca0e6e0f --- /dev/null +++ b/pymongo/synchronous/topology.py @@ -0,0 +1,1125 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
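Editor's note on `_SrvResolver` above: the lookup target is `_<service>._tcp.<fqdn>`, and every SRV answer must share the seed's parent domain (all labels after the first, when the fqdn has more than two labels). A standalone sketch of those two rules, with no DNS calls involved; both helper names are illustrative:

    def srv_query_name(fqdn: str, service: str = "mongodb") -> str:
        """Build the SRV record name that get_hosts() resolves."""
        return f"_{service}._tcp.{fqdn}"

    def shares_parent_domain(fqdn: str, srv_host: str) -> bool:
        """Approximate the host validation applied to each SRV target."""
        labels = fqdn.lower().split(".")
        parent = labels[1:] if len(labels) > 2 else labels
        host_tail = srv_host.lower().split(".")[1:][-len(parent):]
        return host_tail == parent

    assert srv_query_name("cluster0.example.com") == "_mongodb._tcp.cluster0.example.com"
    assert shares_parent_domain("cluster0.example.com", "shard00.example.com")
    assert not shares_parent_domain("cluster0.example.com", "evil.attacker.net")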
+ +"""Internal class to monitor a topology of one or more servers.""" + +from __future__ import annotations + +import asyncio +import logging +import os +import queue +import random +import sys +import time +import warnings +import weakref +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.errors import ( + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteError, +) +from pymongo.hello import Hello +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) +from pymongo.logger import ( + _SDAM_LOGGER, + _SERVER_SELECTION_LOGGER, + _debug_log, + _SDAMStatusMessage, + _ServerSelectionStatusMessage, +) +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.synchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.synchronous.monitor import MonitorBase, SrvMonitor +from pymongo.synchronous.pool import Pool +from pymongo.synchronous.server import Server +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.synchronous.settings import TopologySettings + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = True + +_pymongo_dir = str(Path(__file__).parent) + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] + q = queue_ref() + if not q: + return False # Cancel PeriodicExecutor. + + while True: + try: + event = q.get_nowait() + except queue.Empty: + break + else: + fn, args = event + fn(*args) + + return True # Continue PeriodicExecutor. + + +class Topology: + """Monitor a topology of one or more servers.""" + + def __init__(self, topology_settings: TopologySettings): + self._topology_id = topology_settings._topology_id + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology + + # Create events queue if there are publishers. 
+ self._events = None + self.__events_executor: Any = None + + if self._publish_server or self._publish_tp: + self._events = queue.Queue(maxsize=100) + + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, + ) + + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) + self._settings = topology_settings + topology_description = TopologyDescription( + topology_settings.get_topology_type(), + topology_settings.get_server_descriptions(), + topology_settings.replica_set_name, + None, + None, + topology_settings, + ) + + self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(initial_td), + newDescription=repr(self._description), + ) + + for seed in topology_settings.seeds: + if self._publish_server: + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + ) + + # Store the seed list to help diagnose errors in _error_message(). + self._seed_addresses = list(topology_description.server_descriptions()) + self._opened = False + self._closed = False + self._lock = _create_lock() + self._condition = _create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None + self._session_pool = _ServerSessionPool() + + if self._publish_server or self._publish_tp: + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue[Any]] + + def target() -> bool: + return process_events_queue(weak) + + executor = periodic_executor.PeriodicExecutor( + interval=common.EVENTS_QUEUE_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_events_thread", + ) + + # We strongly reference the executor and it weakly references + # the queue via this closure. When the topology is freed, stop + # the executor soon. + weak = weakref.ref(self._events, executor.close) + self.__events_executor = executor + executor.open() + + self._srv_monitor = None + if self._settings.fqdn is not None and not self._settings.load_balanced: + self._srv_monitor = SrvMonitor(self, self._settings) + + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + No effect if called multiple times. + + .. warning:: Topology is shared among multiple threads and is protected + by mutual exclusion. Using Topology from a process other than the one + that initialized it will emit a warning and may result in deadlock. To + prevent this from happening, MongoClient must be created after any + forking. 
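+
+        An illustrative (not normative) fork-safety sketch, assuming an
+        application that forks with :mod:`os`: create the client in the
+        child after forking rather than inheriting the parent's instance::
+
+            import os
+
+            if os.fork() == 0:
+                # Child process: build a fresh MongoClient here; do not
+                # reuse a client created before the fork.
+                client = MongoClient()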
+
+        """
+        pid = os.getpid()
+        if self._pid is None:
+            self._pid = pid
+        elif pid != self._pid:
+            self._pid = pid
+            if sys.version_info[:2] >= (3, 12):
+                kwargs = {"skip_file_prefixes": (_pymongo_dir,)}
+            else:
+                kwargs = {"stacklevel": 6}
+            # Ignore B028 warning for missing stacklevel.
+            warnings.warn(  # type: ignore[call-overload] # noqa: B028
+                "MongoClient opened before fork. May not be entirely fork-safe, "
+                "proceed with caution. See PyMongo's documentation for details: "
+                "https://dochub.mongodb.org/core/pymongo-fork-deadlock",
+                **kwargs,
+            )
+            with self._lock:
+                # Close servers and clear the pools.
+                for server in self._servers.values():
+                    server.close()
+                # Reset the session pool to avoid duplicate sessions in
+                # the child process.
+                self._session_pool.reset()
+
+        with self._lock:
+            self._ensure_opened()
+
+    def get_server_selection_timeout(self) -> float:
+        # CSOT: use remaining timeout when set.
+        timeout = _csot.remaining()
+        if timeout is None:
+            return self._settings.server_selection_timeout
+        return timeout
+
+    def select_servers(
+        self,
+        selector: Callable[[Selection], Selection],
+        operation: str,
+        server_selection_timeout: Optional[float] = None,
+        address: Optional[_Address] = None,
+        operation_id: Optional[int] = None,
+    ) -> list[Server]:
+        """Return a list of Servers matching selector, or time out.
+
+        :param selector: function that takes a list of Servers and returns
+            a subset of them.
+        :param operation: The name of the operation that the server is being selected for.
+        :param server_selection_timeout: maximum seconds to wait.
+            If not provided, the default value common.SERVER_SELECTION_TIMEOUT
+            is used.
+        :param address: optional server address to select.
+
+        Calls self.open() if needed.
+
+        Raises :exc:`ServerSelectionTimeoutError` after
+        `server_selection_timeout` if no matching servers are found.
+        """
+        if server_selection_timeout is None:
+            server_timeout = self.get_server_selection_timeout()
+        else:
+            server_timeout = server_selection_timeout
+
+        # Cleanup any completed monitor tasks safely
+        if not _IS_SYNC and self._monitor_tasks:
+            self.cleanup_monitors()
+
+        with self._lock:
+            server_descriptions = self._select_servers_loop(
+                selector, server_timeout, operation, operation_id, address
+            )
+
+            return [
+                cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions
+            ]
+
+    def _select_servers_loop(
+        self,
+        selector: Callable[[Selection], Selection],
+        timeout: float,
+        operation: str,
+        operation_id: Optional[int],
+        address: Optional[_Address],
+    ) -> list[ServerDescription]:
+        """select_servers() guts. Hold the lock when calling this."""
+        now = time.monotonic()
+        end_time = now + timeout
+        logged_waiting = False
+
+        if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _SERVER_SELECTION_LOGGER,
+                message=_ServerSelectionStatusMessage.STARTED,
+                selector=selector,
+                operation=operation,
+                operationId=operation_id,
+                topologyDescription=self.description,
+                clientId=self.description._topology_settings._topology_id,
+            )
+
+        server_descriptions = self._description.apply_selector(
+            selector, address, custom_selector=self._settings.server_selector
+        )
+
+        while not server_descriptions:
+            # No suitable servers.
+ if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) + raise ServerSelectionTimeoutError( + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) + + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(1000 * (end_time - time.monotonic())), + ) + logged_waiting = True + + self._ensure_opened() + self._request_check_all() + + # Release the lock and wait for the topology description to + # change, or for a timeout. We won't miss any changes that + # came after our most recent apply_selector call, since we've + # held the lock until now. + _cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) + self._description.check_compatible() + now = time.monotonic() + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + self._description.check_compatible() + return server_descriptions + + def _select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + servers = self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + def select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Like select_servers, but choose a random server if several match.""" + server = self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) + return server + + def select_server_by_address( + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Return a Server for "address", reconnecting if necessary. + + If the server's type is not known, request an immediate check of all + servers. 
Time out after "server_selection_timeout" if the server + cannot be reached. + + :param address: A (host, port) pair. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. + If not provided, the default value + common.SERVER_SELECTION_TIMEOUT is used. + :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided. + + Calls self.open() if needed. + + Raises exc:`ServerSelectionTimeoutError` after + `server_selection_timeout` if no matching servers are found. + """ + return self.select_server( + any_server_selector, + operation, + server_selection_timeout, + address, + operation_id=operation_id, + ) + + def _process_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription on an opened topology. + + Hold the lock when calling this. + """ + td_old = self._description + sd_old = td_old._server_descriptions[server_description.address] + if _is_stale_server_description(sd_old, server_description): + # This is a stale hello response. Ignore it. + return + + new_td = updated_topology_description(self._description, server_description) + # CMAP: Ensure the pool is "ready" when the server is selectable. + if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): + server = self._servers.get(server_description.address) + if server: + server.pool.ready() + + suppress_event = sd_old == server_description + if self._publish_server and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) + + self._description = new_td + self._update_servers() + + if self._publish_tp and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + # Shutdown SRV polling for unsupported cluster types. + # This is only applicable if the old topology was Unknown, and the + # new one is something other than Unknown or Sharded. + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): + self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + # Wake anything waiting in select_servers(). + self._condition.notify_all() + + def on_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription after an hello call completes.""" + # We do no I/O holding the lock. + with self._lock: + # Monitors may continue working on hello calls for some time + # after a call to Topology.close, so this method may be called at + # any time. Ensure the topology is open before processing the + # change. + # Any monitored server was definitely in the topology description + # once. 
Check if it's still in the description or if some state- + # change removed it. E.g., we got a host list from the primary + # that didn't include this server. + if self._opened and self._description.has_server(server_description.address): + self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + server.pool.reset(interrupt_connections=interrupt_connections) + + def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new seedlist on an opened topology. + Hold the lock when calling this. + """ + td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) + + self._update_servers() + + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new list of nodes obtained from scanning SRV records.""" + # We do no I/O holding the lock. + with self._lock: + if self._opened: + self._process_srv_update(seedlist) + + def get_server_by_address(self, address: _Address) -> Optional[Server]: + """Get a Server or None. + + Returns the current version of the server immediately, even if it's + Unknown or absent from the topology. Only use this in unittests. + In driver code, use select_server_by_address, since then you're + assured a recent view of the server's type and wire protocol version. + """ + return self._servers.get(address) + + def has_server(self, address: _Address) -> bool: + return address in self._servers + + def get_primary(self) -> Optional[_Address]: + """Return primary's address or None.""" + # Implemented here in Topology instead of MongoClient, so it can lock. + with self._lock: + topology_type = self._description.topology_type + if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: + return None + + return writable_server_selector(self._new_selection())[0].address + + def _get_replica_set_members(self, selector: Callable[[Selection], Selection]) -> set[_Address]: + """Return set of replica set member addresses.""" + # Implemented here in Topology instead of MongoClient, so it can lock. 
+ with self._lock: + topology_type = self._description.topology_type + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): + return set() + + return {sd.address for sd in iter(selector(self._new_selection()))} + + def get_secondaries(self) -> set[_Address]: + """Return set of secondary addresses.""" + return self._get_replica_set_members(secondary_server_selector) + + def get_arbiters(self) -> set[_Address]: + """Return set of arbiter addresses.""" + return self._get_replica_set_members(arbiter_server_selector) + + def max_cluster_time(self) -> Optional[ClusterTime]: + """Return a document, the highest seen $clusterTime.""" + return self._max_cluster_time + + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + # Driver Sessions Spec: "Whenever a driver receives a cluster time from + # a server it MUST compare it to the current highest seen cluster time + # for the deployment. If the new cluster time is higher than the + # highest seen cluster time it MUST become the new highest seen cluster + # time. Two cluster times are compared using only the BsonTimestamp + # value of the clusterTime embedded field." + if cluster_time: + # ">" uses bson.timestamp.Timestamp's comparison operator. + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): + self._max_cluster_time = cluster_time + + def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + with self._lock: + self._receive_cluster_time_no_lock(cluster_time) + + def request_check_all(self, wait_time: int = 5) -> None: + """Wake all monitors, wait for at least one to check its server.""" + with self._lock: + self._request_check_all() + _cond_wait(self._condition, wait_time) + + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + + def update_pool(self) -> None: + # Remove any stale sockets and add new sockets if pool is too small. + servers = [] + with self._lock: + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + self.handle_error(server.description.address, ctx) + raise + + def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ + with self._lock: + old_td = self._description + for server in self._servers.values(): + server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + + # Mark all servers Unknown. + self._description = self._description.reset() + for address, sd in self._description.server_descriptions().items(): + if address in self._servers: + self._servers[address].description = sd + + # Stop SRV polling thread. + if self._srv_monitor: + self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + self._opened = False + self._closed = True + + # Publish only after releasing the lock. 
+ if self._publish_tp: + assert self._events is not None + self._description = TopologyDescription( + TOPOLOGY_TYPE.Unknown, + {}, + self._description.replica_set_name, + self._description.max_set_version, + self._description.max_election_id, + self._description._topology_settings, + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + ( + old_td, + self._description, + self._topology_id, + ), + ) + ) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(old_td), + newDescription=repr(self._description), + ) + _debug_log( + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id + ) + + if self._publish_server or self._publish_tp: + # Make sure the events executor thread is fully closed before publishing the remaining events + self.__events_executor.close() + self.__events_executor.join(1) + process_events_queue(weakref.ref(self._events)) # type: ignore[arg-type] + + @property + def description(self) -> TopologyDescription: + return self._description + + def pop_all_sessions(self) -> list[_ServerSession]: + """Pop all session ids from the pool.""" + return self._session_pool.pop_all() + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + return self._session_pool.get_server_session(session_timeout_minutes) + + def return_server_session(self, server_session: _ServerSession) -> None: + self._session_pool.return_server_session(server_session) + + def _new_selection(self) -> Selection: + """A Selection object, initially including all known servers. + + Hold the lock when calling this. + """ + return Selection.from_topology_description(self._description) + + def _ensure_opened(self) -> None: + """Start monitors, or restart after a fork. + + Hold the lock when calling this. + """ + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + + if not self._opened: + self._opened = True + self._update_servers() + + # Start or restart the events publishing thread. + if self._publish_tp or self._publish_server: + self.__events_executor.open() + + # Start the SRV polling thread. + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): + self._srv_monitor.open() + + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + + # Ensure that the monitors are open. + for server in self._servers.values(): + server.open() + + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: + server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. 
+ return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers_shared._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_shared._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. + + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. 
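+
+        For example, a ``WriteError`` is ignored entirely, while most network
+        errors mark the server Unknown and clear its connection pool.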
+ """ + with self._lock: + self._handle_error(address, err_ctx) + + def _request_check_all(self) -> None: + """Wake all monitors. Hold the lock when calling this.""" + for server in self._servers.values(): + server.request_check() + + def _update_servers(self) -> None: + """Sync our Servers from TopologyDescription.server_descriptions. + + Hold the lock while calling this. + """ + for address, sd in self._description.server_descriptions().items(): + if address not in self._servers: + monitor = self._settings.monitor_class( + server_description=sd, + topology=self, + pool=self._create_pool_for_monitor(address), + topology_settings=self._settings, + ) + + weak = None + if self._publish_server and self._events is not None: + weak = weakref.ref(self._events) + server = Server( + server_description=sd, + pool=self._create_pool_for_server(address), + monitor=monitor, + topology_id=self._topology_id, + listeners=self._listeners, + events=weak, + ) + + self._servers[address] = server + server.open() + else: + # Cache old is_writable value. + was_writable = self._servers[address].description.is_writable + # Update server description. + self._servers[address].description = sd + # Update is_writable value of the pool, if it changed. + if was_writable != sd.is_writable: + self._servers[address].pool.update_is_writable(sd.is_writable) + + for address, server in list(self._servers.items()): + if not self._description.has_server(address): + server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + self._servers.pop(address) + + def _create_pool_for_server(self, address: _Address) -> Pool: + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) + + def _create_pool_for_monitor(self, address: _Address) -> Pool: + options = self._settings.pool_options + + # According to the Server Discovery And Monitoring Spec, monitors use + # connect_timeout for both connect_timeout and socket_timeout. The + # pool only has one socket so maxPoolSize and so on aren't needed. + monitor_pool_options = PoolOptions( + connect_timeout=options.connect_timeout, + socket_timeout=options.connect_timeout, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, + appname=options.appname, + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) + + return self._settings.pool_class( + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id + ) + + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: + """Format an error message if server selection fails. + + Hold the lock when calling this. + """ + is_replica_set = self._description.topology_type in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) + + if is_replica_set: + server_plural = "replica set members" + elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: + server_plural = "mongoses" + else: + server_plural = "servers" + + if self._description.known_servers: + # We've connected, but no servers match the selector. 
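+            # For example, a secondary read preference was used but the only
+            # known member is currently the primary.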
+ if selector is writable_server_selector: + if is_replica_set: + return "No primary available for writes" + else: + return "No %s available for writes" % server_plural + else: + return f'No {server_plural} match selector "{selector}"' + else: + addresses = list(self._description.server_descriptions()) + servers = list(self._description.server_descriptions().values()) + if not servers: + if is_replica_set: + # We removed all servers because of the wrong setName? + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) + else: + return "No %s available" % server_plural + + # 1 or more servers, all Unknown. Are they unknown for one reason? + error = servers[0].error + same = all(server.error == error for server in servers[1:]) + if same: + if error is None: + # We're still discovering. + return "No %s found yet" % server_plural + + if is_replica_set and not set(addresses).intersection(self._seed_addresses): + # We replaced our seeds with new hosts but can't reach any. + return ( + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) + + return str(error) + else: + return ",".join(str(server.error) for server in servers if server.error) + + def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for MongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> list[Server]: + """Filter out 
deprioritized servers from a list of server candidates."""
+    if not deprioritized_servers:
+        return candidates
+
+    filtered = [server for server in candidates if server not in deprioritized_servers]
+
+    # If filtering removed every candidate, fall back to the original list.
+    return filtered or candidates
diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py
new file mode 100644
index 0000000000..45c1752953
--- /dev/null
+++ b/pymongo/synchronous/uri_parser.py
@@ -0,0 +1,193 @@
+# Copyright 2011-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+
+"""Tools to parse and validate a MongoDB URI."""
+from __future__ import annotations
+
+from typing import Any, Optional
+from urllib.parse import unquote_plus
+
+from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary
+from pymongo.errors import ConfigurationError, InvalidURI
+from pymongo.synchronous.srv_resolver import _SrvResolver
+from pymongo.uri_parser_shared import (
+    _ALLOWED_TXT_OPTS,
+    DEFAULT_PORT,
+    SCHEME,
+    SCHEME_LEN,
+    SRV_SCHEME_LEN,
+    _check_options,
+    _make_options_case_sensitive,
+    _validate_uri,
+    split_hosts,
+    split_options,
+)
+
+_IS_SYNC = True
+
+
+def parse_uri(
+    uri: str,
+    default_port: Optional[int] = DEFAULT_PORT,
+    validate: bool = True,
+    warn: bool = False,
+    normalize: bool = True,
+    connect_timeout: Optional[float] = None,
+    srv_service_name: Optional[str] = None,
+    srv_max_hosts: Optional[int] = None,
+) -> dict[str, Any]:
+    """Parse and validate a MongoDB URI.
+
+    Returns a dict of the form::
+
+        {
+            'nodelist': <list of (host, port) tuples>,
+            'username': <username> or None,
+            'password': <password> or None,
+            'database': <database name> or None,
+            'collection': <collection name> or None,
+            'options': <dict of MongoDB URI options>,
+            'fqdn': <fqdn of the MongoDB+SRV URI> or None
+        }
+
+    If the URI scheme is "mongodb+srv://", DNS SRV and TXT lookups will be done
+    to build nodelist and options.
+
+    :param uri: The MongoDB URI to parse.
+    :param default_port: The port number to use when one wasn't specified
+        for a host in the URI.
+    :param validate: If ``True`` (the default), validate and
+        normalize all options. Default: ``True``.
+    :param warn: When validating, if ``True`` then will warn
+        the user and then ignore any invalid options or values. If ``False``,
+        validation will error when options are unsupported or values are
+        invalid. Default: ``False``.
+    :param normalize: If ``True``, convert names of URI options
+        to their internally-used names. Default: ``True``.
+    :param connect_timeout: The maximum time in milliseconds to
+        wait for a response from the DNS server.
+    :param srv_service_name: A custom SRV service name.
+    :param srv_max_hosts: The maximum number of hosts to use from the DNS SRV
+        lookup.
+
+    .. versionchanged:: 4.14
+       ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``.
+
+    .. versionchanged:: 4.6
+       The delimiting slash (``/``) between hosts and connection options is now optional.
+       For example, "mongodb://example.com?tls=true" is now a valid URI.
+
+    .. versionchanged:: 4.0
+       To better follow RFC 3986, unquoted percent signs ("%") are no longer
+       supported.
+
+    .. versionchanged:: 3.9
+        Added the ``normalize`` parameter.
+ + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + result["options"] = _make_options_case_sensitive(result["options"]) + return result + + +def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = dns_resolver.get_hosts() + dns_options = dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/thread_util.py b/pymongo/thread_util.py deleted file mode 100644 index 36e650a1fd..0000000000 --- a/pymongo/thread_util.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2012-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities to abstract the differences between threads and greenlets.""" - -import threading -import sys -import weakref -try: - from time import monotonic as _time -except ImportError: - from time import time as _time - -have_gevent = True -try: - import greenlet - - try: - # gevent-1.0rc2 and later. - from gevent.lock import BoundedSemaphore as GeventBoundedSemaphore - except ImportError: - from gevent.coros import BoundedSemaphore as GeventBoundedSemaphore - - from gevent.greenlet import SpawnedLink - from gevent.event import Event as GeventEvent - -except ImportError: - have_gevent = False - -from pymongo.errors import ExceededMaxWaiters - - -# Do we have to work around http://bugs.python.org/issue1868? -issue1868 = (sys.version_info[:3] <= (2, 7, 0)) - - -class DummyLock(object): - def acquire(self): - pass - - def release(self): - pass - - -class Ident(object): - def __init__(self): - self._refs = {} - - def watching(self): - """Is the current thread or greenlet being watched for death?""" - return self.get() in self._refs - - def unwatch(self, tid): - self._refs.pop(tid, None) - - def get(self): - """An id for this thread or greenlet""" - raise NotImplementedError - - def watch(self, callback): - """Run callback when this thread or greenlet dies. callback takes - one meaningless argument. - """ - raise NotImplementedError - - -class ThreadIdent(Ident): - def __init__(self): - super(ThreadIdent, self).__init__() - self._local = threading.local() - if issue1868: - self._lock = threading.Lock() - else: - self._lock = DummyLock() - - # We watch for thread-death using a weakref callback to a thread local. - # Weakrefs are permitted on subclasses of object but not object() itself. - class ThreadVigil(object): - pass - - def _make_vigil(self): - # Threadlocals in Python <= 2.7.0 have race conditions when setting - # attributes and possibly when getting them, too, leading to weakref - # callbacks not getting called later. - self._lock.acquire() - try: - vigil = getattr(self._local, 'vigil', None) - if not vigil: - self._local.vigil = vigil = ThreadIdent.ThreadVigil() - finally: - self._lock.release() - - return vigil - - def get(self): - return id(self._make_vigil()) - - def watch(self, callback): - vigil = self._make_vigil() - self._refs[id(vigil)] = weakref.ref(vigil, callback) - - -class GreenletIdent(Ident): - def get(self): - return id(greenlet.getcurrent()) - - def watch(self, callback): - current = greenlet.getcurrent() - tid = self.get() - - if hasattr(current, 'link'): - # This is a Gevent Greenlet (capital G), which inherits from - # greenlet and provides a 'link' method to detect when the - # Greenlet exits. - link = SpawnedLink(callback) - current.rawlink(link) - self._refs[tid] = link - else: - # This is a non-Gevent greenlet (small g), or it's the main - # greenlet. - self._refs[tid] = weakref.ref(current, callback) - - def unwatch(self, tid): - """ call unlink if link before """ - link = self._refs.pop(tid, None) - current = greenlet.getcurrent() - if hasattr(current, 'unlink'): - # This is a Gevent enhanced Greenlet. Remove the SpawnedLink we - # linked to it. 
- current.unlink(link) - - -def create_ident(use_greenlets): - if use_greenlets: - return GreenletIdent() - else: - return ThreadIdent() - - -class Counter(object): - """A thread- or greenlet-local counter. - """ - def __init__(self, use_greenlets): - self.ident = create_ident(use_greenlets) - self._counters = {} - - def inc(self): - # Copy these references so on_thread_died needn't close over self - ident = self.ident - _counters = self._counters - - tid = ident.get() - _counters.setdefault(tid, 0) - _counters[tid] += 1 - - if not ident.watching(): - # Before the tid is possibly reused, remove it from _counters - def on_thread_died(ref): - ident.unwatch(tid) - _counters.pop(tid, None) - - ident.watch(on_thread_died) - - return _counters[tid] - - def dec(self): - tid = self.ident.get() - if self._counters.get(tid, 0) > 0: - self._counters[tid] -= 1 - return self._counters[tid] - else: - return 0 - - def get(self): - return self._counters.get(self.ident.get(), 0) - - -class Future(object): - """Minimal backport of concurrent.futures.Future. - - event_class makes this Future adaptable for Gevent and other frameworks. - """ - def __init__(self, event_class): - self._event = event_class() - self._result = None - self._exception = None - - def set_result(self, result): - self._result = result - self._event.set() - - def set_exception(self, exc): - if hasattr(exc, 'with_traceback'): - # Python 3: avoid potential reference cycle. - self._exception = exc.with_traceback(None) - else: - self._exception = exc - self._event.set() - - def result(self): - self._event.wait() - if self._exception: - raise self._exception - else: - return self._result - - -### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire -class Semaphore: - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - self._cond = threading.Condition(threading.Lock()) - self._value = value - - def acquire(self, blocking=True, timeout=None): - if not blocking and timeout is not None: - raise ValueError("can't specify timeout for non-blocking acquire") - rc = False - endtime = None - self._cond.acquire() - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value = self._value - 1 - rc = True - self._cond.release() - return rc - - __enter__ = acquire - - def release(self): - self._cond.acquire() - self._value = self._value + 1 - self._cond.notify() - self._cond.release() - - def __exit__(self, t, v, tb): - self.release() - - @property - def counter(self): - return self._value - - -class BoundedSemaphore(Semaphore): - """Semaphore that checks that # releases is <= # acquires""" - def __init__(self, value=1): - Semaphore.__init__(self, value) - self._initial_value = value - - def release(self): - if self._value >= self._initial_value: - raise ValueError("Semaphore released too many times") - return Semaphore.release(self) -### End backport from CPython 3.2 - - -class DummySemaphore(object): - def __init__(self, value=None): - pass - - def acquire(self, blocking=True, timeout=None): - return True - - def release(self): - pass - - -class MaxWaitersBoundedSemaphore(object): - def __init__(self, semaphore_class, value=1, max_waiters=1): - self.waiter_semaphore = semaphore_class(max_waiters) - self.semaphore = 
semaphore_class(value) - - def acquire(self, blocking=True, timeout=None): - if not self.waiter_semaphore.acquire(False): - raise ExceededMaxWaiters() - try: - return self.semaphore.acquire(blocking, timeout) - finally: - self.waiter_semaphore.release() - - def __getattr__(self, name): - return getattr(self.semaphore, name) - - -class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore): - def __init__(self, value=1, max_waiters=1): - MaxWaitersBoundedSemaphore.__init__( - self, BoundedSemaphore, value, max_waiters) - - -if have_gevent: - class MaxWaitersBoundedSemaphoreGevent(MaxWaitersBoundedSemaphore): - def __init__(self, value=1, max_waiters=1): - MaxWaitersBoundedSemaphore.__init__( - self, GeventBoundedSemaphore, value, max_waiters) - - -def create_semaphore(max_size, max_waiters, use_greenlets): - if max_size is None: - return DummySemaphore() - elif use_greenlets: - if max_waiters is None: - return GeventBoundedSemaphore(max_size) - else: - return MaxWaitersBoundedSemaphoreGevent(max_size, max_waiters) - else: - if max_waiters is None: - return BoundedSemaphore(max_size) - else: - return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters) - - -def create_event(use_greenlets): - if use_greenlets: - return GeventEvent() - else: - return threading.Event() diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py new file mode 100644 index 0000000000..de67a8f94a --- /dev/null +++ b/pymongo/topology_description.py @@ -0,0 +1,700 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Represent a deployment of MongoDB servers. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from random import sample +from typing import ( + Any, + Callable, + List, + Mapping, + MutableMapping, + NamedTuple, + Optional, + cast, +) + +from bson.min_key import MinKey +from bson.objectid import ObjectId +from pymongo import common +from pymongo.errors import ConfigurationError, PyMongoError +from pymongo.read_preferences import Primary, ReadPreference, _AggWritePref, _ServerMode +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection +from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address + + +# Enumeration for various kinds of MongoDB cluster topologies. +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) + +# Topologies compatible with SRV record polling. 
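+# Per the SRV polling spec, the seedlist is only re-polled while the topology
+# is still Unknown or has resolved to Sharded; polling stops once a replica
+# set or a single server has been identified.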
+SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] + + +class TopologyDescription: + def __init__( + self, + topology_type: int, + server_descriptions: dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: + """Representation of a deployment of MongoDB servers. + + :param topology_type: initial type + :param server_descriptions: dict of (address, ServerDescription) for + all seeds + :param replica_set_name: replica set name or None + :param max_set_version: greatest setVersion seen from a primary, or None + :param max_election_id: greatest electionId seen from a primary, or None + :param topology_settings: a TopologySettings + """ + self._topology_type = topology_type + self._replica_set_name = replica_set_name + self._server_descriptions = server_descriptions + self._max_set_version = max_set_version + self._max_election_id = max_election_id + + # The heartbeat_frequency is used in staleness estimates. + self._topology_settings = topology_settings + + # Is PyMongo compatible with all servers' wire protocols? + self._incompatible_err = None + if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: + self._init_incompatible_err() + + # Server Discovery And Monitoring Spec: Whenever a client updates the + # TopologyDescription from an hello response, it MUST set + # TopologyDescription.logicalSessionTimeoutMinutes to the smallest + # logicalSessionTimeoutMinutes value among ServerDescriptions of all + # data-bearing server types. If any have a null + # logicalSessionTimeoutMinutes, then + # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + readable_servers = self.readable_servers + if not readable_servers: + self._ls_timeout_minutes = None + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): + self._ls_timeout_minutes = None + else: + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) + + def _init_incompatible_err(self) -> None: + """Internal compatibility check for non-load balanced topologies.""" + for s in self._server_descriptions.values(): + if not s.is_server_type_known: + continue + + # s.min/max_wire_version is the server's wire protocol. + # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports. + server_too_new = ( + # Server too new. + s.min_wire_version is not None + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) + + server_too_old = ( + # Server too old. + s.max_wire_version is not None + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) + + if server_too_new: + self._incompatible_err = ( + "Server at %s:%d requires wire version %d, but this " # type: ignore + "version of PyMongo only supports up to %d." + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) + + elif server_too_old: + self._incompatible_err = ( + "Server at %s:%d reports wire version %d, but this " # type: ignore + "version of PyMongo requires at least %d (MongoDB %s)." + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) + + break + + def check_compatible(self) -> None: + """Raise ConfigurationError if any server is incompatible. 
+ + A server is incompatible if its wire protocol version range does not + overlap with PyMongo's. + """ + if self._incompatible_err: + raise ConfigurationError(self._incompatible_err) + + def has_server(self, address: _Address) -> bool: + return address in self._server_descriptions + + def reset_server(self, address: _Address) -> TopologyDescription: + """A copy of this description, with one server marked Unknown.""" + unknown_sd = self._server_descriptions[address].to_unknown() + return updated_topology_description(self, unknown_sd) + + def reset(self) -> TopologyDescription: + """A copy of this description, with all servers marked Unknown.""" + if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + topology_type = self._topology_type + + # The default ServerDescription's type is Unknown. + sds = {address: ServerDescription(address) for address in self._server_descriptions} + + return TopologyDescription( + topology_type, + sds, + self._replica_set_name, + self._max_set_version, + self._max_election_id, + self._topology_settings, + ) + + def server_descriptions(self) -> dict[_Address, ServerDescription]: + """dict of (address, + :class:`~pymongo.server_description.ServerDescription`). + """ + return self._server_descriptions.copy() + + @property + def topology_type(self) -> int: + """The type of this topology.""" + return self._topology_type + + @property + def topology_type_name(self) -> str: + """The topology type as a human readable string. + + .. versionadded:: 3.4 + """ + return TOPOLOGY_TYPE._fields[self._topology_type] + + @property + def replica_set_name(self) -> Optional[str]: + """The replica set name.""" + return self._replica_set_name + + @property + def max_set_version(self) -> Optional[int]: + """Greatest setVersion seen from a primary, or None.""" + return self._max_set_version + + @property + def max_election_id(self) -> Optional[ObjectId]: + """Greatest electionId seen from a primary, or None.""" + return self._max_election_id + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + """Minimum logical session timeout, or None.""" + return self._ls_timeout_minutes + + @property + def known_servers(self) -> list[ServerDescription]: + """List of Servers of types besides Unknown.""" + return [s for s in self._server_descriptions.values() if s.is_server_type_known] + + @property + def has_known_servers(self) -> bool: + """Whether there are any Servers of types besides Unknown.""" + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) + + @property + def readable_servers(self) -> list[ServerDescription]: + """List of readable Servers.""" + return [s for s in self._server_descriptions.values() if s.is_readable] + + @property + def common_wire_version(self) -> Optional[int]: + """Minimum of all servers' max wire versions, or None.""" + servers = self.known_servers + if servers: + return min(s.max_wire_version for s in self.known_servers) + + return None + + @property + def heartbeat_frequency(self) -> int: + return self._topology_settings.heartbeat_frequency + + @property + def srv_max_hosts(self) -> int: + return self._topology_settings._srv_max_hosts + + def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: + if not selection: + return [] + round_trip_times: list[float] = [] + for server in selection.server_descriptions: + if server.round_trip_time is None: + config_err_msg = f"round_trip_time for server 
{server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" + raise ConfigurationError(config_err_msg) + round_trip_times.append(server.round_trip_time) + # Round trip time in seconds. + fastest = min(round_trip_times) + threshold = self._topology_settings.local_threshold_ms / 1000.0 + return [ + s + for s in selection.server_descriptions + if (cast(float, s.round_trip_time) - fastest) <= threshold + ] + + def apply_selector( + self, + selector: Any, + address: Optional[_Address] = None, + custom_selector: Optional[_ServerSelector] = None, + ) -> list[ServerDescription]: + """List of servers matching the provided selector(s). + + :param selector: a callable that takes a Selection as input and returns + a Selection as output. For example, an instance of a read + preference from :mod:`~pymongo.read_preferences`. + :param address: A server address to select. + :param custom_selector: A callable that augments server + selection rules. Accepts a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + + .. versionadded:: 3.4 + """ + if getattr(selector, "min_wire_version", 0): + common_wv = self.common_wire_version + if common_wv and common_wv < selector.min_wire_version: + raise ConfigurationError( + "%s requires min wire version %d, but topology's min" + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) + + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) + + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): + # Ignore selectors for standalone and load balancer mode. + return self.known_servers + if address: + # Ignore selectors when explicit address is requested. + description = self.server_descriptions().get(address) + return [description] if description else [] + + # Primary selection fast path. + if self.topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary and type(selector) is Primary: + for sd in self._server_descriptions.values(): + if sd.server_type == SERVER_TYPE.RSPrimary: + sds = [sd] + if custom_selector: + sds = custom_selector(sds) + return sds + # No primary found, return an empty list. + return [] + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) + + # Apply custom selector followed by localThresholdMS. + if custom_selector is not None and selection: + selection = selection.with_server_descriptions( + custom_selector(selection.server_descriptions) + ) + return self._apply_local_threshold(selection) + + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: + """Does this topology have any readable servers available matching the + given read preference? + + :param read_preference: an instance of a read preference from + :mod:`~pymongo.read_preferences`. Defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + + .. note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + common.validate_read_preference("read_preference", read_preference) + return any(self.apply_selector(read_preference)) + + def has_writable_server(self) -> bool: + """Does this topology have a writable server available? + + .. 
note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + return self.has_readable_server(ReadPreference.PRIMARY) + + def __repr__(self) -> str: + # Sort the servers by address. + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) + + +# If topology type is Unknown and we receive a hello response, what should +# the new topology type be? +_SERVER_TYPE_TO_TOPOLOGY_TYPE = { + SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, + SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary, + SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. +} + + +def updated_topology_description( + topology_description: TopologyDescription, server_description: ServerDescription +) -> TopologyDescription: + """Return an updated copy of a TopologyDescription. + + :param topology_description: the current TopologyDescription + :param server_description: a new ServerDescription that resulted from + a hello call + + Called after attempting (successfully or not) to call hello on the + server at server_description.address. Does not modify topology_description. + """ + address = server_description.address + + # These values will be updated, if necessary, to form the new + # TopologyDescription. + topology_type = topology_description.topology_type + set_name = topology_description.replica_set_name + max_set_version = topology_description.max_set_version + max_election_id = topology_description.max_election_id + server_type = server_description.server_type + + # Don't mutate the original dict of server descriptions; copy it. + sds = topology_description.server_descriptions() + + # Replace this server's description with the new one. + sds[address] = server_description + + if topology_type == TOPOLOGY_TYPE.Single: + # Set server type to Unknown if replica set name does not match. + if set_name is not None and set_name != server_description.replica_set_name: + error = ConfigurationError( + "client is configured to connect to a replica set named " + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) + ) + sds[address] = server_description.to_unknown(error=error) + # Single type never changes. + return TopologyDescription( + TOPOLOGY_TYPE.Single, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) + + if topology_type == TOPOLOGY_TYPE.Unknown: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): + if len(topology_description._topology_settings.seeds) == 1: + topology_type = TOPOLOGY_TYPE.Single + else: + # Remove standalone from Topology when given multiple seeds. 
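+                # (A standalone reached through one of several seeds cannot be
+                # part of the deployment being discovered, so it is dropped
+                # rather than adopted.)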
+                sds.pop(address)
+        elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
+            topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
+
+    if topology_type == TOPOLOGY_TYPE.Sharded:
+        if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
+            sds.pop(address)
+
+    elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
+        if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
+            sds.pop(address)
+
+        elif server_type == SERVER_TYPE.RSPrimary:
+            (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary(
+                sds, set_name, server_description, max_set_version, max_election_id
+            )
+
+        elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther):
+            topology_type, set_name = _update_rs_no_primary_from_member(
+                sds, set_name, server_description
+            )
+
+    elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
+        if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
+            sds.pop(address)
+            topology_type = _check_has_primary(sds)
+
+        elif server_type == SERVER_TYPE.RSPrimary:
+            (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary(
+                sds, set_name, server_description, max_set_version, max_election_id
+            )
+
+        elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther):
+            topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description)
+
+        else:
+            # Server type is Unknown or RSGhost: did we just lose the primary?
+            topology_type = _check_has_primary(sds)
+
+    # Return updated copy.
+    return TopologyDescription(
+        topology_type,
+        sds,
+        set_name,
+        max_set_version,
+        max_election_id,
+        topology_description._topology_settings,
+    )
+
+
+def _updated_topology_description_srv_polling(
+    topology_description: TopologyDescription, seedlist: list[tuple[str, Any]]
+) -> TopologyDescription:
+    """Return an updated copy of a TopologyDescription.
+
+    :param topology_description: the current TopologyDescription
+    :param seedlist: a list of new (host, port) seeds that resulted from
+        polling the SRV record
+    """
+    assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES
+    # Create a copy of the server descriptions.
+    sds = topology_description.server_descriptions()
+
+    # If seeds haven't changed, don't do anything.
+    if set(sds.keys()) == set(seedlist):
+        return topology_description
+
+    # Remove SDs corresponding to servers no longer part of the SRV record.
+    for address in list(sds.keys()):
+        if address not in seedlist:
+            sds.pop(address)
+
+    if topology_description.srv_max_hosts != 0:
+        new_hosts = set(seedlist) - set(sds.keys())
+        n_to_add = topology_description.srv_max_hosts - len(sds)
+        if n_to_add > 0:
+            seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts)))
+        else:
+            seedlist = []
+    # Add SDs corresponding to servers recently added to the SRV record.
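+    # Each address added below starts out as an Unknown ServerDescription;
+    # its real type is learned from subsequent hello responses.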
+ for address in seedlist: + if address not in sds: + sds[address] = ServerDescription(address) + return TopologyDescription( + topology_description.topology_type, + sds, + topology_description.replica_set_name, + topology_description.max_set_version, + topology_description.max_election_id, + topology_description._topology_settings, + ) + + +def _update_rs_from_primary( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], +) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: + """Update topology description from a primary's hello response. + + Pass in a dict of ServerDescriptions, current replica set name, the + ServerDescription we are processing, and the TopologyDescription's + max_set_version and max_election_id if any. + + Returns (new topology type, new replica_set_name, new max_set_version, + new max_election_id). + """ + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + # We found a primary but it doesn't have the replica_set_name + # provided by the user. + sds.pop(server_description.address) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) # type: ignore[type-arg] + max_election_tuple: tuple = (max_set_version, max_election_id) # type: ignore[type-arg] + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version + + # We've heard from the primary. Is it the same primary as before? + for server in sds.values(): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): + # Reset old primary's type to Unknown. 
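+            # (Only the description changes here; clearing the old primary's
+            # pool, if needed, is handled by the Topology's error handling.)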
+ sds[server.address] = server.to_unknown( + PyMongoError("primary marked stale due to discovery of newer primary") + ) + + # There can be only one prior primary. + break + + # Discover new hosts from this primary's response. + for new_address in server_description.all_hosts: + if new_address not in sds: + sds[new_address] = ServerDescription(new_address) + + # Remove hosts not in the response. + for addr in set(sds) - server_description.all_hosts: + sds.pop(addr) + + # If the host list differs from the seed list, we may not have a primary + # after all. + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + + +def _update_rs_with_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> int: + """RS with known primary. Process a response from a non-primary. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns new topology type. + """ + assert replica_set_name is not None + + if replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + elif server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + # Had this member been the primary? + return _check_has_primary(sds) + + +def _update_rs_no_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> tuple[int, Optional[str]]: + """RS without known primary. Update from a non-primary's response. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns (new topology type, new replica_set_name). + """ + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + return topology_type, replica_set_name + + # This isn't the primary's response, so don't remove any servers + # it doesn't report. Only add new servers. + for address in server_description.all_hosts: + if address not in sds: + sds[address] = ServerDescription(address) + + if server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + return topology_type, replica_set_name + + +def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: + """Current topology type is ReplicaSetWithPrimary. Is primary still known? + + Pass in a dict of ServerDescriptions. + + Returns new topology type. + """ + for s in sds.values(): + if s.server_type == SERVER_TYPE.RSPrimary: + return TOPOLOGY_TYPE.ReplicaSetWithPrimary + else: # noqa: PLW0120 + return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/typings.py b/pymongo/typings.py new file mode 100644 index 0000000000..e678720db9 --- /dev/null +++ b/pymongo/typings.py @@ -0,0 +1,78 @@ +# Copyright 2022-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by PyMongo""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +from bson.typings import _DocumentOut, _DocumentType, _DocumentTypeArg + +if TYPE_CHECKING: + from pymongo.asynchronous.bulk import _AsyncBulk + from pymongo.asynchronous.client_bulk import _AsyncClientBulk + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.collation import Collation + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_bulk import _ClientBulk + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + + +# Common Shared Types. +_Address = Tuple[str, Optional[int]] +_CollationIn = Union[Mapping[str, Any], "Collation"] +_Pipeline = Sequence[Mapping[str, Any]] +ClusterTime = Mapping[str, Any] + +_T = TypeVar("_T") + +# Type hinting types for compatibility between async and sync classes +_AgnosticMongoClient = Union["AsyncMongoClient", "MongoClient"] # type: ignore[type-arg] +_AgnosticConnection = Union["AsyncConnection", "Connection"] +_AgnosticClientSession = Union["AsyncClientSession", "ClientSession"] +_AgnosticBulk = Union["_AsyncBulk", "_Bulk"] +_AgnosticClientBulk = Union["_AsyncClientBulk", "_ClientBulk"] + + +def strip_optional(elem: Optional[_T]) -> _T: + """This function is to allow us to cast all the elements of an iterator from Optional[_T] to _T + while inside a list comprehension. + """ + assert elem is not None + return elem + + +__all__ = [ + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", + "_AgnosticMongoClient", +] diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index dc7359fab8..fe253b9bbf 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -1,10 +1,10 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,315 +13,32 @@ # permissions and limitations under the License. 
-"""Tools to parse and validate a MongoDB URI.""" +"""Re-import of synchronous URI Parser API for compatibility.""" +from __future__ import annotations -from urllib import unquote_plus +import sys -from pymongo.common import validate -from pymongo.errors import (ConfigurationError, - InvalidURI, - UnsupportedOption) +from pymongo.errors import InvalidURI +from pymongo.synchronous.uri_parser import * # noqa: F403 +from pymongo.synchronous.uri_parser import __doc__ as original_doc +from pymongo.uri_parser_shared import * # noqa: F403 +__doc__ = original_doc +__all__ = [ # noqa: F405 + "parse_userinfo", + "parse_ipv6_literal_host", + "parse_host", + "validate_options", + "split_options", + "split_hosts", + "parse_uri", +] -SCHEME = 'mongodb://' -SCHEME_LEN = len(SCHEME) -DEFAULT_PORT = 27017 - - -def _partition(entity, sep): - """Python2.4 doesn't have a partition method so we provide - our own that mimics str.partition from later releases. - - Split the string at the first occurrence of sep, and return a - 3-tuple containing the part before the separator, the separator - itself, and the part after the separator. If the separator is not - found, return a 3-tuple containing the string itself, followed - by two empty strings. - """ - parts = entity.split(sep, 1) - if len(parts) == 2: - return parts[0], sep, parts[1] - else: - return entity, '', '' - - -def _rpartition(entity, sep): - """Python2.4 doesn't have an rpartition method so we provide - our own that mimics str.rpartition from later releases. - - Split the string at the last occurrence of sep, and return a - 3-tuple containing the part before the separator, the separator - itself, and the part after the separator. If the separator is not - found, return a 3-tuple containing two empty strings, followed - by the string itself. - """ - idx = entity.rfind(sep) - if idx == -1: - return '', '', entity - return entity[:idx], sep, entity[idx + 1:] - - -def parse_userinfo(userinfo): - """Validates the format of user information in a MongoDB URI. - Reserved characters like ':', '/', '+' and '@' must be escaped - following RFC 2396. - - Returns a 2-tuple containing the unescaped username followed - by the unescaped password. - - :Paramaters: - - `userinfo`: A string of the form : - - .. versionchanged:: 2.2 - Now uses `urllib.unquote_plus` so `+` characters must be escaped. - """ - if '@' in userinfo or userinfo.count(':') > 1: - raise InvalidURI("':' or '@' characters in a username or password " - "must be escaped according to RFC 2396.") - user, _, passwd = _partition(userinfo, ":") - # No password is expected with GSSAPI authentication. - if not user: - raise InvalidURI("The empty string is not valid username.") - user = unquote_plus(user) - passwd = unquote_plus(passwd) - - return user, passwd - - -def parse_ipv6_literal_host(entity, default_port): - """Validates an IPv6 literal host:port string. - - Returns a 2-tuple of IPv6 literal followed by port where - port is default_port if it wasn't specified in entity. - - :Parameters: - - `entity`: A string that represents an IPv6 literal enclosed - in braces (e.g. '[::1]' or '[::1]:27017'). - - `default_port`: The port number to use when one wasn't - specified in entity. 
- """ - if entity.find(']') == -1: - raise ConfigurationError("an IPv6 address literal must be " - "enclosed in '[' and ']' according " - "to RFC 2732.") - i = entity.find(']:') - if i == -1: - return entity[1:-1], default_port - return entity[1: i], entity[i + 2:] - - -def parse_host(entity, default_port=DEFAULT_PORT): - """Validates a host string - - Returns a 2-tuple of host followed by port where port is default_port - if it wasn't specified in the string. - - :Parameters: - - `entity`: A host or host:port string where host could be a - hostname or IP address. - - `default_port`: The port number to use when one wasn't - specified in entity. - """ - host = entity - port = default_port - if entity[0] == '[': - host, port = parse_ipv6_literal_host(entity, default_port) - elif entity.find(':') != -1: - if entity.count(':') > 1: - raise ConfigurationError("Reserved characters such as ':' must be " - "escaped according RFC 2396. An IPv6 " - "address literal must be enclosed in '[' " - "and ']' according to RFC 2732.") - host, port = host.split(':', 1) - if isinstance(port, basestring): - if not port.isdigit(): - raise ConfigurationError("Port number must be an integer.") - port = int(port) - return host, port - - -def validate_options(opts): - """Validates and normalizes options passed in a MongoDB URI. - - Returns a new dictionary of validated and normalized options. - - :Parameters: - - `opts`: A dict of MongoDB URI options. - """ - normalized = {} - for option, value in opts.iteritems(): - option, value = validate(option, value) - # str(option) to ensure that a unicode URI results in plain 'str' - # option names. 'normalized' is then suitable to be passed as kwargs - # in all Python versions. - normalized[str(option)] = value - return normalized - - -def _parse_options(opts, delim): - """Helper method for split_options which creates the options dict. - Also handles the creation of a list of dicts for the URI tag_sets/ - readpreferencetags portion.""" - options = {} - for opt in opts.split(delim): - key, val = opt.split("=") - if key.lower() == 'readpreferencetags': - options.setdefault('readpreferencetags', []).append(val) - else: - options[key] = val - if 'readpreferencetags' in options: - new_tag_sets = [] - for tag_set in options['readpreferencetags']: - tag_dict = {} - try: - for tag in tag_set.split(","): - tag_parts = tag.split(":") - tag_dict[tag_parts[0]] = tag_parts[1] - new_tag_sets.append(tag_dict) - except IndexError: - new_tag_sets.append({}) - options['readpreferencetags'] = new_tag_sets - return options - - -def split_options(opts): - """Takes the options portion of a MongoDB URI, validates each option - and returns the options in a dictionary. The option names will be returned - lowercase even if camelCase options are used. - - :Parameters: - - `opt`: A string representing MongoDB URI options. - """ - and_idx = opts.find("&") - semi_idx = opts.find(";") - try: - if and_idx >= 0 and semi_idx >= 0: - raise InvalidURI("Can not mix '&' and ';' for option separators.") - elif and_idx >= 0: - options = _parse_options(opts, "&") - elif semi_idx >= 0: - options = _parse_options(opts, ";") - elif opts.find("=") != -1: - options = _parse_options(opts, None) - else: - raise ValueError - except ValueError: - raise InvalidURI("MongoDB URI options are key=value pairs.") - - return validate_options(options) - - -def split_hosts(hosts, default_port=DEFAULT_PORT): - """Takes a string of the form host1[:port],host2[:port]... and - splits it into (host, port) tuples. 
If [:port] isn't present the - default_port is used. - - Returns a set of 2-tuples containing the host name (or IP) followed by - port number. - - :Parameters: - - `hosts`: A string of the form host1[:port],host2[:port],... - - `default_port`: The port number to use when one wasn't specified - for a host. - """ - nodes = [] - for entity in hosts.split(','): - if not entity: - raise ConfigurationError("Empty host " - "(or extra comma in host list).") - port = default_port - # Unix socket entities don't have ports - if entity.endswith('.sock'): - port = None - nodes.append(parse_host(entity, port)) - return nodes - - -def parse_uri(uri, default_port=DEFAULT_PORT): - """Parse and validate a MongoDB URI. - - Returns a dict of the form:: - - { - 'nodelist': <list of (host, port) tuples>, - 'username': <username> or None, - 'password': <password> or None, - 'database': <database name> or None, - 'collection': <collection name> or None, - 'options': <dict of MongoDB URI options> - } - - :Parameters: - - `uri`: The MongoDB URI to parse. - - `default_port`: The port number to use when one wasn't specified - for a host in the URI. - """ - if not uri.startswith(SCHEME): - raise InvalidURI("Invalid URI scheme: URI " - "must begin with '%s'" % (SCHEME,)) - - scheme_free = uri[SCHEME_LEN:] - - if not scheme_free: - raise InvalidURI("Must provide at least one hostname or IP.") - - nodes = None - user = None - passwd = None - dbase = None - collection = None - options = {} - - # Check for unix domain sockets in the uri - if '.sock' in scheme_free: - host_part, _, path_part = _rpartition(scheme_free, '/') - try: - parse_uri('%s%s' % (SCHEME, host_part)) - except (ConfigurationError, InvalidURI): - host_part = scheme_free - path_part = "" - else: - host_part, _, path_part = _partition(scheme_free, '/') - - if not path_part and '?' in host_part: - raise InvalidURI("A '/' is required between " - "the host list and any options.") - - if '@' in host_part: - userinfo, _, hosts = _rpartition(host_part, '@') - user, passwd = parse_userinfo(userinfo) - else: - hosts = host_part - - nodes = split_hosts(hosts, default_port=default_port) - - if path_part: - - if path_part[0] == '?': - opts = path_part[1:] - else: - dbase, _, opts = _partition(path_part, '?') - if '.' in dbase: - dbase, collection = dbase.split('.', 1) - - if opts: - options = split_options(opts) - - return { - 'nodelist': nodes, - 'username': user, - 'password': passwd, - 'database': dbase, - 'collection': collection, - 'options': options - } - - -if __name__ == '__main__': +if __name__ == "__main__": import pprint - import sys + try: - pprint.pprint(parse_uri(sys.argv[1])) - except (InvalidURI, UnsupportedOption), e: - print e + pprint.pprint(parse_uri(sys.argv[1])) # noqa: F405, T203 + except InvalidURI as exc: + print(exc) # noqa: T201 sys.exit(0) diff --git a/pymongo/uri_parser_shared.py b/pymongo/uri_parser_shared.py new file mode 100644 index 0000000000..59168d1e9f --- /dev/null +++ b/pymongo/uri_parser_shared.py @@ -0,0 +1,614 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License.
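# An illustrative sketch (editorial, not part of this diff) of the parse_uri
# API re-exported by the compatibility shim above; the commented values follow
# the documented return shape and are not captured from a live run.
from pymongo.uri_parser import parse_uri

res = parse_uri("mongodb://user%40corp:secret@localhost:27017/mydb?replicaSet=rs0")
print(res["nodelist"])   # [('localhost', 27017)]
print(res["username"])   # 'user@corp' -- percent-escapes decoded
print(res["database"])   # 'mydb'
print(res["options"])    # validated options, e.g. {'replicaset': 'rs0'}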
+ + +"""Tools to parse and validate a MongoDB URI. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +import re +import sys +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.asynchronous.srv_resolver import _have_dnspython +from pymongo.client_options import _parse_ssl_options +from pymongo.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.typings import _Address + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + +SCHEME = "mongodb://" +SCHEME_LEN = len(SCHEME) +SRV_SCHEME = "mongodb+srv://" +SRV_SCHEME_LEN = len(SRV_SCHEME) +DEFAULT_PORT = 27017 + +URI_OPTIONS = frozenset( + [ + "appname", + "authMechanism", + "authMechanismProperties", + "authSource", + "compressors", + "connectTimeoutMS", + "directConnection", + "heartbeatFrequencyMS", + "journal", + "loadBalanced", + "localThresholdMS", + "maxIdleTimeMS", + "maxPoolSize", + "maxConnecting", + "maxStalenessSeconds", + "minPoolSize", + "proxyHost", + "proxyPort", + "proxyUsername", + "proxyPassword", + "readConcernLevel", + "readPreference", + "readPreferenceTags", + "replicaSet", + "retryReads", + "retryWrites", + "serverMonitoringMode", + "serverSelectionTimeoutMS", + "serverSelectionTryOnce", + "socketTimeoutMS", + "srvMaxHosts", + "srvServiceName", + "ssl", + "tls", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsCAFile", + "tlsCertificateKeyFile", + "tlsCertificateKeyFilePassword", + "tlsDisableCertificateRevocationCheck", + "tlsDisableOCSPEndpointCheck", + "tlsInsecure", + "w", + "waitQueueTimeoutMS", + "wTimeoutMS", + "zlibCompressionLevel", + ] +) + + +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :param s: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: + """Validates the format of user information in a MongoDB URI. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. + + Returns a 2-tuple containing the unescaped username followed + by the unescaped password. + + :param userinfo: A string of the form : + """ + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + + user, _, passwd = userinfo.partition(":") + # No password is expected with GSSAPI authentication. + if not user: + raise InvalidURI("The empty string is not valid username") + + return unquote_plus(user), unquote_plus(passwd) + + +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> tuple[str, Optional[Union[str, int]]]: + """Validates an IPv6 literal host:port string. + + Returns a 2-tuple of IPv6 literal followed by port where + port is default_port if it wasn't specified in entity. 
+ + :param entity: A string that represents an IPv6 literal enclosed + in braces (e.g. '[::1]' or '[::1]:27017'). + :param default_port: The port number to use when one wasn't + specified in entity. + """ + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." + ) + i = entity.find("]:") + if i == -1: + return entity[1:-1], default_port + return entity[1:i], entity[i + 2 :] + + +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: + """Validates a host string + + Returns a 2-tuple of host followed by port where port is default_port + if it wasn't specified in the string. + + :param entity: A host or host:port string where host could be a + hostname or IP address. + :param default_port: The port number to use when one wasn't + specified in entity. + """ + host = entity + port: Optional[Union[str, int]] = default_port + if entity[0] == "[": + host, port = parse_ipv6_literal_host(entity, default_port) + elif entity.endswith(".sock"): + return entity, default_port + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according to RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." + ) + host, port = host.split(":", 1) + if isinstance(port, str): + if not port.isdigit(): + # Special case check for mistakes like "mongodb://localhost:27017 ". + if all(c.isspace() or c.isdigit() for c in port): + for c in port: + if c.isspace(): + raise ValueError(f"Port contains whitespace character: {c!r}") + + # A non-digit port indicates that the URI is invalid, likely because the password + # or username were not escaped. + raise ValueError( + "Port contains non-digit characters. Hint: username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + if int(port) > 65535 or int(port) <= 0: + raise ValueError("Port must be an integer between 1 and 65535") + port = int(port) + + # Normalize hostname to lowercase, since DNS is case-insensitive: + # https://tools.ietf.org/html/rfc4343 + # This prevents useless rediscovery if "foo.com" is in the seed list but + # "FOO.com" is in the hello response. + return host.lower(), port + + +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", +} + + +def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: + """Helper method for split_options which creates the options dict. + Also handles the creation of a list for the URI tag_sets/ + readpreferencetags portion, and the use of a unicode options string. + """ + options = _CaseInsensitiveDictionary() + for uriopt in opts.split(delim): + key, value = uriopt.split("=") + if key.lower() == "readpreferencetags": + options.setdefault(key, []).append(value) + else: + if key in options: + warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) + if key.lower() == "authmechanismproperties": + val = value + else: + val = unquote_plus(value) + options[key] = val + + return options + + +def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Raise appropriate errors when conflicting TLS options are present in + the options dictionary. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options.
+ """ + # Implicitly defined options must not be explicitly specified. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + if opt in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") + if tlsallowinvalidcerts is not None: + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) + if tlsallowinvalidcerts is True: + options["tlsdisableocspendpointcheck"] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = options.get("tlscrlfile") + if tlscrlfile is not None: + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): + if options.get(opt) is True: + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." + raise InvalidURI(err_msg % (opt,)) + + if "ssl" in options and "tls" in options: + + def truth_value(val: Any) -> Any: + if val in ("true", "false"): + return val == "true" + if isinstance(val, bool): + return val + return val + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) + + return options + + +def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Issue appropriate warnings when deprecated options are present in the + options dictionary. Removes deprecated option key, value pairs if the + options dictionary is found to also have the renamed option. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + for optname in list(options): + if optname in URI_OPTIONS_DEPRECATION_MAP: + mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] + if mode == "renamed": + newoptname = message + if newoptname in options: + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." + warnings.warn( + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) + options.pop(optname) + continue + warn_msg = "Option '%s' is deprecated, use '%s' instead." + warnings.warn( + warn_msg % (options.cased_key(optname), newoptname), + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": + warn_msg = "Option '%s' is deprecated. %s." + warnings.warn( + warn_msg % (options.cased_key(optname), message), + DeprecationWarning, + stacklevel=2, + ) + + return options + + +def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Normalizes option names in the options dictionary by converting them to + their internally-used names. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Expand the tlsInsecure option. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + # Implicit options are logically the same as tlsInsecure. 
+ options[opt] = tlsinsecure + + for optname in list(options): + intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) + if intname is not None: + options[intname] = options.pop(optname) + + return options + + +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: + """Validates and normalizes options passed in a MongoDB URI. + + Returns a new dictionary of validated and normalized options. If warn is + False then errors will be raised for invalid options, otherwise they will + be ignored and a warning will be issued. + + :param opts: A dict of MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and + invalid options will be ignored. Otherwise invalid options will + cause errors. + """ + return get_validated_options(opts, warn) + + +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: + """Takes the options portion of a MongoDB URI, validates each option + and returns the options in a dictionary. + + :param opts: A string representing MongoDB URI options. + :param validate: If ``True`` (the default), validate and normalize all + options. + :param warn: If ``True``, log a warning for invalid options and ignore + them; if ``False`` (the default), invalid options raise errors. + :param normalize: If ``True`` (the default), renames all options to their + internally-used names. + """ + and_idx = opts.find("&") + semi_idx = opts.find(";") + try: + if and_idx >= 0 and semi_idx >= 0: + raise InvalidURI("Cannot mix '&' and ';' for option separators") + elif and_idx >= 0: + options = _parse_options(opts, "&") + elif semi_idx >= 0: + options = _parse_options(opts, ";") + elif opts.find("=") != -1: + options = _parse_options(opts, None) + else: + raise ValueError + except ValueError: + raise InvalidURI("MongoDB URI options are key=value pairs") from None + + options = _handle_security_options(options) + + options = _handle_option_deprecations(options) + + if normalize: + options = _normalize_options(options) + + if validate: + options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") + + return options + + +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: + """Takes a string of the form host1[:port],host2[:port]... and + splits it into (host, port) tuples. If [:port] isn't present the + default_port is used. + + Returns a list of 2-tuples containing the host name (or IP) followed by + port number. + + :param hosts: A string of the form host1[:port],host2[:port],... + :param default_port: The port number to use when one wasn't specified + for a host. + """ + nodes = [] + for entity in hosts.split(","): + if not entity: + raise ConfigurationError("Empty host (or extra comma in host list)") + port = default_port + # Unix socket entities don't have ports + if entity.endswith(".sock"): + port = None + nodes.append(parse_host(entity, port)) + return nodes + + +# Prohibited characters in database name. DB names also can't have ".", but for +# backward-compat we allow "db.collection" in URI. +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") + +_ALLOWED_TXT_OPTS = frozenset( + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds.
+ if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def _parse_kms_tls_options( + kms_tls_options: Optional[Mapping[str, Any]], + is_sync: bool, +) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts, is_sync) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +def _validate_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + elif uri.startswith(SRV_SCHEME): + if not _have_dnspython(): + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + else: + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") + + if not scheme_free: + raise InvalidURI("Must provide at least one hostname or IP") + + user = None + passwd = None + dbase = None + collection = None + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if dbase: + dbase = unquote_plus(dbase) + if "." 
in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") + user, passwd = parse_userinfo(userinfo) + else: + hosts = host_part + + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) + + hosts = unquote_plus(hosts) + fqdn = None + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") + nodes = split_hosts(hosts, default_port=None) + if len(nodes) != 1: + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") + fqdn, port = nodes[0] + if port is not None: + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, + } + + +def _make_options_case_sensitive(options: _CaseInsensitiveDictionary) -> dict[str, Any]: + case_sensitive = {} + for option in URI_OPTIONS: + if option.lower() in options: + case_sensitive[option] = options[option] + options.pop(option) + for k, v in options.items(): + case_sensitive[k] = v + return case_sensitive diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py new file mode 100644 index 0000000000..1f9da7af2e --- /dev/null +++ b/pymongo/write_concern.py @@ -0,0 +1,144 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with write concerns. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import Any, Optional, Union + +from pymongo.errors import ConfigurationError + + +# Duplicated here to avoid a circular import. +def validate_boolean(option: str, value: Any) -> bool: + """Validates that 'value' is True or False.""" + if isinstance(value, bool): + return value + raise TypeError(f"{option} must be True or False, was: {option}={value}") + + +class WriteConcern: + """WriteConcern + + :param w: (integer or string) Used with replication, write operations + will block until they have been replicated to the specified number + or tagged set of servers. `w=<int>` always includes the replica + set primary (e.g.
w=3 means write to the primary and wait until + replicated to **two** secondaries). **w=0 disables acknowledgement + of write operations and cannot be used with other write concern + options.** + :param wtimeout: (integer) **DEPRECATED** Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write + propagation to complete. If replication does not complete in the given + timeframe, a timeout exception is raised. + :param j: If ``True`` block until write operations have been committed + to the journal. Cannot be used in combination with `fsync`. Write + operations will fail with an exception if this option is used when + the server is running without journaling. + :param fsync: If ``True`` and the server is running without journaling, + blocks until the server has synced all data files to disk. If the + server is running with journaling, this acts the same as the `j` + option, blocking until write operations have been committed to the + journal. Cannot be used in combination with `j`. + + + .. versionchanged:: 4.7 + Deprecated parameter ``wtimeout``, use :meth:`~pymongo.timeout`. + """ + + __slots__ = ("__document", "__acknowledged", "__server_default") + + def __init__( + self, + w: Optional[Union[int, str]] = None, + wtimeout: Optional[int] = None, + j: Optional[bool] = None, + fsync: Optional[bool] = None, + ) -> None: + self.__document: dict[str, Any] = {} + self.__acknowledged = True + + if wtimeout is not None: + if not isinstance(wtimeout, int): + raise TypeError(f"wtimeout must be an integer, not {type(wtimeout)}") + if wtimeout < 0: + raise ValueError("wtimeout cannot be less than 0") + self.__document["wtimeout"] = wtimeout + + if j is not None: + validate_boolean("j", j) + self.__document["j"] = j + + if fsync is not None: + validate_boolean("fsync", fsync) + if j and fsync: + raise ConfigurationError("Can't set both j and fsync at the same time") + self.__document["fsync"] = fsync + + if w == 0 and j is True: + raise ConfigurationError("Cannot set w to 0 and j to True") + + if w is not None: + if isinstance(w, int): + if w < 0: + raise ValueError("w cannot be less than 0") + self.__acknowledged = w > 0 + elif not isinstance(w, str): + raise TypeError(f"w must be an integer or string, not {type(w)}") + self.__document["w"] = w + + self.__server_default = not self.__document + + @property + def is_server_default(self) -> bool: + """Does this WriteConcern match the server default?""" + return self.__server_default + + @property + def document(self) -> dict[str, Any]: + """The document representation of this write concern. + + .. note:: + :class:`WriteConcern` is immutable. Mutating the value of + :attr:`document` does not mutate this :class:`WriteConcern`. + """ + return self.__document.copy() + + @property + def acknowledged(self) -> bool: + """If ``True`` write operations will wait for acknowledgement before + returning.
+ """ + return self.__acknowledged + + def __repr__(self) -> str: + return "WriteConcern({})".format( + ", ".join(f"{k}={v!r}" for k, v in self.__document.items()) + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, WriteConcern): + return self.__document == other.document + return NotImplemented + + def __ne__(self, other: Any) -> bool: + if isinstance(other, WriteConcern): + return self.__document != other.document + return NotImplemented + + +DEFAULT_WRITE_CONCERN = WriteConcern() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..623eb6c164 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,271 @@ +[build-system] +requires = ["hatchling>1.24","setuptools>=65.0","hatch-requirements-txt>=0.4.1"] +build-backend = "hatchling.build" + +[project] +name = "pymongo" +dynamic = ["version", "dependencies", "optional-dependencies"] +description = "PyMongo - the Official MongoDB Python driver" +readme = "README.md" +license = {file="LICENSE"} +requires-python = ">=3.9" +authors = [ + { name = "The MongoDB Python Team" }, +] +keywords = [ + "bson", + "gridfs", + "mongo", + "mongodb", + "pymongo", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Database", + "Typing :: Typed", +] + +[project.urls] +Homepage = "https://www.mongodb.org" +Documentation = "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/" +Source = "https://github.com/mongodb/mongo-python-driver" +Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" + +[dependency-groups] +dev = [] +pip = ["pip"] +gevent = ["gevent>=20.6.0"] +coverage = [ + "pytest-cov", + "coverage>=5,<=7.10.6" +] +mockupdb = [ + "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" +] +perf = ["simplejson>=3.17.0"] +typing = [ + "mypy==1.18.2", + "pyright==1.1.406", + "typing_extensions", + "pip" +] + +# Used to call hatch_build.py +[tool.hatch.build.hooks.custom] + +[tool.hatch.version] +path = "pymongo/_version.py" +validate-bump = false + +[tool.hatch.build.targets.wheel] +packages = ["bson","gridfs", "pymongo"] + +[tool.hatch.metadata.hooks.requirements_txt] +files = ["requirements.txt"] + +[tool.hatch.metadata.hooks.requirements_txt.optional-dependencies] +aws = ["requirements/aws.txt"] +docs = ["requirements/docs.txt"] +encryption = ["requirements/encryption.txt"] +gssapi = ["requirements/gssapi.txt"] +ocsp = ["requirements/ocsp.txt"] +snappy = ["requirements/snappy.txt"] +test = ["requirements/test.txt"] +zstd = ["requirements/zstd.txt"] + +[tool.pytest.ini_options] +minversion = "7" +addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-results/TEST-results.xml", "-m default or default_async"] +testpaths = ["test"] +log_cli_level = "INFO" +faulthandler_timeout = 1500 +asyncio_default_fixture_loop_scope = "session" 
+xfail_strict = true +filterwarnings = [ + "error", + # Internal warnings raised during tests. + "module:use an explicit session with no_cursor_timeout=True:UserWarning", + "module:serverselectiontimeoutms must be:UserWarning", + "module:Unsupported compressor:UserWarning", + "module:zlibcompressionlevel must be:UserWarning", + "module:Wire protocol compression with:UserWarning", + "module:GridIn property:DeprecationWarning", + "module:GridOut property:DeprecationWarning", + # pytest-asyncio known issue: https://github.com/pytest-dev/pytest-asyncio/issues/1032 + "module:.*WindowsSelectorEventLoopPolicy:DeprecationWarning", + "module:.*et_event_loop_policy:DeprecationWarning", + # TODO: Remove as part of PYTHON-3923. + "module:unclosed <socket.socket:ResourceWarning", +] diff --git a/requirements.txt b/requirements.txt new file mode 100644 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +dnspython>=2.6.1,<3.0.0 diff --git a/requirements/aws.txt b/requirements/aws.txt new file mode 100644 index 0000000000..06e30c11c3 --- /dev/null +++ b/requirements/aws.txt @@ -0,0 +1 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000..54ebf3625d --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,6 @@ +sphinx>=5.3,<9 +sphinx_rtd_theme>=2,<4 +readthedocs-sphinx-search~=0.3 +sphinxcontrib-shellcheck>=1,<2 +sphinx-autobuild>=2020.9.1 +furo==2025.9.25 diff --git a/requirements/encryption.txt b/requirements/encryption.txt new file mode 100644 index 0000000000..eec1c990f7 --- /dev/null +++ b/requirements/encryption.txt @@ -0,0 +1,3 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 +pymongocrypt>=1.13.0,<2.0.0 +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' diff --git a/requirements/gssapi.txt b/requirements/gssapi.txt new file mode 100644 index 0000000000..7f156b9cea --- /dev/null +++ b/requirements/gssapi.txt @@ -0,0 +1,2 @@ +pykerberos;os.name!='nt' +winkerberos>=0.5.0;os.name=='nt' diff --git a/requirements/ocsp.txt b/requirements/ocsp.txt new file mode 100644 index 0000000000..39dbddef14 --- /dev/null +++ b/requirements/ocsp.txt @@ -0,0 +1,12 @@ +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS.
+# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' +pyopenssl>=17.2.0 +requests<3.0.0 +cryptography>=2.5 +service_identity>=18.1.0 diff --git a/requirements/snappy.txt b/requirements/snappy.txt new file mode 100644 index 0000000000..9bb71204b8 --- /dev/null +++ b/requirements/snappy.txt @@ -0,0 +1 @@ +python-snappy diff --git a/requirements/test.txt b/requirements/test.txt new file mode 100644 index 0000000000..566cade7ec --- /dev/null +++ b/requirements/test.txt @@ -0,0 +1,3 @@ +pytest>=8.2 +pytest-asyncio>=0.24.0 +importlib_metadata>=7.0;python_version < "3.13" diff --git a/requirements/zstd.txt b/requirements/zstd.txt new file mode 100644 index 0000000000..864700d2b3 --- /dev/null +++ b/requirements/zstd.txt @@ -0,0 +1 @@ +zstandard diff --git a/sbom.json b/sbom.json new file mode 100644 index 0000000000..56e27f5361 --- /dev/null +++ b/sbom.json @@ -0,0 +1,11 @@ +{ + "metadata": { + "timestamp": "2024-05-02T17:36:12.698229+00:00" + }, + "components": [], + "serialNumber": "urn:uuid:9876a8a6-060e-486f-b128-910aecf0fe7b", + "version": 1, + "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", + "bomFormat": "CycloneDX", + "specVersion": "1.5" + } \ No newline at end of file diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index 0efe5eda3f..f371b3d75b --- a/setup.py +++ b/setup.py @@ -1,327 +1,8 @@ -import glob -import os -import platform -import re -import subprocess -import sys -import warnings +from __future__ import annotations -# Hack to silence atexit traceback in newer python versions. -try: - import multiprocessing -except ImportError: - pass - -try: - from ConfigParser import SafeConfigParser -except ImportError: - # PY3 - from configparser import SafeConfigParser - -# Don't force people to install setuptools unless -# we have to. -try: - from setuptools import setup -except ImportError: - from ez_setup import use_setuptools - use_setuptools() - from setuptools import setup - -from distutils.cmd import Command -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError -from distutils.errors import DistutilsPlatformError, DistutilsExecError -from distutils.core import Extension - -version = "2.7" - -f = open("README.rst") -try: - try: - readme_content = f.read() - except: - readme_content = "" -finally: - f.close() - -PY3 = sys.version_info[0] == 3 - -# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple -# ships are built with it. This is a problem starting with Xcode 5.1 -# since clang 3.4 errors out when it encounters unrecognized compiler -# flags. This hack removes -mno-fused-madd from the CFLAGS automatically -# generated by distutils for Apple provided pythons, allowing C extension -# builds to complete without error. The inspiration comes from older -# versions of distutils.sysconfig.get_config_vars. -if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower(): - from distutils.sysconfig import get_config_vars - res = get_config_vars() - for key in ('CFLAGS', 'PY_CFLAGS'): - if key in res: - flags = res[key] - flags = re.sub('-mno-fused-madd', '', flags) - res[key] = flags - -nose_config_options = { - 'with-xunit': '1', # Write out nosetests.xml for CI. - 'py3where': 'build', # Tell nose where to find tests under PY3. -} - -def write_nose_config(): - """Write out setup.cfg. Since py3where has to be set - for tests to run correctly in Python 3 we create this - on the fly. 
- """ - config = SafeConfigParser() - config.add_section('nosetests') - for opt, val in nose_config_options.items(): - config.set('nosetests', opt, val) - try: - cf = open('setup.cfg', 'w') - config.write(cf) - finally: - cf.close() - - -should_run_tests = False -if "test" in sys.argv or "nosetests" in sys.argv: - should_run_tests = True - - -class doc(Command): - - description = "generate or test documentation" - - user_options = [("test", "t", - "run doctests instead of generating documentation")] - - boolean_options = ["test"] - - def initialize_options(self): - self.test = False - - def finalize_options(self): - pass - - def run(self): - if self.test: - path = "doc/_build/doctest" - mode = "doctest" - else: - path = "doc/_build/%s" % version - mode = "html" - - try: - os.makedirs(path) - except: - pass - - status = subprocess.call(["sphinx-build", "-E", - "-b", mode, "doc", path]) - - if status: - raise RuntimeError("documentation step '%s' failed" % (mode,)) - - sys.stdout.write("\nDocumentation step '%s' performed, results here:\n" - " %s/\n" % (mode, path)) - - -if sys.platform == 'win32' and sys.version_info > (2, 6): - # 2.6's distutils.msvc9compiler can raise an IOError when failing to - # find the compiler - build_errors = (CCompilerError, DistutilsExecError, - DistutilsPlatformError, IOError) -else: - build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) - - -class custom_build_ext(build_ext): - """Allow C extension building to fail. - - The C extension speeds up BSON encoding, but is not essential. - """ - - warning_message = """ -******************************************************************** -WARNING: %s could not -be compiled. No C extensions are essential for PyMongo to run, -although they do result in significant speed improvements. -%s - -Please see the installation docs for solutions to build issues: - -http://api.mongodb.org/python/current/installation.html - -Here are some hints for popular operating systems: - -If you are seeing this message on Linux you probably need to -install GCC and/or the Python development package for your -version of Python. - -Debian and Ubuntu users should issue the following command: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, -Oracle Linux, Fedora, etc.) should issue the following command: - - $ sudo yum install gcc python-devel - -If you are seeing this message on Microsoft Windows please install -PyMongo using the MS Windows installer for your version of Python, -available on pypi here: - -http://pypi.python.org/pypi/pymongo/#downloads - -If you are seeing this message on OSX please read the documentation -here: - -http://api.mongodb.org/python/current/installation.html#osx -******************************************************************** -""" - - def run(self): - try: - build_ext.run(self) - except DistutilsPlatformError: - e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("Extension modules", - "There was an issue with " - "your platform configuration" - " - see above.")) - - def set_nose_options(self): - # Under python 3 we need to tell nose where to find the - # proper tests. 
if we built the C extensions this will be - # someplace like build/lib.-- - if PY3: - ver = '.'.join(map(str, sys.version_info[:2])) - lib_dirs = glob.glob(os.path.join('build', 'lib*' + ver)) - if lib_dirs: - nose_config_options['py3where'] = lib_dirs[0] - write_nose_config() - - def build_extension(self, ext): - name = ext.name - if sys.version_info[:3] >= (2, 4, 0): - try: - build_ext.build_extension(self, ext) - if should_run_tests: - self.set_nose_options() - except build_errors: - e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("The %s extension " - "module" % (name,), - "The output above " - "this warning shows how " - "the compilation " - "failed.")) - else: - warnings.warn(self.warning_message % ("The %s extension " - "module" % (name,), - "Please use Python >= 2.4 " - "to take advantage of the " - "extension.")) - -ext_modules = [Extension('bson._cbson', - include_dirs=['bson'], - sources=['bson/_cbsonmodule.c', - 'bson/time64.c', - 'bson/buffer.c', - 'bson/encoding_helpers.c']), - Extension('pymongo._cmessage', - include_dirs=['bson'], - sources=['pymongo/_cmessagemodule.c', - 'bson/buffer.c'])] - -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"], - "test_suite": "nose.collector" -} -if "--no_ext" in sys.argv: - sys.argv.remove("--no_ext") -elif (sys.platform.startswith("java") or - sys.platform == "cli" or - "PyPy" in sys.version): - sys.stdout.write(""" -*****************************************************\n -The optional C extensions are currently not supported\n -by this python implementation.\n -*****************************************************\n -""") -elif sys.byteorder == "big": - sys.stdout.write(""" -*****************************************************\n -The optional C extensions are currently not supported\n -on big endian platforms and will not be built.\n -Performance may be degraded.\n -*****************************************************\n -""") -else: - extra_opts['ext_modules'] = ext_modules - -if PY3: - extra_opts["use_2to3"] = True - if should_run_tests: - # Distribute isn't smart enough to copy the - # tests and run 2to3 on them. We don't want to - # install the test suite so only do this if we - # are testing. - # https://bitbucket.org/tarek/distribute/issue/233 - extra_opts["packages"].append("test") - extra_opts['package_data'] = {"test": ["certificates/ca.pem", - "certificates/client.pem"]} - # Hack to make "python3.x setup.py nosetests" work in python 3 - # otherwise it won't run 2to3 before running the tests. - if "nosetests" in sys.argv: - sys.argv.remove("nosetests") - sys.argv.append("test") - # All "nosetests" does is import and run nose.main. - extra_opts["test_suite"] = "nose.main" - -# This may be called a second time if -# we are testing with C extensions. 
-if should_run_tests: - write_nose_config() - -setup( - name="pymongo", - version=version, - description="Python driver for MongoDB ", - long_description=readme_content, - author="Mike Dirolf", - author_email="mongodb-user@googlegroups.com", - maintainer="Bernie Hackett", - maintainer_email="bernie@mongodb.com", - url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=[], - license="Apache License, Version 2.0", - tests_require=["nose"], - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.4", - "Programming Language :: Python :: 2.5", - "Programming Language :: Python :: 2.6", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.1", - "Programming Language :: Python :: 3.2", - "Programming Language :: Python :: 3.3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: Jython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database"], - cmdclass={"build_ext": custom_build_ext, - "doc": doc}, - **extra_opts +msg = ( + "PyMongo>=4.8 no longer supports building via setup.py, use python -m pip install instead. If " + "this is an editable install (-e) please upgrade to pip>=21.3 first: python -m pip install --upgrade pip" ) + +raise RuntimeError(msg) diff --git a/strict_pyrightconfig.json b/strict_pyrightconfig.json new file mode 100644 index 0000000000..9684598cd9 --- /dev/null +++ b/strict_pyrightconfig.json @@ -0,0 +1 @@ +{"strict": ["tests/test_typing_strict.py"]} \ No newline at end of file diff --git a/test/__init__.py b/test/__init__.py index 9448448fde..1ee2c283d6 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2010-2014 MongoDB, Inc. +# Copyright 2010-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,46 +12,1293 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Clean up databases after running `nosetests`. 
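# Editorial usage note (not part of this diff) for the setup.py stub above:
# builds and installs now go through pip and the hatchling backend, e.g.
#   python -m pip install .        # regular install from a checkout
#   python -m pip install -e .     # editable install; needs pip>=21.3
# Invoking setup.py directly raises the RuntimeError shown above.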
-""" +"""Synchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations +import asyncio +import gc +import inspect +import logging +import multiprocessing import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest import warnings +from inspect import iscoroutinefunction + +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import AutoReconnect +from pymongo.synchronous.uri_parser import parse_uri + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from contextlib import contextmanager +from functools import partial, wraps +from typing import Any, Callable, Dict, Generator, overload +from unittest import SkipTest +from urllib.parse import quote_plus import pymongo -from pymongo.errors import ConnectionFailure +import pymongo.errors +from bson.son import SON +from pymongo.common import partition_node +from pymongo.hello import HelloCompat +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test.helpers import client_knobs, global_knobs +from test.helpers_shared import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TLS_OPTIONS, + SystemCertsPatcher, + db_pwd, + db_user, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) +from test.version import Version + +_IS_SYNC = True + + +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" + + +class ClientContext: + client: MongoClient + + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI + + def __init__(self): + """Create a client and grab essential information from the server.""" + self.connection_attempts = [] + self.connected = False + self.w = None + self.nodes = set() + self.replica_set_name = None + self.cmd_line = None + self.server_status = None + self.version = Version(-1) # Needs to be comparable with Version + self.auth_enabled = False + self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None + self.is_mongos = False + self.mongoses = [] + self.is_rs = False + self.has_ipv6 = False + self.tls = False + self.tlsCertificateKeyFile = False + self.server_is_resolvable = is_server_resolvable() + self.default_client_options: Dict = {} + self.sessions_enabled = False + self.client = None # type: ignore + self.conn_lock = threading.Lock() + self.load_balancer = TEST_LOADBALANCER + self._fips_enabled = None + if self.load_balancer: + self.default_client_options["loadBalanced"] = True + if COMPRESSORS: + self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api + + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port + if client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + def uri(self): + """Return the MongoClient URI 
for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + pair = self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" + + @property + def hello(self): + if not self._hello: + if self.load_balancer: + self._hello = self.client.admin.command(HelloCompat.CMD) + else: + self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello + + def _connect(self, host, port, **kwargs): + kwargs.update(self.default_client_options) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) + try: + try: + client.admin.command("ping") # Can we connect? + except pymongo.errors.OperationFailure as exc: + # SERVER-32063 + self.connection_attempts.append( + f"connected client {client!r}, but legacy hello failed: {exc}" + ) + else: + self.connection_attempts.append(f"successfully connected client {client!r}") + # If connected, then return client with default timeout + return pymongo.MongoClient(host, port, **kwargs) + except pymongo.errors.ConnectionFailure as exc: + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") + return None + finally: + client.close() + + def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] + self.client = self._connect(host, port) + + if HAVE_SSL and not self.client: + # Is MongoDB configured for SSL? + self.client = self._connect(host, port, **TLS_OPTIONS) + if self.client: + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True + + if self.client: + self.connected = True + + try: + self.cmd_line = self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() + + if self.auth_enabled: + if not IS_SRV: + # See if db_user already exists. + if not self._check_user_provided(): + _create_user(self.client.admin, db_user, db_pwd) + + if self.client: + self.client.close() + + self.client = self._connect( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + + # May not have this if OperationFailure was raised earlier. + self.cmd_line = self.client.admin.command("getCmdLineOpts") + + self.server_status = self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False + + hello = self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello + + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) + self.is_rs = True + if self.client: + self.client.close() + if self.auth_enabled: + # It doesn't matter which member we use as the seed here. 
+ self.client = pymongo.MongoClient( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + else: + self.client = pymongo.MongoClient( + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. + self._hello = None + hello = self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) + self.nodes = set(nodes) + else: + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 + self.version = Version.from_client(self.client) + + self.server_parameters = self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: + self.test_commands_enabled = True + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: + self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = self._server_started_with_ipv6() + + self.is_mongos = (self.hello).get("msg") == "isdbgrid" + if self.is_mongos: + address = self.client.address + self.mongoses.append(address) + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + mongos_client.close() + + def init(self): + with self.conn_lock: + if not self.client and not self.connection_attempts: + self._init_client() + + def connection_attempt_info(self): + return "\n".join(self.connection_attempts) + + @property + def host(self): + if self.is_rs and not IS_SRV: + primary = self.client.primary + return str(primary[0]) if primary is not None else host + return host + + @property + def port(self): + if self.is_rs and not IS_SRV: + primary = self.client.primary + return primary[1] if primary is not None else port + return port + + @property + def pair(self): + return "%s:%d" % (self.host, self.port) + + @property + def has_secondaries(self): + if not self.client: + return False + return bool(len(self.client.secondaries)) + + @property + def storage_engine(self): + try: + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) + except AttributeError: + # Raised if self.server_status is None. 
+ return None + + @property + def fips_enabled(self): + if self._fips_enabled is not None: + return self._fips_enabled + try: + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) + self._fips_enabled = True + except (subprocess.SubprocessError, FileNotFoundError): + self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") + return self._fips_enabled + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + + def _check_user_provided(self): + """Return True if db_user/db_password is already an admin user.""" + client: MongoClient = pymongo.MongoClient( + host, + port, + username=db_user, + password=db_pwd, + **self.default_client_options, + ) + + try: + return db_user in _all_users(client.admin) + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: + # Auth failed. + return False + else: + raise + finally: + client.close() + + def _server_started_with_auth(self): + # MongoDB >= 2.0 + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] + # MongoDB >= 2.6 + if "security" in parsed: + security = parsed["security"] + # >= rc3 + if "authorization" in security: + return security["authorization"] == "enabled" + # < rc3 + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) + # Legacy + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv + + def _server_started_with_ipv6(self): + if not socket.has_ipv6: + return False + + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): + return False + else: + if "--ipv6" not in self.cmd_line["argv"]: + return False + + # The server was started with --ipv6. Is there an IPv6 route to it? 
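+        # getaddrinfo returns one (family, type, proto, canonname, sockaddr)
+        # tuple per resolved address; an AF_INET6 family entry means the host
+        # resolves to an IPv6 address we could reach.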
+ try: + for info in socket.getaddrinfo(self.host, self.port): + if info[0] == socket.AF_INET6: + return True + except OSError: + pass + + return False + + def _require(self, condition, msg, func=None): + def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + + @wraps(f) + def wrap(*args, **kwargs): + self.init() + # Always raise SkipTest if we can't connect to MongoDB + if not self.connected: + pair = self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition): + if condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) + + return wrap + + if func is None: + + def decorate(f): + return make_wrapper(f) + + return decorate + return make_wrapper(func) + + def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): + kwargs["writeConcern"] = {"w": self.w} + return _create_user(self.client[dbname], user, pwd, roles, **kwargs) + + def drop_user(self, dbname, user): + self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) + + def require_connection(self, func): + """Run a test only if we can connect to MongoDB.""" + return self._require( + lambda: True, # _require checks if we're connected + "Cannot connect to MongoDB on self.pair", + func=func, + ) + + def require_version_min(self, *ver): + """Run a test only if the server version is at least ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) + + def require_version_max(self, *ver): + """Run a test only if the server version is at most ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) + + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + + def require_auth(self, func): + """Run a test only if the server is running with auth enabled.""" + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) + + def require_no_auth(self, func): + """Run a test only if the server is running without auth enabled.""" + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) + + def require_no_fips(self, func): + """Run a test only if the host does not have FIPS enabled.""" + return self._require( + lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", 
func=func + ) + + def require_replica_set(self, func): + """Run a test only if the client is connected to a replica set.""" + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) + + def require_secondaries_count(self, count): + """Run a test only if the client is connected to a replica set that has + `count` secondaries. + """ + + def sec_count(): + return 0 if not self.client else len(self.client.secondaries) + + def check(): + return sec_count() >= count + + return self._require(check, "Not enough secondaries available") + + @property + def supports_secondary_read_pref(self): + if self.has_secondaries: + return True + if self.is_mongos: + shard = (self.client.config.shards.find_one())["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) + + def require_no_replica_set(self, func): + """Run a test if the client is *not* connected to a replica set.""" + return self._require( + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) + + def require_ipv6(self, func): + """Run a test only if the client can connect to a server via IPv6.""" + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) + + def require_no_mongos(self, func): + """Run a test only if the client is not connected to a mongos.""" + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) + + def require_mongos(self, func): + """Run a test only if the client is connected to a mongos.""" + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) + + def require_multiple_mongoses(self, func): + """Run a test only if the client is connected to a sharded cluster + that has 2 mongos nodes. 
+ """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) + + def require_standalone(self, func): + """Run a test only if the client is connected to a standalone.""" + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) + + def require_no_standalone(self, func): + """Run a test only if the client is not connected to a standalone.""" + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_standalone(func) + + def is_topology_type(self, topologies): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): + return True + if "replicaset" in topologies and self.is_rs: + return True + if "sharded" in topologies and self.is_mongos: + return True + return False + + def require_cluster_type(self, topologies=None): + """Run a test only if the client is connected to a cluster that + conforms to one of the specified topologies. Acceptable topologies + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] + + def _is_valid_topology(): + return self.is_topology_type(topologies) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) + + def require_test_commands(self, func): + """Run a test only if the server has test commands enabled.""" + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) + + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 and SERVER-49336. 
+        return self._require(
+            lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)),
+            "failCommand appName must be supported",
+            func=func,
+        )
+
+    def require_failCommand_blockConnection(self, func):
+        """Run a test only if the server supports failCommand blockConnection."""
+        return self._require(
+            lambda: (
+                self.test_commands_enabled
+                and (
+                    (not self.is_mongos and self.version >= (4, 2, 9))
+                    or (self.is_mongos and self.version >= (4, 4))
+                )
+            ),
+            "failCommand blockConnection is not supported",
+            func=func,
+        )
+
+    def require_tls(self, func):
+        """Run a test only if the client can connect over TLS."""
+        return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func)
+
+    def require_no_tls(self, func):
+        """Run a test only if the client can connect without TLS."""
+        return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func)
+
+    def require_tlsCertificateKeyFile(self, func):
+        """Run a test only if the client can connect with tlsCertificateKeyFile."""
+        return self._require(
+            lambda: self.tlsCertificateKeyFile,
+            "Must be able to connect with tlsCertificateKeyFile",
+            func=func,
+        )
+
+    def require_server_resolvable(self, func):
+        """Run a test only if the hostname 'server' is resolvable."""
+        return self._require(
+            lambda: self.server_is_resolvable,
+            "No hosts entry for 'server'. Cannot validate hostname in the certificate",
+            func=func,
+        )
+
+    def require_sessions(self, func):
+        """Run a test only if the deployment supports sessions."""
+        return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func)
+
+    def supports_retryable_writes(self):
+        if not self.sessions_enabled:
+            return False
+        return self.is_mongos or self.is_rs
+
+    def require_retryable_writes(self, func):
+        """Run a test only if the deployment supports retryable writes."""
+        return self._require(
+            self.supports_retryable_writes,
+            "This server does not support retryable writes",
+            func=func,
+        )
-# hostnames retrieved by MongoReplicaSetClient from isMaster will be of unicode
-# type in Python 2, so ensure these hostnames are unicodes, too. It makes tests
-# like `test_repr` predictable.
-host = unicode(os.environ.get("DB_IP", 'localhost'))
-port = int(os.environ.get("DB_PORT", 27017))
-pair = '%s:%d' % (host, port)
+    def supports_transactions(self):
+        if self.version.at_least(4, 1, 8):
+            return self.is_mongos or self.is_rs
-host2 = unicode(os.environ.get("DB_IP2", 'localhost'))
-port2 = int(os.environ.get("DB_PORT2", 27018))
+        if self.version.at_least(4, 0):
+            return self.is_rs
+
+        return False
+
+    def require_transactions(self, func):
+        """Run a test only if the deployment might support transactions.
+
+        *Might* because this does not test the storage engine or FCV.
+ """ + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) + + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) + + def require_sync(self, func): + """Run a test only if using the synchronous API.""" + return self._require( + lambda: _IS_SYNC, "This test only works with the synchronous API", func=func + ) + + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) + + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return self.version.at_least(4, 1, 5) and self.test_commands_enabled + else: + return self.version.at_least(4, 0) and self.test_commands_enabled + + @property + def requires_hint_with_min_max_queries(self): + """Does the server require a hint with min/max queries.""" + # Changed in SERVER-39567. + return self.version.at_least(4, 1, 10) + + @property + def max_bson_size(self): + return (self.hello)["maxBsonObjectSize"] + + @property + def max_write_batch_size(self): + return (self.hello)["maxWriteBatchSize"] + + @property + def max_message_size_bytes(self): + return (self.hello)["maxMessageSizeBytes"] + + +# Reusable client context +client_context = ClientContext() + +# Global event loop for async tests. +LOOP = None + + +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP + + +class PyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by TestCase. 
+ def setUp(self): + pass + + def tearDown(self): + pass + + def addCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.setUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.tearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) + + def assertEqualCommand(self, expected, actual, msg=None): + self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) + + def assertEqualReply(self, expected, actual, msg=None): + self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + + @staticmethod + def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + client.admin.command(cmd) + + @contextmanager + def fail_point(self, command_args): + self.configure_fail_point(client_context.client, command_args) + try: + yield + finally: + self.configure_fail_point(client_context.client, command_args, off=True) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
+                        proc.kill()
+                        proc.join(1)
+                self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?")
+            self.assertEqual(proc.exitcode, 0)
+
+    @classmethod
+    def _unmanaged_async_mongo_client(
+        cls, host, port, authenticate=True, directConnection=None, **kwargs
+    ):
+        """Create a new client over SSL/TLS if necessary."""
+        host = host or client_context.host
+        port = port or client_context.port
+        client_options: dict = client_context.default_client_options.copy()
+        if client_context.replica_set_name and not directConnection:
+            client_options["replicaSet"] = client_context.replica_set_name
+        if directConnection is not None:
+            client_options["directConnection"] = directConnection
+        client_options.update(kwargs)
+
+        uri = _connection_string(host)
+        auth_mech = kwargs.get("authMechanism", "")
+        if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC":
+            # Only add the default username or password if one is not provided.
+            res = parse_uri(uri)
+            if (
+                not res["username"]
+                and not res["password"]
+                and "username" not in client_options
+                and "password" not in client_options
+            ):
+                client_options["username"] = db_user
+                client_options["password"] = db_pwd
+        client = MongoClient(uri, port, **client_options)
+        if client._options.connect:
+            client._connect()
+        return client
+
+    def _async_mongo_client(self, host, port, authenticate=True, directConnection=None, **kwargs):
+        """Create a new client over SSL/TLS if necessary."""
+        host = host or client_context.host
+        port = port or client_context.port
+        client_options: dict = client_context.default_client_options.copy()
+        if client_context.replica_set_name and not directConnection:
+            client_options["replicaSet"] = client_context.replica_set_name
+        if directConnection is not None:
+            client_options["directConnection"] = directConnection
+        client_options.update(kwargs)
+
+        uri = _connection_string(host)
+        auth_mech = kwargs.get("authMechanism", "")
+        if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC":
+            # Only add the default username or password if one is not provided.
+            res = parse_uri(uri)
+            if (
+                not res["username"]
+                and not res["password"]
+                and "username" not in client_options
+                and "password" not in client_options
+            ):
+                client_options["username"] = db_user
+                client_options["password"] = db_pwd
+        client = MongoClient(uri, port, **client_options)
+        if client._options.connect:
+            client._connect()
+        self.addCleanup(client.close)
+        return client
+
+    @classmethod
+    def unmanaged_single_client_noauth(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Make a direct connection. Don't authenticate."""
+        return cls._unmanaged_async_mongo_client(
+            h, p, authenticate=False, directConnection=True, **kwargs
+        )
+
+    @classmethod
+    def unmanaged_single_client(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Make a direct connection, and authenticate if necessary."""
+        return cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs)
+
+    @classmethod
+    def unmanaged_rs_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]:
+        """Connect to the replica set and authenticate if necessary."""
+        return cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+    @classmethod
+    def unmanaged_rs_client_noauth(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Connect to the replica set. Don't authenticate."""
+        return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    @classmethod
+    def unmanaged_rs_or_single_client_noauth(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+        Don't authenticate."""
+        return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    @classmethod
+    def unmanaged_rs_or_single_client(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+        Authenticate if necessary."""
+        return cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+    def single_client_noauth(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Make a direct connection. Don't authenticate."""
+        return self._async_mongo_client(h, p, authenticate=False, directConnection=True, **kwargs)
+
+    def single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]:
+        """Make a direct connection, and authenticate if necessary."""
+        return self._async_mongo_client(h, p, directConnection=True, **kwargs)
+
+    def rs_client_noauth(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]:
+        """Connect to the replica set. Don't authenticate."""
+        return self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    def rs_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]:
+        """Connect to the replica set and authenticate if necessary."""
+        return self._async_mongo_client(h, p, **kwargs)
+
+    def rs_or_single_client_noauth(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> MongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Like rs_or_single_client, but does not authenticate.
+        """
+        return self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    def rs_or_single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Authenticates if necessary.
+ """ + return self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + self.addCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + return client + + def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + + +class UnitTest(PyMongoTestCase): + """Async base class for TestCases that don't require a connection to MongoDB.""" + + def setUp(self) -> None: + pass + + def tearDown(self) -> None: + pass + + +class IntegrationTest(PyMongoTestCase): + """Async base class for TestCases that need a connection to MongoDB to pass.""" + + client: MongoClient[dict] + db: Database + credentials: Dict[str, str] + + @client_context.require_connection + def setUp(self) -> None: + if client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + self.client = client_context.client + self.db = self.client.pymongo_test + if client_context.auth_enabled: + self.credentials = {"username": db_user, "password": db_pwd} + else: + self.credentials = {} + + def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + c.delete_many({}) + c.drop_indexes() + + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + + +class MockClientTest(UnitTest): + """Base class for TestCases that use MockClient. + + This class is *not* an IntegrationTest: if properly written, MockClient + tests do not require a running server. + + The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests. + """ + + # MockClients tests that use replicaSet, directConnection=True, pass + # multiple seed addresses, or wait for heartbeat events are incompatible + # with loadBalanced=True. + @client_context.require_no_load_balancer + def setUp(self) -> None: + super().setUp() + + self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) + self.client_knobs.enable() + + def tearDown(self) -> None: + self.client_knobs.disable() + super().tearDown() -host3 = unicode(os.environ.get("DB_IP3", 'localhost')) -port3 = int(os.environ.get("DB_PORT3", 27019)) -# Make sure warnings are always raised, regardless of -# python version. def setup(): + if not _IS_SYNC: + # Set up the event loop. + get_loop() + client_context.init() warnings.resetwarnings() warnings.simplefilter("always") + global_knobs.enable() def teardown(): - try: - c = pymongo.MongoClient(host, port) - except ConnectionFailure: - # Tests where ssl=True can cause connection failures here. - # Ignore and continue. 
- return - - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + print_running_clients() + + +@contextmanager +def simple_test_client(): + client_context.init() + yield client_context.client + client_context.client.close() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +def print_running_clients(): + from pymongo.synchronous.topology import Topology + + processed = set() + # Avoid false positives on the main test client. + # XXX: Can be removed after PYTHON-1634 or PYTHON-1896. + c = client_context.client + if c: + processed.add(c._topology._topology_id) + # Call collect to manually cleanup any would-be gc'd clients to avoid + # false positives. + gc.collect() + for obj in gc.get_objects(): + try: + if isinstance(obj, Topology): + # Avoid printing the same Topology multiple times. + if obj._topology_id in processed: + continue + print_running_topology(obj) + processed.add(obj._topology_id) + except ReferenceError: + pass + + +def _all_users(db): + return {u["user"] for u in (db.command("usersInfo")).get("users", [])} + + +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) + + +def connected(client): + """Convenience to wait for a newly-constructed client to connect.""" + with warnings.catch_warnings(): + # Ignore warning that ping is always routed to primary even + # if client's read preference isn't PRIMARY. + warnings.simplefilter("ignore", UserWarning) + client.admin.command("ping") # Force connection. + + return client + + +def drop_collections(db: Database): + # Drop all non-system collections in this database. + for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): + db.drop_collection(coll) + + +def remove_all_users(db: Database): + db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py new file mode 100644 index 0000000000..78d0576add --- /dev/null +++ b/test/asynchronous/__init__.py @@ -0,0 +1,1320 @@ +# Copyright 2010-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Asynchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations + +import asyncio +import gc +import inspect +import logging +import multiprocessing +import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest +import warnings +from inspect import iscoroutinefunction + +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import AutoReconnect + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from contextlib import asynccontextmanager, contextmanager +from functools import partial, wraps +from typing import Any, Callable, Dict, Generator, overload +from unittest import SkipTest +from urllib.parse import quote_plus + +import pymongo +import pymongo.errors +from bson.son import SON +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import partition_node +from pymongo.hello import HelloCompat +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] + +sys.path[0:0] = [""] + +from test.asynchronous.helpers import client_knobs, global_knobs +from test.helpers_shared import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TLS_OPTIONS, + SystemCertsPatcher, + db_pwd, + db_user, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) +from test.version import Version + +_IS_SYNC = False + + +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" + + +class AsyncClientContext: + client: AsyncMongoClient + + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI + + def __init__(self): + """Create a client and grab essential information from the server.""" + self.connection_attempts = [] + self.connected = False + self.w = None + self.nodes = set() + self.replica_set_name = None + self.cmd_line = None + self.server_status = None + self.version = Version(-1) # Needs to be comparable with Version + self.auth_enabled = False + self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None + self.is_mongos = False + self.mongoses = [] + self.is_rs = False + self.has_ipv6 = False + self.tls = False + self.tlsCertificateKeyFile = False + self.server_is_resolvable = is_server_resolvable() + self.default_client_options: Dict = {} + self.sessions_enabled = False + self.client = None # type: ignore + self.conn_lock = threading.Lock() + self.load_balancer = TEST_LOADBALANCER + self._fips_enabled = None + if self.load_balancer: + self.default_client_options["loadBalanced"] = True + if COMPRESSORS: + self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api + + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port + if async_client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + async def 
uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if async_client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + pair = await self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" + + @property + async def hello(self): + if not self._hello: + if self.load_balancer: + self._hello = await self.client.admin.command(HelloCompat.CMD) + else: + self._hello = await self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello + + async def _connect(self, host, port, **kwargs): + kwargs.update(self.default_client_options) + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) + try: + try: + await client.admin.command("ping") # Can we connect? + except pymongo.errors.OperationFailure as exc: + # SERVER-32063 + self.connection_attempts.append( + f"connected client {client!r}, but legacy hello failed: {exc}" + ) + else: + self.connection_attempts.append(f"successfully connected client {client!r}") + # If connected, then return client with default timeout + return pymongo.AsyncMongoClient(host, port, **kwargs) + except pymongo.errors.ConnectionFailure as exc: + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") + return None + finally: + await client.close() + + async def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] + self.client = await self._connect(host, port) + + if HAVE_SSL and not self.client: + # Is MongoDB configured for SSL? + self.client = await self._connect(host, port, **TLS_OPTIONS) + if self.client: + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True + + if self.client: + self.connected = True + + try: + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() + + if self.auth_enabled: + if not IS_SRV: + # See if db_user already exists. + if not await self._check_user_provided(): + await _create_user(self.client.admin, db_user, db_pwd) + + if self.client: + await self.client.close() + + self.client = await self._connect( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + + # May not have this if OperationFailure was raised earlier. + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + + self.server_status = await self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False + + hello = await self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello + + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) + self.is_rs = True + if self.client: + await self.client.close() + if self.auth_enabled: + # It doesn't matter which member we use as the seed here. 
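+                # (Any one member is a valid seed: with replicaSet set,
+                # the driver discovers the remaining members from the
+                # hello response's "hosts" list.)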
+ self.client = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + else: + self.client = pymongo.AsyncMongoClient( + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. + self._hello = None + hello = await self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) + self.nodes = set(nodes) + else: + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 + self.version = await Version.async_from_client(self.client) + + self.server_parameters = await self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: + self.test_commands_enabled = True + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: + self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = await self._server_started_with_ipv6() + + self.is_mongos = (await self.hello).get("msg") == "isdbgrid" + if self.is_mongos: + address = await self.client.address + self.mongoses.append(address) + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = await self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + await mongos_client.close() + + async def init(self): + with self.conn_lock: + if not self.client and not self.connection_attempts: + await self._init_client() + + def connection_attempt_info(self): + return "\n".join(self.connection_attempts) + + @property + async def host(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return str(primary[0]) if primary is not None else host + return host + + @property + async def port(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return primary[1] if primary is not None else port + return port + + @property + async def pair(self): + return "%s:%d" % (await self.host, await self.port) + + @property + async def has_secondaries(self): + if not self.client: + return False + return bool(len(await self.client.secondaries)) + + @property + def storage_engine(self): + try: + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) + except AttributeError: + # Raised if self.server_status is None. 
+ return None + + @property + def fips_enabled(self): + if self._fips_enabled is not None: + return self._fips_enabled + try: + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) + self._fips_enabled = True + except (subprocess.SubprocessError, FileNotFoundError): + self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") + return self._fips_enabled + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + + async def _check_user_provided(self): + """Return True if db_user/db_password is already an admin user.""" + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + **self.default_client_options, + ) + + try: + return db_user in await _all_users(client.admin) + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: + # Auth failed. + return False + else: + raise + finally: + await client.close() + + def _server_started_with_auth(self): + # MongoDB >= 2.0 + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] + # MongoDB >= 2.6 + if "security" in parsed: + security = parsed["security"] + # >= rc3 + if "authorization" in security: + return security["authorization"] == "enabled" + # < rc3 + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) + # Legacy + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv + + async def _server_started_with_ipv6(self): + if not socket.has_ipv6: + return False + + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): + return False + else: + if "--ipv6" not in self.cmd_line["argv"]: + return False + + # The server was started with --ipv6. Is there an IPv6 route to it? 
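+        # getaddrinfo returns one (family, type, proto, canonname, sockaddr)
+        # tuple per resolved address; an AF_INET6 family entry means the host
+        # resolves to an IPv6 address we could reach.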
+ try: + for info in socket.getaddrinfo(await self.host, await self.port): + if info[0] == socket.AF_INET6: + return True + except OSError: + pass + + return False + + def _require(self, condition, msg, func=None): + def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + + @wraps(f) + async def wrap(*args, **kwargs): + await self.init() + # Always raise SkipTest if we can't connect to MongoDB + if not self.connected: + pair = await self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition): + if await condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", await self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) + + return wrap + + if func is None: + + def decorate(f): + return make_wrapper(f) + + return decorate + return make_wrapper(func) + + async def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): + kwargs["writeConcern"] = {"w": self.w} + return await _create_user(self.client[dbname], user, pwd, roles, **kwargs) + + async def drop_user(self, dbname, user): + await self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) + + def require_connection(self, func): + """Run a test only if we can connect to MongoDB.""" + return self._require( + lambda: True, # _require checks if we're connected + "Cannot connect to MongoDB on self.pair", + func=func, + ) + + def require_version_min(self, *ver): + """Run a test only if the server version is at least ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) + + def require_version_max(self, *ver): + """Run a test only if the server version is at most ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) + + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + + def require_auth(self, func): + """Run a test only if the server is running with auth enabled.""" + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) + + def require_no_auth(self, func): + """Run a test only if the server is running without auth enabled.""" + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) + + def require_no_fips(self, func): + """Run a test only if the host does not have FIPS enabled.""" + return self._require( + 
lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func + ) + + def require_replica_set(self, func): + """Run a test only if the client is connected to a replica set.""" + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) + + def require_secondaries_count(self, count): + """Run a test only if the client is connected to a replica set that has + `count` secondaries. + """ + + async def sec_count(): + return 0 if not self.client else len(await self.client.secondaries) + + async def check(): + return await sec_count() >= count + + return self._require(check, "Not enough secondaries available") + + @property + async def supports_secondary_read_pref(self): + if await self.has_secondaries: + return True + if self.is_mongos: + shard = (await self.client.config.shards.find_one())["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) + + def require_no_replica_set(self, func): + """Run a test if the client is *not* connected to a replica set.""" + return self._require( + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) + + def require_ipv6(self, func): + """Run a test only if the client can connect to a server via IPv6.""" + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) + + def require_no_mongos(self, func): + """Run a test only if the client is not connected to a mongos.""" + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) + + def require_mongos(self, func): + """Run a test only if the client is connected to a mongos.""" + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) + + def require_multiple_mongoses(self, func): + """Run a test only if the client is connected to a sharded cluster + that has 2 mongos nodes. 
+ """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) + + def require_standalone(self, func): + """Run a test only if the client is connected to a standalone.""" + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) + + def require_no_standalone(self, func): + """Run a test only if the client is not connected to a standalone.""" + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_standalone(func) + + async def is_topology_type(self, topologies): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): + return True + if "replicaset" in topologies and self.is_rs: + return True + if "sharded" in topologies and self.is_mongos: + return True + return False + + def require_cluster_type(self, topologies=None): + """Run a test only if the client is connected to a cluster that + conforms to one of the specified topologies. Acceptable topologies + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] + + async def _is_valid_topology(): + return await self.is_topology_type(topologies) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) + + def require_test_commands(self, func): + """Run a test only if the server has test commands enabled.""" + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) + + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 and SERVER-49336. 
+        return self._require(
+            lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)),
+            "failCommand appName must be supported",
+            func=func,
+        )
+
+    def require_failCommand_blockConnection(self, func):
+        """Run a test only if the server supports failCommand blockConnection."""
+        return self._require(
+            lambda: (
+                self.test_commands_enabled
+                and (
+                    (not self.is_mongos and self.version >= (4, 2, 9))
+                    or (self.is_mongos and self.version >= (4, 4))
+                )
+            ),
+            "failCommand blockConnection is not supported",
+            func=func,
+        )
+
+    def require_tls(self, func):
+        """Run a test only if the client can connect over TLS."""
+        return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func)
+
+    def require_no_tls(self, func):
+        """Run a test only if the client can connect without TLS."""
+        return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func)
+
+    def require_tlsCertificateKeyFile(self, func):
+        """Run a test only if the client can connect with tlsCertificateKeyFile."""
+        return self._require(
+            lambda: self.tlsCertificateKeyFile,
+            "Must be able to connect with tlsCertificateKeyFile",
+            func=func,
+        )
+
+    def require_server_resolvable(self, func):
+        """Run a test only if the hostname 'server' is resolvable."""
+        return self._require(
+            lambda: self.server_is_resolvable,
+            "No hosts entry for 'server'. Cannot validate hostname in the certificate",
+            func=func,
+        )
+
+    def require_sessions(self, func):
+        """Run a test only if the deployment supports sessions."""
+        return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func)
+
+    def supports_retryable_writes(self):
+        if not self.sessions_enabled:
+            return False
+        return self.is_mongos or self.is_rs
+
+    def require_retryable_writes(self, func):
+        """Run a test only if the deployment supports retryable writes."""
+        return self._require(
+            self.supports_retryable_writes,
+            "This server does not support retryable writes",
+            func=func,
+        )
+
+    def supports_transactions(self):
+        if self.version.at_least(4, 1, 8):
+            return self.is_mongos or self.is_rs
+
+        if self.version.at_least(4, 0):
+            return self.is_rs
+
+        return False
+
+    def require_transactions(self, func):
+        """Run a test only if the deployment might support transactions.
+
+        *Might* because this does not test the storage engine or FCV.
+ """ + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) + + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) + + def require_sync(self, func): + """Run a test only if using the synchronous API.""" + return self._require( + lambda: _IS_SYNC, "This test only works with the synchronous API", func=func + ) + + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) + + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return self.version.at_least(4, 1, 5) and self.test_commands_enabled + else: + return self.version.at_least(4, 0) and self.test_commands_enabled + + @property + def requires_hint_with_min_max_queries(self): + """Does the server require a hint with min/max queries.""" + # Changed in SERVER-39567. + return self.version.at_least(4, 1, 10) + + @property + async def max_bson_size(self): + return (await self.hello)["maxBsonObjectSize"] + + @property + async def max_write_batch_size(self): + return (await self.hello)["maxWriteBatchSize"] + + @property + async def max_message_size_bytes(self): + return (await self.hello)["maxMessageSizeBytes"] + + +# Reusable client context +async_client_context = AsyncClientContext() + +# Global event loop for async tests. +LOOP = None + + +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP + + +class AsyncPyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by IsolatedAsyncioTestCase. 
+ async def asyncSetUp(self): + pass + + async def asyncTearDown(self): + pass + + def addAsyncCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.asyncSetUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.asyncTearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) + + def assertEqualCommand(self, expected, actual, msg=None): + self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) + + def assertEqualReply(self, expected, actual, msg=None): + self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + + @staticmethod + async def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + await client.admin.command(cmd) + + @asynccontextmanager + async def fail_point(self, command_args): + await self.configure_fail_point(async_client_context.client, command_args) + try: + yield + finally: + await self.configure_fail_point(async_client_context.client, command_args, off=True) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
+ proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) + + @classmethod + async def _unmanaged_async_mongo_client( + cls, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = await parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + return client + + async def _async_mongo_client( + self, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = await parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + self.addAsyncCleanup(client.close) + return client + + @classmethod + async def unmanaged_async_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + @classmethod + async def unmanaged_async_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. 
Authenticate if necessary."""
+        return await cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs)
+
+    @classmethod
+    async def unmanaged_async_rs_client(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set and authenticate if necessary."""
+        return await cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+    @classmethod
+    async def unmanaged_async_rs_client_noauth(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set. Don't authenticate."""
+        return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    @classmethod
+    async def unmanaged_async_rs_or_single_client_noauth(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Don't authenticate.
+        """
+        return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    @classmethod
+    async def unmanaged_async_rs_or_single_client(
+        cls, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Authenticates if necessary.
+        """
+        return await cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+    async def async_single_client_noauth(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Make a direct connection. Don't authenticate."""
+        return await self._async_mongo_client(
+            h, p, authenticate=False, directConnection=True, **kwargs
+        )
+
+    async def async_single_client(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Make a direct connection, and authenticate if necessary."""
+        return await self._async_mongo_client(h, p, directConnection=True, **kwargs)
+
+    async def async_rs_client_noauth(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set. Don't authenticate."""
+        return await self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    async def async_rs_client(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set and authenticate if necessary."""
+        return await self._async_mongo_client(h, p, **kwargs)
+
+    async def async_rs_or_single_client_noauth(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[dict]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Like rs_or_single_client, but does not authenticate.
+        """
+        return await self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+    async def async_rs_or_single_client(
+        self, h: Any = None, p: Any = None, **kwargs: Any
+    ) -> AsyncMongoClient[Any]:
+        """Connect to the replica set if there is one, otherwise the standalone.
+
+        Authenticates if necessary.
+ """ + return await self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + self.addAsyncCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + return client + + async def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in await client.secondaries: + secondary = await self.async_single_client(h, p) + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + async def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in await client.secondaries: + secondary = await self.async_single_client(h, p) + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + + +class AsyncUnitTest(AsyncPyMongoTestCase): + """Async base class for TestCases that don't require a connection to MongoDB.""" + + async def asyncSetUp(self) -> None: + pass + + async def asyncTearDown(self) -> None: + pass + + +class AsyncIntegrationTest(AsyncPyMongoTestCase): + """Async base class for TestCases that need a connection to MongoDB to pass.""" + + client: AsyncMongoClient[dict] + db: AsyncDatabase + credentials: Dict[str, str] + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + if async_client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + self.client = async_client_context.client + self.db = self.client.pymongo_test + if async_client_context.auth_enabled: + self.credentials = {"username": db_user, "password": db_pwd} + else: + self.credentials = {} + + async def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + await c.delete_many({}) + await c.drop_indexes() + + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + + +class AsyncMockClientTest(AsyncUnitTest): + """Base class for TestCases that use MockClient. + + This class is *not* an IntegrationTest: if properly written, MockClient + tests do not require a running server. + + The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests. + """ + + # MockClients tests that use replicaSet, directConnection=True, pass + # multiple seed addresses, or wait for heartbeat events are incompatible + # with loadBalanced=True. + @async_client_context.require_no_load_balancer + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) + self.client_knobs.enable() + + async def asyncTearDown(self) -> None: + self.client_knobs.disable() + await super().asyncTearDown() + + +async def async_setup(): + if not _IS_SYNC: + # Set up the event loop. 
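+        # Capturing the loop up front pins every later run_until_complete()
+        # call to a single loop, avoiding "attached to a different event
+        # loop" errors from objects created under another loop.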
+ get_loop() + await async_client_context.init() + warnings.resetwarnings() + warnings.simplefilter("always") + global_knobs.enable() + + +async def async_teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + print_running_clients() + + +@asynccontextmanager +async def async_simple_test_client(): + await async_client_context.init() + yield async_client_context.client + await async_client_context.client.close() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +def print_running_clients(): + from pymongo.asynchronous.topology import Topology + + processed = set() + # Avoid false positives on the main test client. + # XXX: Can be removed after PYTHON-1634 or PYTHON-1896. + c = async_client_context.client + if c: + processed.add(c._topology._topology_id) + # Call collect to manually cleanup any would-be gc'd clients to avoid + # false positives. + gc.collect() + for obj in gc.get_objects(): + try: + if isinstance(obj, Topology): + # Avoid printing the same Topology multiple times. + if obj._topology_id in processed: + continue + print_running_topology(obj) + processed.add(obj._topology_id) + except ReferenceError: + pass + + +async def _all_users(db): + return {u["user"] for u in (await db.command("usersInfo")).get("users", [])} + + +async def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return await authdb.command(cmd) + + +async def connected(client): + """Convenience to wait for a newly-constructed client to connect.""" + with warnings.catch_warnings(): + # Ignore warning that ping is always routed to primary even + # if client's read preference isn't PRIMARY. + warnings.simplefilter("ignore", UserWarning) + await client.admin.command("ping") # Force connection. + + return client + + +async def drop_collections(db: AsyncDatabase): + # Drop all non-system collections in this database. 
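+    # The negative lookahead ^(?!system\.) makes listCollections skip names
+    # beginning with "system." (e.g. system.profile, system.views).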
+ for coll in await db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): + await db.drop_collection(coll) + + +async def remove_all_users(db: AsyncDatabase): + await db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": async_client_context.w}) diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py new file mode 100644 index 0000000000..a27a9f213d --- /dev/null +++ b/test/asynchronous/conftest.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import asyncio +import sys +from test import pytest_conf +from test.asynchronous import async_setup, async_teardown + +import pytest +import pytest_asyncio + +_IS_SYNC = False + + +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # We explicitly use a different loop implementation here to prevent that issue + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + +@pytest_asyncio.fixture(scope="package", autouse=True) +async def test_setup_and_teardown(): + await async_setup() + yield + await async_teardown() + + +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py new file mode 100644 index 0000000000..892c629631 --- /dev/null +++ b/test/asynchronous/helpers.py @@ -0,0 +1,176 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" +from __future__ import annotations + +import asyncio +import threading +import traceback +from functools import wraps +from typing import Optional, no_type_check + +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +async def async_repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. 
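+    # replSetFreeze 0, sent to a secondary, cancels any freeze period so that
+    # member may immediately stand for election once the primary steps down.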
+ await client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + await client.admin.command(cmd) + + +class client_knobs: + def __init__( + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): + self.heartbeat_frequency = heartbeat_frequency + self.min_heartbeat_interval = min_heartbeat_interval + self.kill_cursor_frequency = kill_cursor_frequency + self.events_queue_frequency = events_queue_frequency + + self.old_heartbeat_frequency = None + self.old_min_heartbeat_interval = None + self.old_kill_cursor_frequency = None + self.old_events_queue_frequency = None + self._enabled = False + self._stack = None + + def enable(self): + self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY + self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL + self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY + self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY + + if self.heartbeat_frequency is not None: + common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency + + if self.min_heartbeat_interval is not None: + common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval + + if self.kill_cursor_frequency is not None: + common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency + + if self.events_queue_frequency is not None: + common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = "".join(traceback.format_stack()) + + def __enter__(self): + self.enable() + + @no_type_check + def disable(self): + common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency + common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval + common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency + common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + async def wrap(*args, **kwargs): + with self: + return await f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + + +# Global knobs to speed up the test suite. 
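+# (client_knobs also works as a context manager or as an async-function
+# decorator; an illustrative sketch, not executed here:
+#
+#     with client_knobs(heartbeat_frequency=0.5):
+#         ...  # monitor checks run every 0.5 seconds inside this block
+# )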
+global_knobs = client_knobs(events_queue_frequency=0.05) + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, **kwargs): + if _IS_SYNC: + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") + self.stopped = False + self.task = None + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) + + if not _IS_SYNC: + + async def start(self): + self.task = create_task(self.run(), name=self.name) + + async def join(self, timeout: Optional[float] = None): # type: ignore[override] + if self.task is not None: + await asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + async def run(self): + try: + await self.target(*self.args) + finally: + self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + async def run(self): + try: + await super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/asynchronous/pymongo_mocks.py b/test/asynchronous/pymongo_mocks.py new file mode 100644 index 0000000000..40beb3c0dc --- /dev/null +++ b/test/asynchronous/pymongo_mocks.py @@ -0,0 +1,252 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for mocking parts of PyMongo to test other parts.""" +from __future__ import annotations + +import contextlib +import weakref +from functools import partial +from test import client_context +from test.asynchronous import async_client_context + +from pymongo import AsyncMongoClient, common +from pymongo.asynchronous.monitor import Monitor +from pymongo.asynchronous.pool import Pool +from pymongo.errors import AutoReconnect, NetworkTimeout +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription + +_IS_SYNC = False + + +class MockPool(Pool): + def __init__(self, client, pair, *args, **kwargs): + # MockPool gets a 'client' arg, regular pools don't. Weakref it to + # avoid cycle with __del__, causing ResourceWarnings in Python 3.3. + self.client = weakref.proxy(client) + self.mock_host, self.mock_port = pair + + # Actually connect to the default server. 
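+        # All mock hosts share this one real pool; checkout() below simulates
+        # per-host failures from client.mock_down_hosts instead of opening
+        # sockets to the mock addresses.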
+ Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) + + @contextlib.asynccontextmanager + async def checkout(self, handler=None): + client = self.client + host_and_port = f"{self.mock_host}:{self.mock_port}" + if host_and_port in client.mock_down_hosts: + raise AutoReconnect("mock error") + + assert host_and_port in ( + client.mock_standalones + client.mock_members + client.mock_mongoses + ), "bad host: %s" % host_and_port + + async with Pool.checkout(self, handler) as conn: + conn.mock_host = self.mock_host + conn.mock_port = self.mock_port + yield conn + + +class DummyMonitor: + def __init__(self, server_description, topology, pool, topology_settings): + self._server_description = server_description + self.opened = False + + def cancel_check(self): + pass + + async def join(self): + pass + + def open(self): + self.opened = True + + def request_check(self): + pass + + async def close(self): + self.opened = False + + +class AsyncMockMonitor(Monitor): + def __init__(self, client, server_description, topology, pool, topology_settings): + # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it + # to avoid cycles. + self.client = weakref.proxy(client) + Monitor.__init__(self, server_description, topology, pool, topology_settings) + + async def _check_once(self): + client = self.client + address = self._server_description.address + response, rtt = client.mock_hello("%s:%d" % address) # type: ignore[str-format] + return ServerDescription(address, Hello(response), rtt) + + +class AsyncMockClient(AsyncMongoClient): + def __init__( + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + """An AsyncMongoClient connected to the default server, with a mock topology. + + standalones, members, mongoses, arbiters, and down_hosts determine the + configuration of the topology. They are formatted like ['a:1', 'b:2']. + hello_hosts provides an alternative host list for the server's + mocked hello response; see test_connect_with_internal_ips. + """ + self.mock_standalones = standalones[:] + self.mock_members = members[:] + + if self.mock_members: + self.mock_primary = self.mock_members[0] + else: + self.mock_primary = None + + # Hosts that should be considered an arbiter. + self.mock_arbiters = arbiters[:] if arbiters else [] + + if hello_hosts is not None: + self.mock_hello_hosts = hello_hosts + else: + self.mock_hello_hosts = members[:] + + self.mock_mongoses = mongoses[:] + + # Hosts that should raise socket errors. 
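+        # A host listed here fails in two places: MockPool.checkout() raises
+        # AutoReconnect for it and mock_hello() raises NetworkTimeout, so both
+        # application connections and monitor checks see the host as down.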
+ self.mock_down_hosts = down_hosts[:] if down_hosts else [] + + # Hostname -> (min wire version, max wire version) + self.mock_wire_versions = {} + + # Hostname -> max write batch size + self.mock_max_write_batch_sizes = {} + + # Hostname -> round trip time + self.mock_rtts = {} + + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(AsyncMockMonitor, self) + + client_options = async_client_context.default_client_options.copy() + client_options.update(kwargs) + + super().__init__(*args, **client_options) + + @classmethod + async def get_async_mock_client( + cls, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + c = AsyncMockClient( + standalones, members, mongoses, hello_hosts, arbiters, down_hosts, *args, **kwargs + ) + + await c.aconnect() + return c + + def kill_host(self, host): + """Host is like 'a:1'.""" + self.mock_down_hosts.append(host) + + def revive_host(self, host): + """Host is like 'a:1'.""" + self.mock_down_hosts.remove(host) + + def set_wire_version_range(self, host, min_version, max_version): + self.mock_wire_versions[host] = (min_version, max_version) + + def set_max_write_batch_size(self, host, size): + self.mock_max_write_batch_sizes[host] = size + + def mock_hello(self, host): + """Return mock hello response (a dict) and round trip time.""" + if host in self.mock_wire_versions: + min_wire_version, max_wire_version = self.mock_wire_versions[host] + else: + min_wire_version = common.MIN_SUPPORTED_WIRE_VERSION + max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION + + max_write_batch_size = self.mock_max_write_batch_sizes.get( + host, common.MAX_WRITE_BATCH_SIZE + ) + + rtt = self.mock_rtts.get(host, 0) + + # host is like 'a:1'. + if host in self.mock_down_hosts: + raise NetworkTimeout("mock timeout") + + elif host in self.mock_standalones: + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } + elif host in self.mock_members: + primary = host == self.mock_primary + + # Simulate a replica set member. + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: primary, + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } + + if self.mock_primary: + response["primary"] = self.mock_primary + + if host in self.mock_arbiters: + response["arbiterOnly"] = True + response["secondary"] = False + elif host in self.mock_mongoses: + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "msg": "isdbgrid", + "maxWriteBatchSize": max_write_batch_size, + } + else: + # In test_internal_ips(), we try to connect to a host listed + # in hello['hosts'] but not publicly accessible. + raise AutoReconnect("Unknown host: %s" % host) + + return response, rtt + + def _process_periodic_tasks(self): + # Avoid the background thread causing races, e.g. a surprising + # reconnect while we're trying to test a disconnected client. + pass diff --git a/test/asynchronous/qcheck.py b/test/asynchronous/qcheck.py new file mode 100644 index 0000000000..190a7f1a91 --- /dev/null +++ b/test/asynchronous/qcheck.py @@ -0,0 +1,255 @@ +# Copyright 2009-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import datetime +import random +import re +import sys +import traceback + +sys.path[0:0] = [""] + +from bson.dbref import DBRef +from bson.objectid import ObjectId +from bson.son import SON + +_IS_SYNC = False + +gen_target = 100 +reduction_attempts = 10 +examples = 5 + + +def lift(value): + return lambda: value + + +def choose_lifted(generator_list): + return lambda: random.choice(generator_list) + + +def my_map(generator, function): + return lambda: function(generator()) + + +def choose(list): + return lambda: random.choice(list)() + + +def gen_range(start, stop): + return lambda: random.randint(start, stop) + + +def gen_int(): + max_int = 2147483647 + return lambda: random.randint(-max_int - 1, max_int) + + +def gen_float(): + return lambda: (random.random() - 0.5) * sys.maxsize + + +def gen_boolean(): + return lambda: random.choice([True, False]) + + +def gen_printable_char(): + return lambda: chr(random.randint(32, 126)) + + +def gen_printable_string(gen_length): + return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) + + +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) + + +def gen_string(gen_length): + return lambda: b"".join(gen_list(gen_char(), gen_length)()) + + +def gen_unichar(): + return lambda: chr(random.randint(1, 0xFFF)) + + +def gen_unicode(gen_length): + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) + + +def gen_list(generator, gen_length): + return lambda: [generator() for _ in range(gen_length())] + + +def gen_datetime(): + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) + + +def gen_dict(gen_key, gen_value, gen_length): + def a_dict(gen_key, gen_value, length): + result = {} + for _ in range(length): + result[gen_key()] = gen_value() + return result + + return lambda: a_dict(gen_key, gen_value, gen_length()) + + +def gen_regexp(gen_length): + # TODO our patterns only consist of one letter. + # this is because of a bug in CPython's regex equality testing, + # which I haven't quite tracked down, so I'm just ignoring it... 
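+    # Even with a fixed pattern, gen_flags() below still randomizes the
+    # IGNORECASE/MULTILINE/VERBOSE flags, so flag round-tripping is exercised.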
+ def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) + + def gen_flags(): + flags = 0 + if random.random() > 0.5: + flags = flags | re.IGNORECASE + if random.random() > 0.5: + flags = flags | re.MULTILINE + if random.random() > 0.5: + flags = flags | re.VERBOSE + + return flags + + return lambda: re.compile(pattern(), gen_flags()) + + +def gen_objectid(): + return lambda: ObjectId() + + +def gen_dbref(): + collection = gen_unicode(gen_range(0, 20)) + return lambda: DBRef(collection(), gen_mongo_value(1, True)()) + + +def gen_mongo_value(depth, ref): + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] + if ref: + choices.append(gen_dbref()) + if depth > 0: + choices.append(gen_mongo_list(depth, ref)) + choices.append(gen_mongo_dict(depth, ref)) + return choose(choices) + + +def gen_mongo_list(depth, ref): + return gen_list(gen_mongo_value(depth - 1, ref), gen_range(0, 10)) + + +def gen_mongo_dict(depth, ref=True): + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) + + +def simplify(case): # TODO this is a hack + if isinstance(case, SON) and "$ref" not in case: + simplified = SON(case) # make a copy! + if random.choice([True, False]): + # delete + simplified_keys = list(simplified) + if not len(simplified_keys): + return (False, case) + simplified.pop(random.choice(simplified_keys)) + return (True, simplified) + else: + # simplify a value + simplified_items = list(simplified.items()) + if not len(simplified_items): + return (False, case) + (key, value) = random.choice(simplified_items) + (success, value) = simplify(value) + simplified[key] = value + return (success, success and simplified or case) + if isinstance(case, list): + simplified = list(case) + if random.choice([True, False]): + # delete + if not len(simplified): + return (False, case) + simplified.pop(random.randrange(len(simplified))) + return (True, simplified) + else: + # simplify an item + if not len(simplified): + return (False, case) + index = random.randrange(len(simplified)) + (success, value) = simplify(simplified[index]) + simplified[index] = value + return (success, success and simplified or case) + return (False, case) + + +async def reduce(case, predicate, reductions=0): + for _ in range(reduction_attempts): + (reduced, simplified) = simplify(case) + if reduced and not await predicate(simplified): + return await reduce(simplified, predicate, reductions + 1) + return (reductions, case) + + +async def isnt(predicate): + async def is_not(x): + return not await predicate(x) + + return is_not + + +async def check(predicate, generator): + counter_examples = [] + for _ in range(gen_target): + case = generator() + try: + if not await predicate(case): + reduction = await reduce(case, predicate) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) + except: + counter_examples.append(f"{case!r} : {traceback.format_exc()}") + return counter_examples + + +async def check_unittest(test, predicate, generator): + counter_examples = await check(predicate, generator) + if counter_examples: + failures = len(counter_examples) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) + test.fail(message) 
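+
+# Illustrative usage from a test, with a hypothetical round-trip predicate
+# (sketch only; not executed here):
+#
+#     async def roundtrips(doc):
+#         return bson.decode(bson.encode(doc)) == doc
+#
+#     counter_examples = await check(roundtrips, gen_mongo_dict(3))
+#     # or: await check_unittest(self, roundtrips, gen_mongo_dict(3))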
diff --git a/test/asynchronous/test_async_cancellation.py b/test/asynchronous/test_async_cancellation.py new file mode 100644 index 0000000000..f450ea23cc --- /dev/null +++ b/test/asynchronous/test_async_cancellation.py @@ -0,0 +1,129 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that async cancellation performed by users clean up resources correctly.""" +from __future__ import annotations + +import asyncio +import sys +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected + + +class TestAsyncCancellation(AsyncIntegrationTest): + async def test_async_cancellation_closes_connection(self): + pool = await async_get_pool(self.client) + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + conn = one(pool.conns) + + async def task(): + await self.client.db.test.find_one({"$where": delay(0.2)}) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(conn.closed) + + @async_client_context.require_transactions + async def test_async_cancellation_aborts_transaction(self): + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + session = self.client.start_session() + + async def callback(session): + await self.client.db.test.find_one({"$where": delay(0.2)}, session=session) + + async def task(): + await session.with_transaction(callback) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertFalse(session.in_transaction) + + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_cursor(self): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + cursor = self.client.db.test.find({}, batch_size=1) + await cursor.next() + + # Make sure getMore commands block + fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await cursor.next() + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(cursor._killed) + + @async_client_context.require_change_streams + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_change_stream(self): + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + change_stream = await self.client.db.test.watch(batch_size=2) + event = asyncio.Event() + + # Make sure getMore commands block + 
fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + event.set() + await change_stream.next() + + task = asyncio.create_task(task()) + + await event.wait() + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(change_stream._closed) diff --git a/test/asynchronous/test_async_contextvars_reset.py b/test/asynchronous/test_async_contextvars_reset.py new file mode 100644 index 0000000000..c6e825bbdf --- /dev/null +++ b/test/asynchronous/test_async_contextvars_reset.py @@ -0,0 +1,41 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that AsyncPeriodicExecutors do not copy ContextVars from their parents.""" +from __future__ import annotations + +import asyncio +import sys +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest + + +class TestAsyncContextVarsReset(AsyncIntegrationTest): + async def test_context_vars_are_reset_in_executor(self): + if sys.version_info < (3, 12): + self.skipTest("Test requires asyncio.Task.get_context (added in Python 3.12)") + + await self.client.db.test.insert_one({"x": 1}) + for server in self.client._topology._servers.values(): + for context in [ + c + for c in server._monitor._executor._task.get_context() + if c.name in ["TIMEOUT", "RTT", "DEADLINE"] + ]: + self.assertIn(context.get(), [None, float("inf"), 0.0]) + await self.client.db.test.delete_many({}) diff --git a/test/asynchronous/test_async_loop_safety.py b/test/asynchronous/test_async_loop_safety.py new file mode 100644 index 0000000000..7516cb8eeb --- /dev/null +++ b/test/asynchronous/test_async_loop_safety.py @@ -0,0 +1,36 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test that the asynchronous API detects event loop changes and fails correctly.""" +from __future__ import annotations + +import asyncio +import unittest + +from pymongo import AsyncMongoClient + + +class TestClientLoopSafety(unittest.TestCase): + def test_client_errors_on_different_loop(self): + client = AsyncMongoClient() + loop1 = asyncio.new_event_loop() + loop1.run_until_complete(client.aconnect()) + loop2 = asyncio.new_event_loop() + with self.assertRaisesRegex( + RuntimeError, "Cannot use AsyncMongoClient in different event loop" + ): + loop2.run_until_complete(client.aconnect()) + loop1.run_until_complete(client.close()) + loop1.close() + loop2.close() diff --git a/test/asynchronous/test_async_loop_unblocked.py b/test/asynchronous/test_async_loop_unblocked.py new file mode 100644 index 0000000000..86f934b798 --- /dev/null +++ b/test/asynchronous/test_async_loop_unblocked.py @@ -0,0 +1,56 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that the asynchronous API does not block the event loop.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest + +from pymongo.errors import ServerSelectionTimeoutError + + +class TestClientLoopUnblocked(AsyncIntegrationTest): + async def test_client_does_not_block_loop(self): + # Use an unreachable TEST-NET host to ensure that the client times out attempting to create a connection. + client = self.simple_client("192.0.2.1", serverSelectionTimeoutMS=500) + latencies = [] + + # If the loop is being blocked, at least one iteration will have a latency much more than 0.1 seconds + async def background_task(): + start = time.monotonic() + try: + while True: + start = time.monotonic() + await asyncio.sleep(0.1) + latencies.append(time.monotonic() - start) + except asyncio.CancelledError: + latencies.append(time.monotonic() - start) + raise + + t = asyncio.create_task(background_task()) + + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No servers found yet"): + await client.admin.command("ping") + + t.cancel() + with self.assertRaises(asyncio.CancelledError): + await t + + self.assertLessEqual( + sorted(latencies, reverse=True)[0], + 1.0, + "Background task was blocked from running", + ) diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py new file mode 100644 index 0000000000..904674db16 --- /dev/null +++ b/test/asynchronous/test_auth.py @@ -0,0 +1,748 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Authentication Tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import threading +from urllib.parse import quote_plus + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + SkipTest, + async_client_context, + unittest, +) +from test.utils_shared import AllowListEventListener, delay, ignore_deprecations + +import pytest + +from pymongo import AsyncMongoClient, monitoring +from pymongo.asynchronous.auth import HAVE_KERBEROS, _canonicalize_hostname +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.errors import OperationFailure +from pymongo.hello import HelloCompat +from pymongo.read_preferences import ReadPreference +from pymongo.saslprep import HAVE_STRINGPREP + +_IS_SYNC = False + +pytestmark = pytest.mark.auth + +# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") + + +class AutoAuthenticateThread(threading.Thread): + """Used in testing threaded authentication. + + This does await collection.find_one() with a 1-second delay to ensure it must + check out and authenticate multiple connections from the pool concurrently. + + :Parameters: + `collection`: An auth-protected collection containing one document. + """ + + def __init__(self, collection): + super().__init__() + self.collection = collection + self.success = False + + def run(self): + assert self.collection.find_one({"$where": delay(1)}) is not None + self.success = True + + +class TestGSSAPI(AsyncPyMongoTestCase): + mech_properties: str + service_realm_required: bool + + @classmethod + def setUpClass(cls): + if not HAVE_KERBEROS: + raise SkipTest("Kerberos module not available.") + if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") + cls.service_realm_required = ( + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = dict( + SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE + ) + if GSSAPI_SERVICE_REALM is not None: + mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM + cls.mech_properties = mech_properties + + async def test_credentials_hashing(self): + # GSSAPI credentials are properly hashed. 
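+        # The set-based assertions below exercise __hash__/__eq__ on the
+        # credential tuples: credentials that differ only in SERVICE_NAME must
+        # compare unequal, while identical ones collapse to one set entry.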
+ creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) + + creds1 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds2 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds3 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) + + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) + + @ignore_deprecations + async def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None + if GSSAPI_PASS is not None: + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) + else: + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) + + if not self.service_realm_required: + # Without authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, without authMechanismProperties. + client = self.simple_client(uri) + await client[GSSAPI_DB].collection.find_one() + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, with authMechanismProperties. + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" + client = self.simple_client(mech_uri) + await client[GSSAPI_DB].collection.find_one() + + set_name = async_client_context.replica_set_name + if set_name: + if not self.service_realm_required: + # Without authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + uri = uri + f"&replicaSet={set_name!s}" + client = self.simple_client(uri) + await client[GSSAPI_DB].list_collection_names() + + # With authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + mech_uri = mech_uri + f"&replicaSet={set_name!s}" + client = self.simple_client(mech_uri) + await client[GSSAPI_DB].list_collection_names() + + @ignore_deprecations + @async_client_context.require_sync + async def test_gssapi_threaded(self): + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + # Authentication succeeded? + await client.server_info() + db = client[GSSAPI_DB] + + # Need one document in the collection. 
AutoAuthenticateThread does + # collection.find_one with a 1-second delay, forcing it to check out + # multiple connections from the pool concurrently, proving that + # auto-authentication works with GSSAPI. + collection = db.test + if not await collection.count_documents({}): + try: + await collection.drop() + await collection.insert_one({"_id": 1}) + except OperationFailure: + raise SkipTest("User must be able to write.") + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + set_name = async_client_context.replica_set_name + if set_name: + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + # Succeeded? + await client.server_info() + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + async def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = await _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = await _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. + props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + await client.server_info() + + async def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. 
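+        # SERVICE_HOST overrides the hostname used to construct the Kerberos
+        # service principal, so a bogus value must fail authentication and the
+        # real GSSAPI_HOST must succeed.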
+ client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + await client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + await client.server_info() + + +class TestSASLPlain(AsyncPyMongoTestCase): + @classmethod + def setUpClass(cls): + if not SASL_HOST or not SASL_USER or not SASL_PASS: + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") + + async def test_sasl_plain(self): + client = self.simple_client( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + assert SASL_USER is not None + assert SASL_PASS is not None + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + client = self.simple_client(uri) + await client.ldap.test.find_one() + + set_name = async_client_context.replica_set_name + if set_name: + client = self.simple_client( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) + client = self.simple_client(uri) + await client.ldap.test.find_one() + + async def test_sasl_plain_bad_credentials(self): + def auth_string(user, password): + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + return uri + + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) + # OperationFailure raised upon connecting. 
+ with self.assertRaises(OperationFailure): + await bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + await bad_pwd.admin.command("ping") + + +class TestSCRAMSHA1(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user( + "pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await super().asyncTearDown() + + @async_client_context.require_no_fips + async def test_scram_sha1(self): + host, port = await async_client_context.host, await async_client_context.port + + client = await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + await client.pymongo_test.command("dbstats") + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, async_client_context.replica_set_name) + ) + client = await self.async_single_client_noauth(uri) + await client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + await db.command("dbstats") + + +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation +class TestSCRAM(AsyncIntegrationTest): + @async_client_context.require_auth + @async_client_context.require_version_min(3, 7, 2) + async def asyncSetUp(self): + await super().asyncSetUp() + self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS + monitoring._SENSITIVE_COMMANDS = set() + self.listener = AllowListEventListener("saslStart") + + async def asyncTearDown(self): + monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS + await async_client_context.client.testscram.command("dropAllUsersFromDatabase") + await async_client_context.client.drop_database("testscram") + await super().asyncTearDown() + + async def test_scram_skip_empty_exchange(self): + listener = AllowListEventListener("saslStart", "saslContinue") + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + await client.testscram.command("dbstats") + + if async_client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.started_events[0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) + + # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. 
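+        # With skipEmptyExchange, SCRAM completes in two round trips: saslStart
+        # (sent speculatively with the hello handshake on 4.4+) plus a single
+        # saslContinue, instead of ending with an empty final exchange.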
+ started = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ["saslContinue"]) + else: + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + + @async_client_context.require_no_fips + async def test_scram(self): + # Step 1: create users + await async_client_context.create_user( + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) + + # Step 2: verify auth success cases + client = await self.async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + self.listener.reset() + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + await client.testscram.command("dbstats") + if async_client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. 
+ self.assertEqual(self.listener.started_events, [])
+ else:
+ started = self.listener.started_events[0]
+ self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256")
+
+ # Step 3: verify auth failure conditions
+ client = await self.async_rs_or_single_client_noauth(
+ username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256"
+ )
+ with self.assertRaises(OperationFailure):
+ await client.testscram.command("dbstats")
+
+ client = await self.async_rs_or_single_client_noauth(
+ username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1"
+ )
+ with self.assertRaises(OperationFailure):
+ await client.testscram.command("dbstats")
+
+ client = await self.async_rs_or_single_client_noauth(
+ username="not-a-user", password="pwd", authSource="testscram"
+ )
+ with self.assertRaises(OperationFailure):
+ await client.testscram.command("dbstats")
+
+ if async_client_context.is_rs:
+ host, port = await async_client_context.host, await async_client_context.port
+ uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % (
+ host,
+ port,
+ async_client_context.replica_set_name,
+ )
+ client = await self.async_single_client_noauth(uri)
+ await client.testscram.command("dbstats")
+ db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY)
+ await db.command("dbstats")
+
+ @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep")
+ async def test_scram_saslprep(self):
+ # Step 4: test SASLprep
+ host, port = await async_client_context.host, await async_client_context.port
+ # Test the use of SASLprep on passwords. For example,
+ # saslprep('\u2163') becomes 'IV' and saslprep('I\u00ADX')
+ # becomes 'IX'. SASLprep is only supported when the standard
+ # library provides stringprep.
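+ # A quick sketch of the mappings exercised below (assuming the
+ # stringprep-backed saslprep helper is available; illustration only):
+ #
+ # >>> from pymongo.saslprep import saslprep
+ # >>> saslprep("\u2168"), saslprep("\u2163"), saslprep("I\u00ADX")
+ # ('IX', 'IV', 'IX')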
+ await async_client_context.create_user( + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", password="\u2163", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", password="IV", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", password="I\u00ADX", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + "mongodb://IX:IX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + async def test_cache(self): + client = await self.async_single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) + # Force authentication. 
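+ # The ping cannot run without authenticating, so it drives the SCRAM
+ # conversation and fills the credentials cache with the derived
+ # (client_key, server_key, salt, iterations) tuple checked below,
+ # letting later connections skip the costly PBKDF2 key derivation.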
+ await client.admin.command("ping") + cache = credentials.cache + self.assertIsNotNone(cache) + data = cache.data + self.assertIsNotNone(data) + self.assertEqual(len(data), 4) + ckey, skey, salt, iterations = data + self.assertIsInstance(ckey, bytes) + self.assertIsInstance(skey, bytes) + self.assertIsInstance(salt, bytes) + self.assertIsInstance(iterations, int) + + @async_client_context.require_sync + async def test_scram_threaded(self): + coll = async_client_context.client.db.test + await coll.drop() + await coll.insert_one({"_id": 1}) + + # The first thread to call find() will authenticate + client = await self.async_rs_or_single_client() + coll = client.db.test + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(coll)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + +class TestAuthURIOptions(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user("admin", "admin", "pass") + await async_client_context.create_user( + "pymongo_test", "user", "pass", ["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await async_client_context.drop_user("admin", "admin") + await super().asyncTearDown() + + async def test_uri_options(self): + # Test default to admin + host, port = await async_client_context.host, await async_client_context.port + client = await self.async_rs_or_single_client_noauth( + "mongodb://admin:pass@%s:%d" % (host, port) + ) + self.assertTrue(await client.admin.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await self.async_single_client_noauth(uri) + self.assertTrue(await client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test explicit database + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + client = await self.async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await self.async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test authSource + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) + client = await self.async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, async_client_context.replica_set_name) + ) + client = await self.async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + 
self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_auth_oidc.py b/test/asynchronous/test_auth_oidc.py new file mode 100644 index 0000000000..639c155e73 --- /dev/null +++ b/test/asynchronous/test_auth_oidc.py @@ -0,0 +1,1192 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +import warnings +from contextlib import asynccontextmanager +from pathlib import Path +from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.helpers import ConcurrentRunner +from typing import Dict + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import EventListener, OvertCommandListener + +from bson import SON +from pymongo import AsyncMongoClient +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.asynchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) +from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.cursor_shared import CursorType +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne +from pymongo.synchronous.uri_parser import parse_uri + +_IS_SYNC = False + +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") + +# Generate unified tests. 
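+# generate_test_classes() builds unittest.TestCase subclasses from the JSON
+# spec files under TEST_PATH; merging them into globals() makes them
+# discoverable by the test runner.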
+globals().update(generate_test_classes(str(TEST_PATH), module=__name__))
+
+pytestmark = pytest.mark.auth_oidc
+
+
+class OIDCTestBase(AsyncPyMongoTestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.uri_single = os.environ["MONGODB_URI_SINGLE"]
+ cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI")
+ cls.uri_admin = os.environ["MONGODB_URI"]
+ if ENVIRON == "test":
+ if not TOKEN_DIR:
+ raise ValueError("Please set OIDC_TOKEN_DIR")
+ if not TOKEN_FILE:
+ raise ValueError("Please set OIDC_TOKEN_FILE")
+
+ async def asyncSetUp(self):
+ self.request_called = 0
+
+ def get_token(self, username=None):
+ """Get a token for the current provider."""
+ if ENVIRON == "test":
+ if username is None:
+ token_file = TOKEN_FILE
+ else:
+ token_file = os.path.join(TOKEN_DIR, username)
+ with open(token_file) as fid: # noqa: ASYNC101,RUF100
+ return fid.read()
+ elif ENVIRON == "azure":
+ opts = parse_uri(self.uri_single)["options"]
+ token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"]
+ return _get_azure_response(token_aud, username)["access_token"]
+ elif ENVIRON == "gcp":
+ opts = parse_uri(self.uri_single)["options"]
+ token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"]
+ return _get_gcp_response(token_aud, username)["access_token"]
+ elif ENVIRON == "k8s":
+ return _get_k8s_token()
+ else:
+ raise ValueError(f"Unknown ENVIRON: {ENVIRON}")
+
+ @asynccontextmanager
+ async def fail_point(self, command_args):
+ cmd_on = SON([("configureFailPoint", "failCommand")])
+ cmd_on.update(command_args)
+ client = AsyncMongoClient(self.uri_admin)
+ await client.admin.command(cmd_on)
+ try:
+ yield
+ finally:
+ await client.admin.command(
+ "configureFailPoint", cmd_on["configureFailPoint"], mode="off"
+ )
+ await client.close()
+
+
+class TestAuthOIDCHuman(OIDCTestBase):
+ uri: str
+
+ @classmethod
+ def setUpClass(cls):
+ if ENVIRON != "test":
+ raise unittest.SkipTest("Human workflows are only tested with the test environment")
+ if not DOMAIN:
+ raise ValueError("Missing OIDC_DOMAIN")
+ super().setUpClass()
+
+ async def asyncSetUp(self):
+ self.refresh_present = 0
+ await super().asyncSetUp()
+
+ def create_request_cb(self, username="test_user1", sleep=0):
+ def request_token(context: OIDCCallbackContext):
+ # Validate the info.
+ self.assertIsInstance(context.idp_info.issuer, str)
+ if context.idp_info.clientId is not None:
+ self.assertIsInstance(context.idp_info.clientId, str)
+
+ # Validate the timeout.
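+ # The human flow leaves time for an interactive browser login; the
+ # five-minute timeout asserted below reflects that allowance.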
+ timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + async def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client + + async def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. + client = await self.create_client() + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple) + # Assert that a find operation fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. 
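+ # ALLOWED_HOSTS restricts the servers the human callback may be used
+ # with, so tokens are not handed to arbitrary hosts; an empty list
+ # permits no host at all and must fail client-side.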
+ request_token = self.create_request_cb()
+ props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []}
+ client = await self.create_client(authmechanismproperties=props)
+ # Assert that a find operation fails with a client-side error.
+ with self.assertRaises(ConfigurationError):
+ await client.test.test.find_one()
+ # Close the client.
+ await client.close()
+
+ # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com,
+ # a human callback, and an ALLOWED_HOSTS that contains ["example.com"].
+ props: Dict = {
+ "OIDC_HUMAN_CALLBACK": request_token,
+ "ALLOWED_HOSTS": ["example.com"],
+ }
+ with warnings.catch_warnings():
+ warnings.simplefilter("default")
+ client = await self.create_client(
+ self.uri_single + "&ignored=example.com",
+ authmechanismproperties=props,
+ connect=False,
+ )
+ # Assert that a find operation fails with a client-side error.
+ with self.assertRaises(ConfigurationError):
+ await client.test.test.find_one()
+ # Close the client.
+ await client.close()
+
+ async def test_1_7_allowed_hosts_in_connection_string_ignored(self):
+ # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback.
+ # Assert that the creation of the client raises a configuration error.
+ uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D"
+ with self.assertRaises(ConfigurationError), warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ c = AsyncMongoClient(
+ uri,
+ authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()),
+ )
+ await c.aconnect()
+
+ async def test_1_8_machine_idp_human_callback(self):
+ if not os.environ.get("OIDC_IS_LOCAL"):
+ raise unittest.SkipTest("Test Requires Local OIDC server")
+ # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback.
+ client = await self.create_client(username="test_machine")
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+ # Close the client.
+ await client.close()
+
+ async def test_2_1_valid_callback_inputs(self):
+ # Create an AsyncMongoClient with a human callback that validates its inputs and returns a valid access token.
+ client = await self.create_client()
+ # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible.
+ # Ensure that there are no unexpected fields.
+ await client.test.test.find_one()
+ # Close the client.
+ await client.close()
+
+ async def test_2_2_callback_returns_missing_data(self):
+ # Create an AsyncMongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields.
+ class CustomCB(OIDCCallback):
+ def fetch(self, ctx):
+ return dict()
+
+ client = await self.create_client(request_cb=CustomCB())
+ # Perform a find operation that fails.
+ with self.assertRaises(ValueError):
+ await client.test.test.find_one()
+ # Close the client.
+ await client.close()
+
+ async def test_2_3_refresh_token_is_passed_to_the_callback(self):
+ # Create an AsyncMongoClient with a human callback that checks for the presence of a refresh token.
+ client = await self.create_client()
+
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Set a fail point for ``find`` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a ``find`` operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+
+ # Assert that the refresh token was used once.
+ self.assertEqual(self.refresh_present, 1)
+
+ async def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self):
+ # Create a client with a human callback that returns a valid token.
+ client = await self.create_client()
+
+ # Set a fail point for ``find`` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True},
+ }
+ ):
+ # Perform a ``find`` operation that fails.
+ with self.assertRaises(AutoReconnect):
+ await client.test.test.find_one()
+
+ # Set a fail point for ``saslStart`` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["saslStart"], "errorCode": 18},
+ }
+ ):
+ # Perform a ``find`` operation that succeeds.
+ await client.test.test.find_one()
+
+ # Close the client.
+ await client.close()
+
+ async def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self):
+ # Create an ``AsyncMongoClient`` with a human callback that returns a valid token.
+ client = await self.create_client()
+
+ # Set a fail point for ``saslStart`` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["saslStart"], "errorCode": 18},
+ }
+ ):
+ # Perform a ``find`` operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.find_one()
+
+ # Close the client.
+ await client.close()
+
+ async def test_4_1_reauthenticate_succeeds(self):
+ # Create a default OIDC client and add an event listener.
+ # The following assumes that the driver does not emit saslStart or saslContinue events.
+ # If the driver does emit those events, ignore/filter them for the purposes of this test.
+ listener = OvertCommandListener()
+ client = await self.create_client(event_listeners=[listener])
+
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Clear the listener state if possible.
+ listener.reset()
+
+ # Force a reauthentication using a fail point.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform another find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+
+ # Assert that the list of command started events is [find, find].
+ # Note that if the listener state could not be cleared then there will be an extra find command.
+ started_events = [
+ i.command_name for i in listener.started_events if not i.command_name.startswith("sasl")
+ ]
+ succeeded_events = [
+ i.command_name
+ for i in listener.succeeded_events
+ if not i.command_name.startswith("sasl")
+ ]
+ failed_events = [
+ i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl")
+ ]
+
+ self.assertEqual(
+ started_events,
+ [
+ "find",
+ "find",
+ ],
+ )
+ # Assert that the list of command succeeded events is [find].
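+ # The first find fails server-side with error 391
+ # (ReauthenticationRequired) and is retried after reauthenticating, so
+ # the retried find succeeds while the original appears as a failure.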
+ self.assertEqual(succeeded_events, ["find"])
+ # Assert that a find operation failed once during the command execution.
+ self.assertEqual(failed_events, ["find"])
+ # Close the client.
+ await client.close()
+
+ async def test_4_2_reauthenticate_succeeds_no_refresh(self):
+ # Create a default OIDC client with a human callback that does not return a refresh token.
+ cb = self.create_request_cb()
+
+ class CustomRequest(OIDCCallback):
+ def fetch(self, *args, **kwargs):
+ result = cb.fetch(*args, **kwargs)
+ result.refresh_token = None
+ return result
+
+ client = await self.create_client(request_cb=CustomRequest())
+
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Force a reauthentication using a fail point.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ # Close the client.
+ await client.close()
+
+ async def test_4_3_reauthenticate_succeeds_after_refresh_fails(self):
+ # Create a default OIDC client with a human callback that returns an invalid refresh token.
+ cb = self.create_request_cb()
+
+ class CustomRequest(OIDCCallback):
+ def fetch(self, *args, **kwargs):
+ result = cb.fetch(*args, **kwargs)
+ result.refresh_token = "bad"
+ return result
+
+ client = await self.create_client(request_cb=CustomRequest())
+
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Force a reauthentication using a fail point.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+
+ # Close the client.
+ await client.close()
+
+ async def test_4_4_reauthenticate_fails(self):
+ # Create a default OIDC client with a human callback that returns invalid refresh tokens and
+ # returns invalid access tokens after the first access.
+ cb = self.create_request_cb()
+
+ class CustomRequest(OIDCCallback):
+ fetch_called = 0
+
+ def fetch(self, *args, **kwargs):
+ self.fetch_called += 1
+ result = cb.fetch(*args, **kwargs)
+ result.refresh_token = "bad"
+ if self.fetch_called > 1:
+ result.access_token = "bad"
+ return result
+
+ client = await self.create_client(request_cb=CustomRequest())
+
+ # Perform a find operation that succeeds (to force a speculative auth).
+ await client.test.test.find_one()
+ # Assert that the human callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Force a reauthentication using a failCommand.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.find_one()
+
+ # Assert that the human callback has been called three times.
+ self.assertEqual(self.request_called, 3)
+
+ # Close the client.
+ await client.close() + + async def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = await self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = await self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_reauthentication_succeeds_multiple_connections(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + client1 = await self.create_client(request_cb=request_cb) + client2 = await self.create_client(request_cb=request_cb) + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + await client1.close() + await client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + async def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_bulk_read(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + await cursor.to_list() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_cursor(self): + # Create a client. 
+ client = await self.create_client()
+
+ # Perform an insert operation.
+ await client.test.test.insert_one({"a": 1})
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation.
+ cursor = client.test.test.find({"a": 1})
+ self.assertGreaterEqual(len(await cursor.to_list()), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ await client.close()
+
+ async def test_reauthenticate_succeeds_get_more(self):
+ # Create a client.
+ client = await self.create_client()
+
+ # Perform an insert operation.
+ await client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["getMore"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation.
+ cursor = client.test.test.find({"a": 1}, batch_size=1)
+ self.assertGreaterEqual(len(await cursor.to_list()), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ await client.close()
+
+ async def test_reauthenticate_succeeds_get_more_exhaust(self):
+ # Skip when running against a mongos, which does not support exhaust cursors.
+ client = await self.create_client()
+ hello = await client.admin.command(HelloCompat.LEGACY_CMD)
+ if hello.get("msg") == "isdbgrid":
+ raise unittest.SkipTest("Must not be a mongos")
+
+ # Create a client with the callback.
+ client = await self.create_client()
+
+ # Perform an insert operation.
+ await client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["getMore"], "errorCode": 391},
+ }
+ ):
+ # Perform a find operation.
+ cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST)
+ self.assertGreaterEqual(len(await cursor.to_list()), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ await client.close()
+
+ async def test_reauthenticate_succeeds_command(self):
+ # Create a client.
+ client = await self.create_client()
+
+ # Perform an insert operation.
+ await client.test.test.insert_one({"a": 1})
+
+ # Assert that the request callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["count"], "errorCode": 391},
+ }
+ ):
+ # Perform a count operation.
+ cursor = await client.test.command({"count": "test"})
+
+ self.assertGreaterEqual(len(cursor), 1)
+
+ # Assert that the request callback has been called twice.
+ self.assertEqual(self.request_called, 2)
+ await client.close()
+
+
+class TestAuthOIDCMachine(OIDCTestBase):
+ uri: str
+
+ async def asyncSetUp(self):
+ self.request_called = 0
+
+ def create_request_cb(self, username=None, sleep=0):
+ def request_token(context):
+ assert isinstance(context.timeout_seconds, int)
+ assert context.version == 1
+ assert context.refresh_token is None
+ assert context.idp_info is None
+ token = self.get_token(username)
+ time.sleep(sleep)
+ self.request_called += 1
+ return OIDCCallbackResult(access_token=token)
+
+ class Inner(OIDCCallback):
+ def fetch(self, context):
+ return request_token(context)
+
+ return Inner()
+
+ async def create_client(self, *args, **kwargs):
+ request_cb = kwargs.pop("request_cb", self.create_request_cb())
+ props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb})
+ kwargs["retryReads"] = False
+ if not len(args):
+ args = [self.uri_single]
+ client = AsyncMongoClient(*args, authmechanismproperties=props, **kwargs)
+ self.addAsyncCleanup(client.close)
+ return client
+
+ async def test_1_1_callback_is_called_during_authentication(self):
+ # Create an ``AsyncMongoClient`` configured with a custom OIDC callback that
+ # implements the provider logic.
+ client = await self.create_client()
+ # Perform a ``find`` operation that succeeds.
+ await client.test.test.find_one()
+ # Assert that the callback was called 1 time.
+ self.assertEqual(self.request_called, 1)
+
+ async def test_1_2_callback_is_called_once_for_multiple_connections(self):
+ # Create an ``AsyncMongoClient`` configured with a custom OIDC callback that
+ # implements the provider logic.
+ client = await self.create_client()
+ await client.aconnect()
+
+ # Start 10 tasks, each of which runs 100 find operations that all succeed.
+ async def target():
+ for _ in range(100):
+ await client.test.test.find_one()
+
+ tasks = []
+ for i in range(10):
+ tasks.append(ConcurrentRunner(target=target))
+ for t in tasks:
+ await t.start()
+ for t in tasks:
+ await t.join()
+ # Assert that the callback was called 1 time.
+ self.assertEqual(self.request_called, 1)
+
+ async def test_2_1_valid_callback_inputs(self):
+ # Create an AsyncMongoClient configured with an OIDC callback that validates its inputs and returns a valid access token.
+ client = await self.create_client()
+ # Perform a find operation that succeeds.
+ await client.test.test.find_one()
+ # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields.
+ self.assertEqual(self.request_called, 1)
+
+ async def test_2_2_oidc_callback_returns_null(self):
+ # Create an AsyncMongoClient configured with an OIDC callback that returns null.
+ class CallbackNullToken(OIDCCallback):
+ def fetch(self, a):
+ return None
+
+ client = await self.create_client(request_cb=CallbackNullToken())
+ # Perform a find operation that fails.
+ with self.assertRaises(ValueError):
+ await client.test.test.find_one()
+
+ async def test_2_3_oidc_callback_returns_missing_data(self):
+ # Create an AsyncMongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields.
+ class CustomCallback(OIDCCallback):
+ count = 0
+
+ def fetch(self, a):
+ self.count += 1
+ return object()
+
+ client = await self.create_client(request_cb=CustomCallback())
+ # Perform a find operation that fails.
+ with self.assertRaises(ValueError):
+ await client.test.test.find_one()
+
+ async def test_2_4_invalid_client_configuration_with_callback(self):
+ # Create an AsyncMongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test.
+ request_cb = self.create_request_cb()
+ props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"}
+ # Assert it returns a client configuration error.
+ with self.assertRaises(ConfigurationError):
+ await self.create_client(authmechanismproperties=props)
+
+ async def test_2_5_invalid_use_of_ALLOWED_HOSTS(self):
+ # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`.
+ props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}
+ # Assert it returns a client configuration error.
+ with self.assertRaises(ConfigurationError):
+ await self.create_client(authmechanismproperties=props)
+
+ # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`.
+ props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []}
+ # Assert it returns a client configuration error.
+ with self.assertRaises(ConfigurationError):
+ await self.create_client(authmechanismproperties=props)
+
+ async def test_2_6_ALLOWED_HOSTS_defaults_ignored(self):
+ # Create a MongoCredential for OIDC with a machine callback.
+ props = {"OIDC_CALLBACK": self.create_request_cb()}
+ extra = dict(authmechanismproperties=props)
+ mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test")
+ # Assert that creating an authenticator for example.com does not result in an error.
+ authenticator = _get_authenticator(mongo_creds, ("example.com", 30))
+ assert authenticator.properties.username == "foo"
+
+ # Create a MongoCredential for OIDC with an ENVIRONMENT.
+ props = {"ENVIRONMENT": "test"}
+ extra = dict(authmechanismproperties=props)
+ mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test")
+ # Assert that creating an authenticator for example.com does not result in an error.
+ authenticator = _get_authenticator(mongo_creds, ("example.com", 30))
+ assert authenticator.properties.username == ""
+
+ async def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self):
+ # Create an AsyncMongoClient and an OIDC callback that implements the provider logic.
+ client = await self.create_client()
+ await client.aconnect()
+ # Poison the cache with an invalid access token.
+ # Set a fail point for the ``find`` command.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True},
+ }
+ ):
+ # Perform a ``find`` operation that fails. This is to force the ``AsyncMongoClient``
+ # to cache an access token.
+ with self.assertRaises(AutoReconnect):
+ await client.test.test.find_one()
+ # Poison the cache of the client.
+ client.options.pool_options._credentials.cache.data.access_token = "bad"
+ # Reset the request count.
+ self.request_called = 0
+ # Verify that a find succeeds.
+ await client.test.test.find_one()
+ # Verify that the callback was called 1 time.
+ self.assertEqual(self.request_called, 1)
+
+ async def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self):
+ # Create an AsyncMongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens.
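+ # Note: create_client() sets retryReads=False; with retries enabled
+ # the failed find could be retried on another connection, obscuring
+ # whether the single authentication attempt under test failed.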
+ class CustomCallback(OIDCCallback):
+ count = 0
+
+ def fetch(self, a):
+ self.count += 1
+ return OIDCCallbackResult(access_token="bad value")
+
+ callback = CustomCallback()
+ client = await self.create_client(request_cb=callback)
+ # Perform a ``find`` operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.find_one()
+ # Verify that the callback was called 1 time.
+ self.assertEqual(callback.count, 1)
+
+ async def test_3_3_unexpected_error_code_does_not_clear_cache(self):
+ # Create an ``AsyncMongoClient`` with an OIDC callback that returns a valid token.
+ client = await self.create_client()
+
+ # Set a fail point for ``saslStart`` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["saslStart"], "errorCode": 20},
+ }
+ ):
+ # Perform a ``find`` operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.find_one()
+
+ # Assert that the callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Perform a ``find`` operation that succeeds.
+ await client.test.test.find_one()
+
+ # Assert that the callback has been called once.
+ self.assertEqual(self.request_called, 1)
+
+ async def test_4_1_reauthentication_succeeds(self):
+ # Create an ``AsyncMongoClient`` configured with a custom OIDC callback that
+ # implements the provider logic.
+ client = await self.create_client()
+ await client.aconnect()
+
+ # Set a fail point for the find command.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a ``find`` operation that succeeds.
+ await client.test.test.find_one()
+
+ # Verify that the callback was called 2 times (once during the connection
+ # handshake, and again during reauthentication).
+ self.assertEqual(self.request_called, 2)
+
+ async def test_4_2_read_commands_fail_if_reauthentication_fails(self):
+ # Create an ``AsyncMongoClient`` whose OIDC callback returns one good token and then
+ # bad tokens after the first call.
+ get_token = self.get_token
+
+ class CustomCallback(OIDCCallback):
+ count = 0
+
+ def fetch(self, _):
+ self.count += 1
+ if self.count == 1:
+ access_token = get_token()
+ else:
+ access_token = "bad value"
+ return OIDCCallbackResult(access_token=access_token)
+
+ callback = CustomCallback()
+ client = await self.create_client(request_cb=callback)
+
+ # Perform a read operation that succeeds.
+ await client.test.test.find_one()
+
+ # Set a fail point for the find command.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Perform a ``find`` operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.find_one()
+
+ # Verify that the callback was called 2 times.
+ self.assertEqual(callback.count, 2)
+
+ async def test_4_3_write_commands_fail_if_reauthentication_fails(self):
+ # Create an ``AsyncMongoClient`` whose OIDC callback returns one good token and then
+ # bad tokens after the first call.
+ get_token = self.get_token
+
+ class CustomCallback(OIDCCallback):
+ count = 0
+
+ def fetch(self, _):
+ self.count += 1
+ if self.count == 1:
+ access_token = get_token()
+ else:
+ access_token = "bad value"
+ return OIDCCallbackResult(access_token=access_token)
+
+ callback = CustomCallback()
+ client = await self.create_client(request_cb=callback)
+
+ # Perform an insert operation that succeeds.
+ await client.test.test.insert_one({})
+
+ # Set a fail point for the insert command.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["insert"], "errorCode": 391},
+ }
+ ):
+ # Perform an ``insert`` operation that fails.
+ with self.assertRaises(OperationFailure):
+ await client.test.test.insert_one({})
+
+ # Verify that the callback was called 2 times.
+ self.assertEqual(callback.count, 2)
+
+ async def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self):
+ # Create an OIDC configured client that can listen for `SaslStart` commands.
+ listener = EventListener()
+ client = await self.create_client(event_listeners=[listener])
+ await client.aconnect()
+
+ # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication.
+ client2 = await self.create_client()
+ await client2.test.test.find_one()
+ client.options.pool_options._credentials.cache.data = (
+ client2.options.pool_options._credentials.cache.data
+ )
+ await client2.close()
+ self.request_called = 0
+
+ # Perform an `insert` operation that succeeds.
+ await client.test.test.insert_one({})
+
+ # Assert that the callback was not called.
+ self.assertEqual(self.request_called, 0)
+
+ # Assert there were no `SaslStart` commands executed.
+ assert not any(
+ event.command_name.lower() == "saslstart" for event in listener.started_events
+ )
+ listener.reset()
+
+ # Set a fail point for `insert` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["insert"], "errorCode": 391},
+ }
+ ):
+ # Perform an `insert` operation that succeeds.
+ await client.test.test.insert_one({})
+
+ # Assert that the callback was called once.
+ self.assertEqual(self.request_called, 1)
+
+ # Assert there were `SaslStart` commands executed.
+ assert any(event.command_name.lower() == "saslstart" for event in listener.started_events)
+
+ async def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self):
+ # Create an OIDC configured client.
+ client = await self.create_client()
+
+ # Set a fail point for `find` commands.
+ async with self.fail_point(
+ {
+ "mode": {"times": 1},
+ "data": {"failCommands": ["find"], "errorCode": 391},
+ }
+ ):
+ # Start a new session.
+ async with client.start_session() as session:
+ # In the started session perform a `find` operation that succeeds.
+ await client.test.test.find_one({}, session=session)
+
+ # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication).
+ self.assertEqual(self.request_called, 2) + + async def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = await self.create_client(authMechanismProperties=props) + await client.test.test.find_one() + + async def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = await self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_speculative_auth_success(self): + client1 = await self.create_client() + await client1.test.test.find_one() + client2 = await self.create_client() + await client2.aconnect() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + async with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + await client2.test.test.find_one() + + async def test_reauthentication_succeeds_multiple_connections(self): + client1 = await self.create_client() + client2 = await self.create_client() + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py new file mode 100644 index 0000000000..7c659c6d93 --- /dev/null +++ b/test/asynchronous/test_auth_spec.py @@ -0,0 +1,113 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the auth spec tests.""" +from __future__ import annotations + +import glob +import json +import os +import sys +import warnings +from test.asynchronous import AsyncPyMongoTestCase + +import pytest + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +from pymongo import AsyncMongoClient +from pymongo.auth_oidc_shared import OIDCCallback + +pytestmark = pytest.mark.auth + +_IS_SYNC = False + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") + + +class TestAuthSpec(AsyncPyMongoTestCase): + pass + + +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + +def create_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") + + if not valid: + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, AsyncMongoClient, uri, connect=False) + else: + client = self.simple_client(uri, connect=False) + credentials = client.options.pool_options._credentials + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) + else: + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] + if expected is not None: + actual = credentials.mechanism_properties + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) + else: + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) + + return run_test + + +def create_tests(): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as auth_tests: + test_cases = json.load(auth_tests)["tests"] + for test_case in test_cases: + if test_case.get("optional", False): + continue + test_method = create_test(test_case) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) + + +create_tests() + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py new file mode 100644 index 0000000000..02958e6f0e --- /dev/null +++ b/test/asynchronous/test_bulk.py @@ -0,0 +1,1122 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the bulk API.""" +from __future__ import annotations + +import sys +import uuid +from typing import Any, Optional + +from pymongo.asynchronous.mongo_client import AsyncMongoClient + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest +from test.utils_shared import async_wait_until + +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.common import partition_node +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import * +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncBulkTestBase(AsyncIntegrationTest): + coll: AsyncCollection + coll_w0: AsyncCollection + + async def asyncSetUp(self): + await super().asyncSetUp() + self.coll = self.db.test + await self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) + + def assertEqualResponse(self, expected, actual): + """Compare response from bulk.execute() to expected response.""" + for key, value in expected.items(): + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": + expected_upserts = value + actual_upserts = actual["upserted"] + self.assertEqual( + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) + + for e, a in zip(expected_upserts, actual_upserts): + self.assertEqualUpsert(e, a) + + elif key == "writeErrors": + expected_errors = value + actual_errors = actual["writeErrors"] + self.assertEqual( + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) + + for e, a in zip(expected_errors, actual_errors): + self.assertEqualWriteError(e, a) + + else: + self.assertEqual( + actual.get(key), + value, + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", + ) + + def assertEqualUpsert(self, expected, actual): + """Compare bulk.execute()['upserts'] to expected value. + + Like: {'index': 0, '_id': ObjectId()} + """ + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": + # Unspecified value. + self.assertIn("_id", actual) + else: + self.assertEqual(expected["_id"], actual["_id"]) + + def assertEqualWriteError(self, expected, actual): + """Compare bulk.execute()['writeErrors'] to expected value. + + Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} + """ + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": + # Unspecified value. + self.assertIn("errmsg", actual) + else: + self.assertEqual(expected["errmsg"], actual["errmsg"]) + + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": + # Unspecified _id. 
+ self.assertIn("_id", actual_op)
+ actual_op.pop("_id")
+ expected_op.pop("_id")
+
+ self.assertEqual(expected_op, actual_op)
+
+
+class AsyncTestBulk(AsyncBulkTestBase):
+ async def test_empty(self):
+ with self.assertRaises(InvalidOperation):
+ await self.coll.bulk_write([])
+
+ async def test_insert(self):
+ expected = {
+ "nMatched": 0,
+ "nModified": 0,
+ "nUpserted": 0,
+ "nInserted": 1,
+ "nRemoved": 0,
+ "upserted": [],
+ "writeErrors": [],
+ "writeConcernErrors": [],
+ }
+
+ result = await self.coll.bulk_write([InsertOne({})])
+ self.assertEqualResponse(expected, result.bulk_api_result)
+ self.assertEqual(1, result.inserted_count)
+ self.assertEqual(1, await self.coll.count_documents({}))
+
+ async def _test_update_many(self, update):
+ expected = {
+ "nMatched": 2,
+ "nModified": 2,
+ "nUpserted": 0,
+ "nInserted": 0,
+ "nRemoved": 0,
+ "upserted": [],
+ "writeErrors": [],
+ "writeConcernErrors": [],
+ }
+ await self.coll.insert_many([{}, {}])
+
+ result = await self.coll.bulk_write([UpdateMany({}, update)])
+ self.assertEqualResponse(expected, result.bulk_api_result)
+ self.assertEqual(2, result.matched_count)
+ self.assertIn(result.modified_count, (2, None))
+
+ async def test_update_many(self):
+ await self._test_update_many({"$set": {"foo": "bar"}})
+
+ @async_client_context.require_version_min(4, 2, 0)
+ async def test_update_many_pipeline(self):
+ await self._test_update_many([{"$set": {"foo": "bar"}}])
+
+ async def test_array_filters_validation(self):
+ # The operations raise TypeError at construction time, so no await is needed.
+ with self.assertRaises(TypeError):
+ UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type]
+ with self.assertRaises(TypeError):
+ UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type]
+
+ async def test_array_filters_unacknowledged(self):
+ coll = self.coll_w0
+ update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}])
+ update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}])
+ with self.assertRaises(ConfigurationError):
+ await coll.bulk_write([update_one])
+ with self.assertRaises(ConfigurationError):
+ await coll.bulk_write([update_many])
+
+ async def _test_update_one(self, update):
+ expected = {
+ "nMatched": 1,
+ "nModified": 1,
+ "nUpserted": 0,
+ "nInserted": 0,
+ "nRemoved": 0,
+ "upserted": [],
+ "writeErrors": [],
+ "writeConcernErrors": [],
+ }
+
+ await self.coll.insert_many([{}, {}])
+
+ result = await self.coll.bulk_write([UpdateOne({}, update)])
+ self.assertEqualResponse(expected, result.bulk_api_result)
+ self.assertEqual(1, result.matched_count)
+ self.assertIn(result.modified_count, (1, None))
+
+ async def test_update_one(self):
+ await self._test_update_one({"$set": {"foo": "bar"}})
+
+ @async_client_context.require_version_min(4, 2, 0)
+ async def test_update_one_pipeline(self):
+ await self._test_update_one([{"$set": {"foo": "bar"}}])
+
+ async def test_replace_one(self):
+ expected = {
+ "nMatched": 1,
+ "nModified": 1,
+ "nUpserted": 0,
+ "nInserted": 0,
+ "nRemoved": 0,
+ "upserted": [],
+ "writeErrors": [],
+ "writeConcernErrors": [],
+ }
+
+ await self.coll.insert_many([{}, {}])
+
+ result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})])
+ self.assertEqualResponse(expected, result.bulk_api_result)
+ self.assertEqual(1, result.matched_count)
+ self.assertIn(result.modified_count, (1, None))
+
+ async def test_remove(self):
+ # Test removing all documents, ordered.
+ expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([DeleteMany({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.deleted_count) + + async def test_remove_one(self): + # Test removing one document, empty selector. + await self.coll.insert_many([{}, {}]) + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = await self.coll.bulk_write([DeleteOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.deleted_count) + self.assertEqual(await self.coll.count_documents({}), 1) + + async def test_upsert(self): + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + } + + result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None + self.assertEqual(1, len(result.upserted_ids)) + self.assertIsInstance(result.upserted_ids.get(0), ObjectId) + + self.assertEqual(await self.coll.count_documents({"foo": "bar"}), 1) + + async def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + n_docs = await async_client_context.max_write_batch_size + 100 + requests = [InsertOne[dict]({}) for _ in range(n_docs)] + result = await self.coll.bulk_write(requests, ordered=False) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, await self.coll.count_documents({})) + + # Same with ordered bulk. + await self.coll.drop() + result = await self.coll.bulk_write(requests) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, await self.coll.count_documents({})) + + async def test_bulk_max_message_size(self): + await self.coll.delete_many({}) + self.addAsyncCleanup(self.coll.delete_many, {}) + _16_MB = 16 * 1000 * 1000 + # Generate a list of documents such that the first batched OP_MSG is + # as close as possible to the 48MB limit. + docs = [ + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, + ] + # Fill in the remaining ~10000 bytes with small documents. 
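+        # The arithmetic: maxMessageSizeBytes is 48MB (48 * 1000 * 1000), so
+        # the three ~16MB documents above nearly fill the first batched
+        # OP_MSG by themselves; trimming 10000 bytes from the third leaves
+        # just enough headroom for the tiny documents below to pack that
+        # first message right up to the limit.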
+ for i in range(4, 10000): + docs.append({"_id": i}) + result = await self.coll.insert_many(docs) + self.assertEqual(len(docs), len(result.inserted_ids)) + + async def test_generator_insert(self): + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await self.coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_bulk_write_no_results(self): + result = await self.coll_w0.bulk_write([InsertOne({})]) + self.assertFalse(result.acknowledged) + self.assertRaises(InvalidOperation, lambda: result.inserted_count) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_ids) + + async def test_bulk_write_invalid_arguments(self): + # The requests argument must be a list. + generator = (InsertOne[dict]({}) for _ in range(10)) + with self.assertRaises(TypeError): + await self.coll.bulk_write(generator) # type: ignore[arg-type] + + # Document is not wrapped in a bulk write operation. + with self.assertRaises(TypeError): + await self.coll.bulk_write([{}]) # type: ignore[list-item] + + async def test_upsert_large(self): + big = "a" * (await async_client_context.max_bson_size - 37) + result = await self.coll.bulk_write( + [UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, await self.coll.count_documents({"x": 1})) + + async def test_client_generated_upsert_id(self): + result = await self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] + + result = await coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) + + # The `Binary` values are returned as `bytes` objects. + for _id in ids: + _id["f"] = bytes(_id["f"]) + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_single_ordered_batch(self): + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) + + async def test_single_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 
1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_single_unordered_batch(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + result = await self.coll.bulk_write(requests, ordered=False) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) + + async def test_single_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. 
+ self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) + + async def test_large_inserts_ordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(1, result["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + write_result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) + + self.assertEqual(6, write_result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + async def test_large_inserts_unordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + +class AsyncBulkAuthorizationTestBase(AsyncBulkTestBase): + @async_client_context.require_auth + @async_client_context.require_no_api_version + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user(self.db.name, "readonly", "pw", ["read"]) + await self.db.command( + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + await async_client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) + + async def asyncTearDown(self): + await self.db.command("dropRole", "noremove") + await remove_all_users(self.db) + + +class AsyncTestBulkUnacknowledged(AsyncBulkTestBase): + async def asyncTearDown(self): + await self.coll.delete_many({}) + + async def test_no_results_ordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 
documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_ordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should not be executed since the batch is ordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 3 + + await async_wait_until(predicate, "insert 3 documents") + self.assertEqual({"_id": 1}, await self.coll.find_one({"_id": 1})) + + async def test_no_results_unordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_unordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should be executed since the batch is unordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + +class AsyncTestBulkAuthorization(AsyncBulkAuthorizationTestBase): + async def test_readonly(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await self.async_rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + await coll.find_one() + with self.assertRaises(OperationFailure): + await coll.bulk_write([InsertOne({"x": 1})]) + + async def test_no_remove(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await self.async_rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + await coll.find_one() + requests = [ + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. 
+        ]
+        with self.assertRaises(OperationFailure):
+            await coll.bulk_write(requests)  # type: ignore[arg-type]
+        self.assertEqual({1, 2}, set(await self.coll.distinct("x")))
+
+
+class AsyncTestBulkWriteConcern(AsyncBulkTestBase):
+    w: Optional[int]
+    secondary: Optional[AsyncMongoClient]
+
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.w = async_client_context.w
+        self.secondary = None
+        if self.w is not None and self.w > 1:
+            for member in (await async_client_context.hello)["hosts"]:
+                if member != (await async_client_context.hello)["primary"]:
+                    self.secondary = await self.async_single_client(*partition_node(member))
+                    break
+
+    async def asyncTearDown(self):
+        if self.secondary:
+            await self.secondary.close()
+
+    async def cause_wtimeout(self, requests, ordered):
+        if not async_client_context.test_commands_enabled:
+            self.skipTest("Test commands must be enabled.")
+
+        # Use the rsSyncApplyStop failpoint to pause replication on a
+        # secondary which will cause a wtimeout error.
+        await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn")
+
+        try:
+            coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1))
+            return await coll.bulk_write(requests, ordered=ordered)
+        finally:
+            await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off")
+
+    @async_client_context.require_version_max(7, 1)  # PYTHON-4560
+    @async_client_context.require_replica_set
+    @async_client_context.require_secondaries_count(1)
+    async def test_write_concern_failure_ordered(self):
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = await coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})])
+        self.assertTrue(result.acknowledged)
+
+        requests: list[Any] = [InsertOne({"a": 1}), InsertOne({"a": 2})]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            await self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 0,
+                "nInserted": 2,
+                "nRemoved": 0,
+                "upserted": [],
+                "writeErrors": [],
+            },
+            details,
+        )
+
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 0)
+
+        failed = details["writeConcernErrors"][0]
+        self.assertEqual(64, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+
+        await self.coll.delete_many({})
+        await self.coll.create_index("a", unique=True)
+        self.addAsyncCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to a write concern error as well
+        # as a duplicate key error on this ordered batch.
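+        # Expected shape: the insert at index 0 and the upsert at index 1
+        # succeed (each still accumulating a wtimeout write concern error),
+        # the insert at index 2 hits the unique index on "a", and, because
+        # the batch is ordered, index 3 is never attempted.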
+        requests = [
+            InsertOne({"a": 1}),
+            ReplaceOne({"a": 3}, {"b": 1}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            await self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 1,
+                "nInserted": 1,
+                "nRemoved": 0,
+                "upserted": [{"index": 1, "_id": "..."}],
+                "writeErrors": [
+                    {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}}
+                ],
+            },
+            details,
+        )
+
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+        failed = details["writeErrors"][0]
+        self.assertIn("duplicate", failed["errmsg"])
+
+    @async_client_context.require_version_max(7, 1)  # PYTHON-4560
+    @async_client_context.require_replica_set
+    @async_client_context.require_secondaries_count(1)
+    async def test_write_concern_failure_unordered(self):
+        self.skipTest("Skipping until PYTHON-4865 is resolved.")
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = await coll_ww.bulk_write(
+            [DeleteOne({"something": "that does not exist"})], ordered=False
+        )
+        self.assertTrue(result.acknowledged)
+
+        requests = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 2}),
+        ]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            await self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(0, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+
+        await self.coll.delete_many({})
+        await self.coll.create_index("a", unique=True)
+        self.addAsyncCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to a write concern error as well
+        # as a duplicate key error on this unordered batch.
+        requests: list = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            await self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(1, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+
+        failed = details["writeErrors"][0]
+        self.assertEqual(2, failed["index"])
+        self.assertEqual(11000, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+        self.assertEqual(1, failed["op"]["a"])
+
+        failed = details["writeConcernErrors"][0]
+        self.assertEqual(64, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+
+        upserts = details["upserted"]
+        self.assertEqual(1, len(upserts))
+        self.assertEqual(1, upserts[0]["index"])
+        self.assertTrue(upserts[0].get("_id"))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py
new file mode 100644
index 0000000000..3fb8b517f3
--- /dev/null
+++ b/test/asynchronous/test_change_stream.py
@@ -0,0 +1,1158 @@
+# Copyright 2017 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test the change_stream module."""
+from __future__ import annotations
+
+import asyncio
+import os
+import random
+import string
+import sys
+import threading
+import time
+import uuid
+from itertools import product
+from typing import no_type_check
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import (
+    AsyncIntegrationTest,
+    AsyncPyMongoTestCase,
+    Version,
+    async_client_context,
+    unittest,
+)
+from test.asynchronous.unified_format import generate_test_classes
+from test.utils_shared import (
+    AllowListEventListener,
+    EventListener,
+    OvertCommandListener,
+    async_wait_until,
+)
+
+from bson import SON, ObjectId, Timestamp, encode
+from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary
+from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
+from pymongo import AsyncMongoClient
+from pymongo.asynchronous.command_cursor import AsyncCommandCursor
+from pymongo.errors import (
+    InvalidOperation,
+    OperationFailure,
+    ServerSelectionTimeoutError,
+)
+from pymongo.message import _CursorAddress
+from pymongo.read_concern import ReadConcern
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = False
+
+
+class TestAsyncChangeStreamBase(AsyncIntegrationTest):
+    RUN_ON_LOAD_BALANCER = True
+
+    async def change_stream_with_client(self, client, *args, **kwargs):
+        """Create a change stream using the given client and return it."""
+        raise NotImplementedError
+
+    async def change_stream(self, *args, **kwargs):
+        """Create a change stream using the default client and return it."""
+        return await self.change_stream_with_client(self.client, *args, **kwargs)
+
+    async def client_with_listener(self, *commands):
+        """Return a client with an AllowListEventListener."""
+        listener = AllowListEventListener(*commands)
+        client = await self.async_rs_or_single_client(event_listeners=[listener])
+        return client, listener
+
+    def watched_collection(self, *args, **kwargs):
+        """Return a collection that is watched by self.change_stream()."""
+        # Construct a unique collection for each test.
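+        # self.id() is the test's full dotted path; keeping its last two
+        # dot-separated components ("TestClass.test_method") yields a
+        # collection name unique to each test method.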
+ collname = ".".join(self.id().rsplit(".", 2)[1:]) + return self.db.get_collection(collname, *args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + """Cause a change stream invalidate event.""" + raise NotImplementedError + + def generate_unique_collnames(self, numcolls): + """Generate numcolls collection names unique to a test.""" + collnames = [] + for idx in range(1, numcolls + 1): + collnames.append(self.id() + "_" + str(idx)) + return collnames + + async def get_resume_token(self, invalidate=False): + """Get a resume token to use for starting a change stream.""" + # Ensure targeted collection exists before starting. + coll = self.watched_collection(write_concern=WriteConcern("majority")) + await coll.insert_one({}) + + if invalidate: + async with await self.change_stream( + [{"$match": {"operationType": "invalidate"}}] + ) as cs: + if isinstance(cs._target, AsyncMongoClient): + self.skipTest("cluster-level change streams cannot be invalidated") + await self.generate_invalidate_event(cs) + return (await cs.next())["_id"] + else: + async with await self.change_stream() as cs: + await coll.insert_one({"data": 1}) + return (await cs.next())["_id"] + + async def get_start_at_operation_time(self): + """Get an operationTime. Advances the operation clock beyond the most + recently returned timestamp. + """ + optime = (await self.client.admin.command("ping"))["operationTime"] + return Timestamp(optime.time, optime.inc + 1) + + async def insert_one_and_check(self, change_stream, doc): + """Insert a document and check that it shows up in the change stream.""" + raise NotImplementedError + + async def kill_change_stream_cursor(self, change_stream): + """Cause a cursor not found error on the next getMore.""" + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.watched_collection().database.client + await client._close_cursor_now(cursor.cursor_id, address) + + +class APITestsMixin: + @no_type_check + async def test_watch(self): + async with await self.change_stream( + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) + self.assertEqual(1000, change_stream._max_await_time_ms) + self.assertEqual(100, change_stream._batch_size) + self.assertIsInstance(change_stream._cursor, AsyncCommandCursor) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) + await self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) + _ = await change_stream.next() + resume_token = change_stream.resume_token + with self.assertRaises(TypeError): + await self.change_stream(pipeline={}) + with self.assertRaises(TypeError): + await self.change_stream(full_document={}) + # No Error. + async with await self.change_stream(resume_after=resume_token): + pass + + @no_type_check + async def test_try_next(self): + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + await coll.insert_one({}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream(max_await_time_ms=250) as stream: + self.assertIsNone(await stream.try_next()) # No changes initially. + await coll.insert_one({}) # Generate a change. 
+ + # On sharded clusters, even majority-committed changes only show + # up once an event that sorts after it shows up on the other + # shard. So, we wait on try_next to eventually return changes. + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + + @no_type_check + async def test_try_next_runs_one_getmore(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream_with_client(client, max_await_time_ms=250) as stream: + self.assertEqual(listener.started_command_names(), ["aggregate"]) + listener.reset() + + # Confirm that only a single getMore is run even when no documents + # are returned. + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + + # Get at least one change before resuming. + await coll.insert_one({"_id": 2}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + listener.reset() + + # Cause the next request to initiate the resume process. + await self.kill_change_stream_cursor(stream) + listener.reset() + + # The sequence should be: + # - getMore, fail + # - resume with aggregate command + # - no results, return immediately without another getMore + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) + listener.reset() + + # Stream still works after a resume. + await coll.insert_one({"_id": 3}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) + self.assertIsNone(await stream.try_next()) + + @no_type_check + async def test_batch_size_is_honored(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + # Expected batchSize. + expected = {"batchSize": 23} + async with await self.change_stream_with_client( + client, max_await_time_ms=250, batch_size=23 + ) as stream: + # Confirm that batchSize is honored for initial batch. + cmd = listener.started_events[0].command + self.assertEqual(cmd["cursor"], expected) + listener.reset() + # Confirm that batchSize is honored by getMores. 
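+            # The initial aggregate nests the batch size under "cursor",
+            # while getMore takes a top-level "batchSize" field, so only the
+            # value itself is compared here.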
+ self.assertIsNone(await stream.try_next()) + cmd = listener.started_events[0].command + key = next(iter(expected)) + self.assertEqual(expected[key], cmd[key]) + + # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check + @async_client_context.require_version_min(4, 2, 0) + async def test_start_at_operation_time(self): + optime = await self.get_start_at_operation_time() + + coll = self.watched_collection(write_concern=WriteConcern("majority")) + ndocs = 3 + await coll.insert_many([{"data": i} for i in range(ndocs)]) + + async with await self.change_stream(start_at_operation_time=optime) as cs: + for _i in range(ndocs): + await cs.next() + + @no_type_check + async def _test_full_pipeline(self, expected_cs_stage): + client, listener = await self.client_with_listener("aggregate") + async with await self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: + pass + + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) + + @no_type_check + async def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + await self._test_full_pipeline({}) + + @no_type_check + async def test_iteration(self): + async with await self.change_stream(batch_size=2) as change_stream: + num_inserted = 10 + await self.watched_collection().insert_many([{} for _ in range(num_inserted)]) + inserts_received = 0 + async for change in change_stream: + self.assertEqual(change["operationType"], "insert") + inserts_received += 1 + if inserts_received == num_inserted: + break + await self._test_invalidate_stops_iteration(change_stream) + + @no_type_check + @async_client_context.require_sync + def _test_next_blocks(self, change_stream): + inserted_doc = {"_id": ObjectId()} + changes = [] + t = threading.Thread(target=lambda: changes.append(change_stream.next())) + t.start() + # Sleep for a bit to prove that the call to next() blocks. + time.sleep(1) + self.assertTrue(t.is_alive()) + self.assertFalse(changes) + self.watched_collection().insert_one(inserted_doc) + # Join with large timeout to give the server time to return the change, + # in particular for shard clusters. + t.join(30) + self.assertFalse(t.is_alive()) + self.assertEqual(1, len(changes)) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) + + @no_type_check + @async_client_context.require_sync + async def test_next_blocks(self): + """Test that next blocks until a change is readable""" + # Use a short wait time to speed up the test. + async with await self.change_stream(max_await_time_ms=250) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + async def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + async with await self.watched_collection().aggregate( + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + def test_concurrent_close(self): + """Ensure a ChangeStream can be closed from another thread.""" + # Use a short wait time to speed up the test. 
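+        # Closing the stream from this thread kills the server-side cursor;
+        # the iterating thread may then surface OperationFailure with code
+        # 237 (CursorKilled), which the iterator below tolerates.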
+        with self.change_stream(max_await_time_ms=250) as change_stream:
+
+            def iterate_cursor():
+                try:
+                    for _ in change_stream:
+                        pass
+                except OperationFailure as e:
+                    if e.code != 237:  # CursorKilled error code
+                        raise
+
+            t = threading.Thread(target=iterate_cursor)
+            t.start()
+            self.watched_collection().insert_one({})
+            # This test is synchronous-only, so sleep with time.sleep();
+            # asyncio.sleep() would just return an unawaited coroutine.
+            time.sleep(1)
+            change_stream.close()
+            t.join(3)
+            self.assertFalse(t.is_alive())
+
+    @no_type_check
+    async def test_unknown_full_document(self):
+        """Must rely on the server to raise an error on unknown fullDocument."""
+        try:
+            async with await self.change_stream(full_document="notValidatedByPyMongo"):
+                pass
+        except OperationFailure:
+            pass
+
+    @no_type_check
+    async def test_change_operations(self):
+        """Test each operation type."""
+        expected_ns = {
+            "db": self.watched_collection().database.name,
+            "coll": self.watched_collection().name,
+        }
+        async with await self.change_stream() as change_stream:
+            # Insert.
+            inserted_doc = {"_id": ObjectId(), "foo": "bar"}
+            await self.watched_collection().insert_one(inserted_doc)
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertEqual(change["fullDocument"], inserted_doc)
+            # Update.
+            update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}}
+            await self.watched_collection().update_one(inserted_doc, update_spec)
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "update")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertNotIn("fullDocument", change)
+
+            expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]}
+            if async_client_context.version.at_least(4, 5, 0):
+                expected_update_description["truncatedArrays"] = []
+            self.assertEqual(
+                expected_update_description,
+                {
+                    k: v
+                    for k, v in change["updateDescription"].items()
+                    if k in expected_update_description
+                },
+            )
+            # Replace.
+            await self.watched_collection().replace_one({"new": 1}, {"foo": "bar"})
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "replace")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertEqual(change["fullDocument"], inserted_doc)
+            # Delete.
+            await self.watched_collection().delete_one({"foo": "bar"})
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "delete")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertNotIn("fullDocument", change)
+            # Invalidate.
+            await self._test_get_invalidate_event(change_stream)
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_start_after(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        # resume_after cannot resume after invalidate.
+        with self.assertRaises(OperationFailure):
+            await self.change_stream(resume_after=resume_token)
+
+        # start_after can resume after invalidate.
+        async with await self.change_stream(start_after=resume_token) as change_stream:
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_start_after_resume_process_with_changes(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        async with await self.change_stream(
+            start_after=resume_token, max_await_time_ms=250
+        ) as change_stream:
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+            self.assertIsNone(await change_stream.try_next())
+            await self.kill_change_stream_cursor(change_stream)
+
+            await self.watched_collection().insert_one({"_id": 3})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 3})
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_start_after_resume_process_without_changes(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        async with await self.change_stream(
+            start_after=resume_token, max_await_time_ms=250
+        ) as change_stream:
+            self.assertIsNone(await change_stream.try_next())
+            await self.kill_change_stream_cursor(change_stream)
+
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+
+class ProseSpecTestsMixin:
+    @no_type_check
+    async def _client_with_listener(self, *commands):
+        listener = AllowListEventListener(*commands)
+        client = await AsyncPyMongoTestCase.unmanaged_async_rs_or_single_client(
+            event_listeners=[listener]
+        )
+        self.addAsyncCleanup(client.close)
+        return client, listener
+
+    @no_type_check
+    async def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3):
+        await self.watched_collection().insert_many([{"data": k} for k in range(batch_size)])
+        for _ in range(batch_size):
+            change = await anext(change_stream)
+        return change
+
+    def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None):
+        """Predicts what the resume token should currently be for server
+        versions that don't support postBatchResumeToken. Assumes the stream
+        has never returned any changes if previous_change is None.
+        """
+        if previous_change is None:
+            agg_cmd = listener.started_events[0]
+            stage = agg_cmd.command["pipeline"][0]["$changeStream"]
+            return stage.get("resumeAfter") or stage.get("startAfter")
+
+        return previous_change["_id"]
+
+    def _get_expected_resume_token(self, stream, listener, previous_change=None):
+        """Predicts what the resume token should currently be for server
+        versions that support postBatchResumeToken. Assumes the stream has
+        never returned any changes if previous_change is None. Assumes
+        listener is an AllowListEventListener that listens for aggregate and
+        getMore commands.
+ """ + if previous_change is None or stream._cursor._has_next(): + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) + if token is not None: + return token + + response = listener.succeeded_events[-1].reply + return response["cursor"]["postBatchResumeToken"] + + @no_type_check + async def _test_raises_error_on_missing_id(self, expected_exception): + """AsyncChangeStream will raise an exception if the server response is + missing the resume token. + """ + async with await self.change_stream([{"$project": {"_id": 0}}]) as change_stream: + await self.watched_collection().insert_one({}) + with self.assertRaises(expected_exception): + await anext(change_stream) + # The cursor should now be closed. + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + @no_type_check + async def _test_update_resume_token(self, expected_rt_getter): + """AsyncChangeStream must continuously track the last seen resumeToken.""" + client, listener = await self._client_with_listener("aggregate", "getMore") + coll = self.watched_collection(write_concern=WriteConcern("majority")) + async with await self.change_stream_with_client(client) as change_stream: + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) + for _ in range(3): + await coll.insert_one({}) + change = await anext(change_stream) + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) + + # Prose test no. 1 + @async_client_context.require_version_min(4, 2, 0) + async def test_update_resume_token(self): + await self._test_update_resume_token(self._get_expected_resume_token) + + # Prose test no. 2 + @async_client_context.require_version_min(4, 2, 0) + async def test_raises_error_on_missing_id_418plus(self): + # Server returns an error on 4.1.8+ + await self._test_raises_error_on_missing_id(OperationFailure) + + # Prose test no. 3 + @no_type_check + async def test_resume_on_error(self): + async with await self.change_stream() as change_stream: + await self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + await self.kill_change_stream_cursor(change_stream) + await self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 4 + @no_type_check + @async_client_context.require_failCommand_fail_point + async def test_no_resume_attempt_if_aggregate_command_fails(self): + # Set non-retryable error on aggregate command. + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} + client, listener = await self._client_with_listener("aggregate", "getMore") + async with self.fail_point(fail_point): + try: + _ = await self.change_stream_with_client(client) + except OperationFailure: + pass + + # Driver should have attempted aggregate command only once. + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") + + # Prose test no. 5 - REMOVED + # Prose test no. 6 - SKIPPED + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensure compliance with this prose test. + + # Prose test no. 7 + @no_type_check + async def test_initial_empty_batch(self): + async with await self.change_stream() as change_stream: + # The first batch should be empty. 
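+            # Per the change streams spec, the aggregate that opens a stream
+            # returns an empty firstBatch together with a live (nonzero)
+            # cursor id; the driver must keep that cursor open instead of
+            # treating the empty batch as exhaustion.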
+            self.assertFalse(change_stream._cursor._has_next())
+            cursor_id = change_stream._cursor.cursor_id
+            self.assertTrue(cursor_id)
+            await self.insert_one_and_check(change_stream, {})
+            # Make sure we're still using the same cursor.
+            self.assertEqual(cursor_id, change_stream._cursor.cursor_id)
+
+    # Prose test no. 8
+    @no_type_check
+    async def test_kill_cursors(self):
+        def raise_error():
+            raise ServerSelectionTimeoutError("mock error")
+
+        async with await self.change_stream() as change_stream:
+            await self.insert_one_and_check(change_stream, {"_id": 1})
+            # Cause a cursor not found error on the next getMore.
+            cursor = change_stream._cursor
+            await self.kill_change_stream_cursor(change_stream)
+            cursor.close = raise_error
+            await self.insert_one_and_check(change_stream, {"_id": 2})
+
+    # Prose test no. 10 - SKIPPED
+    # This test is identical to prose test no. 3.
+
+    # Prose test no. 11
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_resumetoken_empty_batch(self):
+        client, listener = await self._client_with_listener("getMore")
+        async with await self.change_stream_with_client(client) as change_stream:
+            self.assertIsNone(await change_stream.try_next())
+            resume_token = change_stream.resume_token
+
+        response = listener.succeeded_events[0].reply
+        self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])
+
+    # Prose test no. 12
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_resumetoken_exhausted_batch(self):
+        client, listener = await self._client_with_listener("getMore")
+        async with await self.change_stream_with_client(client) as change_stream:
+            await self._populate_and_exhaust_change_stream(change_stream)
+            resume_token = change_stream.resume_token
+
+        response = listener.succeeded_events[-1].reply
+        self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])
+
+    # Prose test no. 13
+    @no_type_check
+    async def test_resumetoken_partially_iterated_batch(self):
+        # When batch has been iterated up to but not including the last element.
+        # Resume token should be _id of previous change document.
+        async with await self.change_stream() as change_stream:
+            await self.watched_collection(write_concern=WriteConcern("majority")).insert_many(
+                [{"data": k} for k in range(3)]
+            )
+            for _ in range(2):
+                change = await anext(change_stream)
+            resume_token = change_stream.resume_token
+
+        self.assertEqual(resume_token, change["_id"])
+
+    @no_type_check
+    async def _test_resumetoken_uniterated_nonempty_batch(self, resume_option):
+        # When the batch is not empty and hasn't been iterated at all.
+        # Resume token should be same as the resume option used.
+        resume_point = await self.get_resume_token()
+
+        # Insert some documents so that firstBatch isn't empty.
+        await self.watched_collection(write_concern=WriteConcern("majority")).insert_many(
+            [{"a": 1}, {"b": 2}, {"c": 3}]
+        )
+
+        # Resume token should be same as the resume option.
+        async with await self.change_stream(**{resume_option: resume_point}) as change_stream:
+            self.assertTrue(change_stream._cursor._has_next())
+            resume_token = change_stream.resume_token
+            self.assertEqual(resume_token, resume_point)
+
+    # Prose test no. 14
+    @no_type_check
+    @async_client_context.require_no_mongos
+    async def test_resumetoken_uniterated_nonempty_batch_resumeafter(self):
+        await self._test_resumetoken_uniterated_nonempty_batch("resume_after")
+
+    # Prose test no. 
14 + @no_type_check + @async_client_context.require_no_mongos + @async_client_context.require_version_min(4, 2, 0) + async def test_resumetoken_uniterated_nonempty_batch_startafter(self): + await self._test_resumetoken_uniterated_nonempty_batch("start_after") + + # Prose test no. 17 + @no_type_check + @async_client_context.require_version_min(4, 2, 0) + async def test_startafter_resume_uses_startafter_after_empty_getMore(self): + # Resume should use startAfter after no changes have been returned. + resume_point = await self.get_resume_token() + + client, listener = await self._client_with_listener("aggregate") + async with await self.change_stream_with_client( + client, start_after=resume_point + ) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + await change_stream.try_next() # No changes + await self.kill_change_stream_cursor(change_stream) + await change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 18 + @no_type_check + @async_client_context.require_version_min(4, 2, 0) + async def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): + # Resume should use resumeAfter after some changes have been returned. + resume_point = await self.get_resume_token() + + client, listener = await self._client_with_listener("aggregate") + async with await self.change_stream_with_client( + client, start_after=resume_point + ) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + await self.watched_collection().insert_one({}) + await anext(change_stream) # Changes + await self.kill_change_stream_cursor(change_stream) + await change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 
19 + @no_type_check + async def test_split_large_change(self): + server_version = async_client_context.version + if not server_version.at_least(6, 0, 9): + self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+") + if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0): + self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases") + await self.db.drop_collection("test_split_large_change") + coll = await self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + await coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + async with await coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + await coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = await change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = await change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + + +class TestClusterAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): + dbs: list + + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.dbs = [self.db, self.client.pymongo_test_2] + + async def asyncTearDown(self): + for db in self.dbs: + await self.client.drop_database(db) + await super().asyncTearDown() + + async def change_stream_with_client(self, client, *args, **kwargs): + return await client.watch(*args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + self.skipTest("cluster-level change streams cannot be invalidated") + + async def _test_get_invalidate_event(self, change_stream): + # Cluster-level change streams don't get invalidated. + pass + + async def _test_invalidate_stops_iteration(self, change_stream): + # Cluster-level change streams don't get invalidated. + pass + + async def _insert_and_check(self, change_stream, db, collname, doc): + coll = db[collname] + await coll.insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + async def insert_one_and_check(self, change_stream, doc): + db = random.choice(self.dbs) + collname = self.id() + await self._insert_and_check(change_stream, db, collname, doc) + + async def test_simple(self): + collnames = self.generate_unique_collnames(3) + async with await self.change_stream() as change_stream: + for db, collname in product(self.dbs, collnames): + await self._insert_and_check(change_stream, db, collname, {"_id": collname}) + + @async_client_context.require_sync + async def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + async with await self.client.admin.aggregate( + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + async def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. 
+ """ + await self._test_full_pipeline({"allChangesForCluster": True}) + + +class TestAsyncDatabaseAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + async def change_stream_with_client(self, client, *args, **kwargs): + return await client[self.db.name].watch(*args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + # Dropping the database invalidates the change stream. + await change_stream._client.drop_database(self.db.name) + + async def _test_get_invalidate_event(self, change_stream): + # Cache collection names. + dropped_colls = await self.db.list_collection_names() + # Drop the watched database to get an invalidate event. + await self.generate_invalidate_event(change_stream) + change = await change_stream.next() + # 4.1+ returns "drop" events for each collection in dropped database + # and a "dropDatabase" event for the database itself. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + for _ in range(len(dropped_colls)): + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) + change = await change_stream.next() + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) + # Get next change. + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The AsyncChangeStream should be dead. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def _test_invalidate_stops_iteration(self, change_stream): + # Drop the watched database to get an invalidate event. + await change_stream._client.drop_database(self.db.name) + # Check drop and dropDatabase events. + async for change in change_stream: + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + async def _insert_and_check(self, change_stream, collname, doc): + coll = self.db[collname] + await coll.insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + async def insert_one_and_check(self, change_stream, doc): + await self._insert_and_check(change_stream, self.id(), doc) + + async def test_simple(self): + collnames = self.generate_unique_collnames(3) + async with await self.change_stream() as change_stream: + for collname in collnames: + await self._insert_and_check( + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) + + async def test_isolation(self): + # Ensure inserts to other dbs don't show up in our AsyncChangeStream. 
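+        # A database-level change stream is scoped to one database, so the
+        # write to other_db below must not surface before the subsequent
+        # write to the watched database does.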
+        other_db = self.client.pymongo_test_temp + self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs") + collname = self.id() + async with await self.change_stream() as change_stream: + await other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())}) + await self._insert_and_check( + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) + await self.client.drop_database(other_db) + + +class TestAsyncCollectionAsyncChangeStream( + TestAsyncChangeStreamBase, APITestsMixin, ProseSpecTestsMixin +): + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + # Use a new collection for each test. + await self.watched_collection().drop() + await self.watched_collection().insert_one({}) + + async def change_stream_with_client(self, client, *args, **kwargs): + return ( + await client[self.db.name] + .get_collection(self.watched_collection().name) + .watch(*args, **kwargs) + ) + + async def generate_invalidate_event(self, change_stream): + # Dropping the collection invalidates the change stream. + await change_stream._target.drop() + + async def _test_invalidate_stops_iteration(self, change_stream): + await self.generate_invalidate_event(change_stream) + # Check drop and invalidate events. + async for change in change_stream: + self.assertIn(change["operationType"], ("drop", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + async def _test_get_invalidate_event(self, change_stream): + # Drop the watched collection to get an invalidate event. + await change_stream._target.drop() + change = await change_stream.next() + # 4.1+ returns a "drop" change document. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + self.assertEqual( + change["ns"], + {"db": change_stream._target.database.name, "coll": change_stream._target.name}, + ) + # Last change should be invalidate. + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The AsyncChangeStream should be dead.
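+        # After an invalidate event the cursor cannot be resumed without a + # startAfter token, so further iteration must raise StopAsyncIteration.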
+ with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def insert_one_and_check(self, change_stream, doc): + await self.watched_collection().insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual( + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) + + async def test_raw(self): + """Test with RawBSONDocument.""" + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) + async with await raw_coll.watch() as change_stream: + raw_doc = RawBSONDocument(encode({"_id": 1})) + await self.watched_collection().insert_one(raw_doc) + change = await anext(change_stream) + self.assertIsInstance(change, RawBSONDocument) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) + + @async_client_context.require_version_min(4, 0) # Needed for start_at_operation_time. + async def test_uuid_representations(self): + """Test with uuid document _ids and different uuid_representation.""" + optime = (await self.db.command("ping"))["operationTime"] + await self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) + for uuid_representation in ALL_UUID_REPRESENTATIONS: + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch( + start_at_operation_time=optime, max_await_time_ms=1 + ) as change_stream: + _ = await change_stream.next() + resume_token_1 = change_stream.resume_token + _ = await change_stream.next() + resume_token_2 = change_stream.resume_token + + # Should not error. + async with await coll.watch(resume_after=resume_token_1): + pass + async with await coll.watch(resume_after=resume_token_2): + pass + + async def test_document_id_order(self): + """Test with document _ids that need their order preserved.""" + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} + for document_class in (dict, SON, RawBSONDocument): + options = self.watched_collection().codec_options.with_options( + document_class=document_class + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch() as change_stream: + await coll.insert_one(random_doc) + _ = await change_stream.next() + resume_token = change_stream.resume_token + + # The resume token is always a document. + self.assertIsInstance(resume_token, document_class) + # Should not error. + async with await coll.watch(resume_after=resume_token): + pass + await coll.delete_many({}) + + async def test_read_concern(self): + """Test readConcern is not validated by the driver.""" + # Read concern 'local' is not allowed for $changeStream. + coll = self.watched_collection(read_concern=ReadConcern("local")) + with self.assertRaises(OperationFailure): + await coll.watch() + + # Does not error. 
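+        # Read concern "majority" is valid for $changeStream, so watch() succeeds.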
+        coll = self.watched_collection(read_concern=ReadConcern("majority")) + async with await coll.watch(): + pass + + +class TestAllLegacyScenarios(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = AllowListEventListener("aggregate", "getMore") + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.listener.reset() + + async def asyncSetUpCluster(self, scenario_dict): + assets = [ + (scenario_dict["database_name"], scenario_dict["collection_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), + ] + for db, coll in assets: + await self.client.drop_database(db) + await self.client[db].create_collection(coll) + + async def setFailPoint(self, scenario_dict): + fail_point = scenario_dict.get("failPoint") + if fail_point is None: + return + elif not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + fail_cmd = SON([("configureFailPoint", "failCommand")]) + fail_cmd.update(fail_point) + await async_client_context.client.admin.command(fail_cmd) + self.addAsyncCleanup( + async_client_context.client.admin.command, + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) + + def assert_list_contents_are_subset(self, superlist, sublist): + """Check that each element in sublist is a subset of the corresponding + element in superlist. + """ + self.assertEqual(len(superlist), len(sublist)) + for sup, sub in zip(superlist, sublist): + if isinstance(sub, dict): + self.assert_dict_is_subset(sup, sub) + continue + if isinstance(sub, (list, tuple)): + self.assert_list_contents_are_subset(sup, sub) + continue + self.assertEqual(sup, sub) + + def assert_dict_is_subset(self, superdict, subdict): + """Check that subdict is a subset of superdict.""" + exempt_fields = ["documentKey", "_id", "getMore"] + for key, value in subdict.items(): + if key not in superdict: + self.fail(f"Key {key} not found in {superdict}") + if isinstance(value, dict): + self.assert_dict_is_subset(superdict[key], value) + continue + if isinstance(value, (list, tuple)): + self.assert_list_contents_are_subset(superdict[key], value) + continue + if key in exempt_fields: + # Only check for presence of these exempt fields, but not value. + self.assertIn(key, superdict) + else: + self.assertEqual(superdict[key], value) + + def check_event(self, event, expectation_dict): + if event is None: + self.fail() + for key, value in expectation_dict.items(): + if isinstance(value, dict): + self.assert_dict_is_subset(getattr(event, key), value) + else: + self.assertEqual(getattr(event, key), value) + + async def asyncTearDown(self): + self.listener.reset() + + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py new file mode 100644 index 0000000000..6794605339 --- /dev/null +++ b/test/asynchronous/test_client.py @@ -0,0 +1,2700 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the mongo_client module.""" +from __future__ import annotations + +import _thread as thread +import asyncio +import base64 +import contextlib +import copy +import datetime +import gc +import logging +import os +import re +import signal +import socket +import struct +import subprocess +import sys +import threading +import time +import uuid +from typing import Any, Iterable, Type, no_type_check +from unittest import mock, skipIf +from unittest.mock import patch + +import pytest +import pytest_asyncio + +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import ( + HAVE_IPADDRESS, + AsyncIntegrationTest, + AsyncMockClientTest, + AsyncUnitTest, + SkipTest, + async_client_context, + client_knobs, + connected, + db_pwd, + db_user, + remove_all_users, + unittest, +) +from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.asynchronous.utils import ( + async_get_pool, + async_wait_until, + asyncAssertRaisesExactly, +) +from test.test_binary import BinaryData +from test.utils_shared import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + delay, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, +) + +import bson +import pymongo +from bson import encode +from bson.codec_options import ( + CodecOptions, + DatetimeConversion, + TypeEncoder, + TypeRegistry, +) +from bson.son import SON +from bson.tz_util import utc +from pymongo import event_loggers, message, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor, CursorType +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.asynchronous.pool import ( + AsyncConnection, +) +from pymongo.asynchronous.settings import TOPOLOGY_TYPE +from pymongo.asynchronous.topology import _ErrorContext +from pymongo.client_options import ClientOptions +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, MIN_SUPPORTED_WIRE_VERSION, has_c +from pymongo.compression_support import _have_snappy, _have_zstd +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool_options import _MAX_METADATA_SIZE, _METADATA, ENV_VAR_K8S, PoolOptions +from pymongo.read_preferences import ReadPreference +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import readable_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncClientUnitTest(AsyncUnitTest): + """AsyncMongoClient 
tests that don't require a server.""" + + client: AsyncMongoClient + + async def asyncSetUp(self) -> None: + self.client = await self.async_rs_or_single_client( + connect=False, serverSelectionTimeoutMS=100 + ) + + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog + + async def test_keyword_arg_defaults(self): + client = self.simple_client( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) + + options = client.options + pool_opts = options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + # socket.Socket.settimeout takes a float in seconds + self.assertEqual(20.0, pool_opts.connect_timeout) + self.assertEqual(None, pool_opts.wait_queue_timeout) + self.assertEqual(None, pool_opts._ssl_context) + self.assertEqual(None, options.replica_set_name) + self.assertEqual(ReadPreference.PRIMARY, client.read_preference) + self.assertAlmostEqual(12, client.options.server_selection_timeout) + + async def test_connect_timeout(self): + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client( + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + def test_types(self): + self.assertRaises(TypeError, AsyncMongoClient, 1) + self.assertRaises(TypeError, AsyncMongoClient, 1.14) + self.assertRaises(TypeError, AsyncMongoClient, "localhost", "27017") + self.assertRaises(TypeError, AsyncMongoClient, "localhost", 1.14) + self.assertRaises(TypeError, AsyncMongoClient, "localhost", []) + + self.assertRaises(ConfigurationError, AsyncMongoClient, []) + + async def test_max_pool_size_zero(self): + self.simple_client(maxPoolSize=0) + + def test_uri_detection(self): + self.assertRaises(ConfigurationError, AsyncMongoClient, "/foo") + self.assertRaises(ConfigurationError, AsyncMongoClient, "://") + self.assertRaises(ConfigurationError, AsyncMongoClient, "foo/") + + def test_get_db(self): + def make_db(base, name): + return base[name] + + self.assertRaises(InvalidName, make_db, self.client, "") + self.assertRaises(InvalidName, make_db, self.client, "te$t") + self.assertRaises(InvalidName, make_db, self.client, "te.t") + self.assertRaises(InvalidName, make_db, self.client, "te\\t") + self.assertRaises(InvalidName, make_db, self.client, "te/t") + self.assertRaises(InvalidName, make_db, self.client, "te st") + + self.assertIsInstance(self.client.test, AsyncDatabase) + self.assertEqual(self.client.test, self.client["test"]) + self.assertEqual(self.client.test, AsyncDatabase(self.client, "test")) + + def test_get_database(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", 
db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + def test_getattr(self): + self.assertIsInstance(self.client["_does_not_exist"], AsyncDatabase) + + with self.assertRaises(AttributeError) as context: + self.client._does_not_exist + + # Message should be: + # "AttributeError: AsyncMongoClient has no attribute '_does_not_exist'. To + # access the _does_not_exist database, use client['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + def test_iteration(self): + client = self.client + msg = "'AsyncMongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'AsyncMongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'AsyncMongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) + + async def test_get_default_database(self): + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/foo" + % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database()) + # Test that default doesn't override the URI value. + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database("bar")) + + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database("foo")) + + async def test_get_default_database_error(self): + # URI with no database. + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_default_database) + + async def test_get_default_database_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + await async_client_context.host, + await async_client_context.port, + ) + c = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database()) + + async def test_get_database_default(self): + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/foo" + % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_database()) + + async def test_get_database_default_error(self): + # URI with no database. 
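+        # Without a default database in the URI, get_database() has no name to + # fall back on and must raise ConfigurationError.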
+ c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_database) + + async def test_get_database_default_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + await async_client_context.host, + await async_client_context.port, + ) + c = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_database()) + + async def test_primary_read_pref_with_tags(self): + # No tags allowed with "primary". + with self.assertRaises(ConfigurationError): + await self.async_single_client("mongodb://host/?readpreferencetags=dc:east") + + with self.assertRaises(ConfigurationError): + await self.async_single_client( + "mongodb://host/?readpreference=primary&readpreferencetags=dc:east" + ) + + async def test_read_preference(self): + c = await self.async_rs_or_single_client( + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) + self.assertEqual(c.read_preference, ReadPreference.NEAREST) + + async def test_metadata(self): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" + metadata["application"] = {"name": "foobar"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + client = self.simple_client("foo", 27017, appname="foobar", connect=False) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # No error + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) + # Bad "driver" options. + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") + self.assertRaises(TypeError, DriverInfo) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) + # Test appending to driver info. + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|async|FooDriver" + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # Test truncating driver info metadata. 
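+        # The handshake metadata document is capped at _MAX_METADATA_SIZE bytes + # once BSON-encoded; oversized driver info is truncated rather than rejected.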
+ client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) + client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) + + @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) + def test_container_metadata(self): + metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo|async" + metadata["env"] = {} + metadata["env"]["container"] = {"orchestrator": "kubernetes"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) + + async def test_kwargs_codec_options(self): + class MyFloatType: + def __init__(self, x): + self.__x = x + + @property + def x(self): + return self.__x + + class MyFloatAsIntEncoder(TypeEncoder): + python_type = MyFloatType + + def transform_python(self, value): + return int(value) + + # Ensure codec options are passed in correctly + document_class: Type[SON] = SON + type_registry = TypeRegistry([MyFloatAsIntEncoder()]) + tz_aware = True + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + tzinfo = utc + c = self.simple_client( + document_class=document_class, + type_registry=type_registry, + tz_aware=tz_aware, + uuidrepresentation=uuid_representation_label, + unicode_decode_error_handler=unicode_decode_error_handler, + tzinfo=tzinfo, + connect=False, + ) + self.assertEqual(c.codec_options.document_class, document_class) + self.assertEqual(c.codec_options.type_registry, type_registry) + self.assertEqual(c.codec_options.tz_aware, tz_aware) + self.assertEqual( + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual(c.codec_options.tzinfo, tzinfo) + + async def test_uri_codec_options(self): + # Ensure codec options are passed in correctly + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" + uri = ( + "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" + "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" + % ( + await async_client_context.host, + await async_client_context.port, + uuid_representation_label, + unicode_decode_error_handler, + datetime_conversion, + ) + ) + c = self.simple_client(uri, connect=False) + self.assertEqual(c.codec_options.tz_aware, True) + self.assertEqual( + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) + + # Change the passed datetime_conversion to a number and re-assert. + uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") + c = self.simple_client(uri, connect=False) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) + + async def test_uri_option_precedence(self): + # Ensure kwarg options override connection string options. 
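+        # Keyword arguments are applied after the URI is parsed, so the ssl, + # replicaSet, and readPreference values below take precedence over the + # corresponding URI options.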
+ uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" + c = self.simple_client( + uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" + ) + clopts = c.options + opts = clopts._options + + self.assertEqual(opts["tls"], False) + self.assertEqual(clopts.replica_set_name, "newname") + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + + async def test_connection_timeout_ms_propagates_to_DNS_resolver(self): + # Patch the resolver. + from pymongo.asynchronous.srv_resolver import _resolve + + patched_resolver = FunctionCallRecorder(_resolve) + pymongo.asynchronous.srv_resolver._resolve = patched_resolver + + def reset_resolver(): + pymongo.asynchronous.srv_resolver._resolve = _resolve + + self.addCleanup(reset_resolver) + + # Setup. + base_uri = "mongodb+srv://test5.test.build.10gen.cc" + connectTimeoutMS = 5000 + expected_kw_value = 5.0 + uri_with_timeout = base_uri + "/?connectTimeoutMS=6000" + expected_uri_value = 6.0 + + async def test_scenario(args, kwargs, expected_value): + patched_resolver.reset() + self.simple_client(*args, **kwargs) + for _, kw in patched_resolver.call_list(): + self.assertAlmostEqual(kw["lifetime"], expected_value) + + # No timeout specified. + await test_scenario((base_uri,), {}, CONNECT_TIMEOUT) + + # Timeout only specified in connection string. + await test_scenario((uri_with_timeout,), {}, expected_uri_value) + + # Timeout only specified in keyword arguments. + kwarg = {"connectTimeoutMS": connectTimeoutMS} + await test_scenario((base_uri,), kwarg, expected_kw_value) + + # Timeout specified in both kwargs and connection string. + await test_scenario((uri_with_timeout,), kwarg, expected_kw_value) + + async def test_uri_security_options(self): + # Ensure that we don't silently override security-related options. + with self.assertRaises(InvalidURI): + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) + + # Matching SSL and TLS options should not cause errors. + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c.options._options["tls"], False) + + # Conflicting tlsInsecure options should raise an error. + with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) + + # Conflicting legacy tlsInsecure options should also raise an error. 
+ with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) + + # Conflicting kwargs should raise InvalidURI + with self.assertRaises(InvalidURI): + self.simple_client(ssl=True, tls=False) + + async def test_event_listeners(self): + c = self.simple_client(event_listeners=[], connect=False) + self.assertEqual(c.options.event_listeners, []) + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] + c = self.simple_client(event_listeners=listeners, connect=False) + self.assertEqual(c.options.event_listeners, listeners) + + async def test_client_options(self): + c = self.simple_client(connect=False) + self.assertIsInstance(c.options, ClientOptions) + self.assertIsInstance(c.options.pool_options, PoolOptions) + self.assertEqual(c.options.server_selection_timeout, 30) + self.assertEqual(c.options.pool_options.max_idle_time_seconds, None) + self.assertIsInstance(c.options.retry_writes, bool) + self.assertIsInstance(c.options.retry_reads, bool) + + def test_validate_suggestion(self): + """Validate kwargs in constructor.""" + for typo in ["auth", "Auth", "AUTH"]: + expected = f"Unknown option: {typo}. Did you mean one of (authsource, authmechanism, authoidcallowedhosts) or maybe a camelCase version of one? Refer to docstring." + expected = re.escape(expected) + with self.assertRaisesRegex(ConfigurationError, expected): + AsyncMongoClient(**{typo: "standard"}) # type: ignore[arg-type] + + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_logging(self, mock_get_hosts): + normal_hosts = [ + "normal.host.com", + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + with self.assertLogs("pymongo", level="INFO") as cm: + for host in normal_hosts: + AsyncMongoClient(host, connect=False) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + AsyncMongoClient(host, connect=False) + AsyncMongoClient(multi_host, connect=False) + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] + self.assertEqual(len(logs), 7) + + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") + async def test_detected_environment_warning(self, mock_get_hosts): + with self._caplog.at_level(logging.WARN): + normal_hosts = [ + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + for host in normal_hosts: + with self.assertWarns(UserWarning): + self.simple_client(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + with self.assertWarns(UserWarning): + self.simple_client(host) + with self.assertWarns(UserWarning): + self.simple_client(multi_host) + + +class TestClient(AsyncIntegrationTest): + def test_multiple_uris(self): + with self.assertRaises(ConfigurationError): + AsyncMongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + 
"mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) + + async def test_max_idle_time_reaper_default(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper doesn't remove connections when maxIdleTimeMS not set + client = await self.async_rs_or_single_client() + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + self.assertIn(conn, server._pool.conns) + + async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper removes idle socket and replaces it with a new one + client = await self.async_rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, two + # connections could be created and checked into the pool. + self.assertGreaterEqual(len(server._pool.conns), 1) + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") + + async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper respects maxPoolSize when adding new connections. + client = await self.async_rs_or_single_client( + maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1 + ) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, + # maxPoolSize=1 should prevent two connections from being created. + self.assertEqual(1, len(server._pool.conns)) + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") + + async def test_max_idle_time_reaper_removes_stale(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper has removed idle socket and NOT replaced it + client = await self.async_rs_or_single_client(maxIdleTimeMS=500) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn_one: + pass + # Assert that the pool does not close connections prematurely. 
+ await asyncio.sleep(0.300) + async with server._pool.checkout() as conn_two: + pass + self.assertIs(conn_one, conn_two) + await async_wait_until( + lambda: len(server._pool.conns) == 0, + "stale socket reaped and new one NOT added to the pool", + ) + + async def test_min_pool_size(self): + with client_knobs(kill_cursor_frequency=0.1): + client = await self.async_rs_or_single_client() + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + self.assertEqual(0, len(server._pool.conns)) + + # Assert that pool started up at minPoolSize + client = await self.async_rs_or_single_client(minPoolSize=10) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + await async_wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + # Assert that if a socket is closed, a new one takes its place + async with server._pool.checkout() as conn: + await conn.close_conn(None) + await async_wait_until( + lambda: len(server._pool.conns) == 10, + "a closed socket gets replaced from the pool", + ) + self.assertNotIn(conn, server._pool.conns) + + async def test_max_idle_time_checkout(self): + # Use high frequency to test _get_socket_no_auth. + with client_knobs(kill_cursor_frequency=99999999): + client = await self.async_rs_or_single_client(maxIdleTimeMS=500) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + await asyncio.sleep(1) # Sleep so that the socket becomes stale. + + async with server._pool.checkout() as new_con: + self.assertNotEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) + self.assertNotIn(conn, server._pool.conns) + self.assertIn(new_con, server._pool.conns) + + # Test that connections are reused if maxIdleTimeMS is not set. + client = await self.async_rs_or_single_client() + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + await asyncio.sleep(1) + async with server._pool.checkout() as new_con: + self.assertEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) + + async def test_constants(self): + """This test uses AsyncMongoClient explicitly to make sure that host and + port are not overloaded. + """ + host, port = await async_client_context.host, await async_client_context.port + kwargs: dict = async_client_context.default_client_options.copy() + if async_client_context.auth_enabled: + kwargs["username"] = db_user + kwargs["password"] = db_pwd + + # Set bad defaults. + AsyncMongoClient.HOST = "somedomainthatdoesntexist.org" + AsyncMongoClient.PORT = 123456789 + with self.assertRaises(AutoReconnect): + c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs) + await connected(c) + + c = self.simple_client(host, port, **kwargs) + # Override the defaults. No error. + await connected(c) + + # Set good defaults. + AsyncMongoClient.HOST = host + AsyncMongoClient.PORT = port + + # No error. 
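+        # With HOST and PORT restored to valid defaults, a client created with + # no explicit address connects successfully.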
+ c = self.simple_client(**kwargs) + await connected(c) + + async def test_init_disconnected(self): + host, port = await async_client_context.host, await async_client_context.port + c = await self.async_rs_or_single_client(connect=False) + # is_primary causes client to block until connected + self.assertIsInstance(await c.is_primary, bool) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(await c.is_mongos, bool) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(c.options.pool_options.max_pool_size, int) + self.assertIsInstance(c.nodes, frozenset) + + c = await self.async_rs_or_single_client(connect=False) + self.assertEqual(c.codec_options, CodecOptions()) + c = await self.async_rs_or_single_client(connect=False) + self.assertFalse(await c.primary) + self.assertFalse(await c.secondaries) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(c.topology_description, TopologyDescription) + self.assertEqual(c.topology_description, c._topology._description) + if async_client_context.is_rs: + # The primary's host and port are from the replica set config. + self.assertIsNotNone(await c.address) + else: + self.assertEqual(await c.address, (host, port)) + + bad_host = "somedomainthatdoesntexist.org" + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + await c.pymongo_test.test.find_one() + + async def test_init_disconnected_with_auth(self): + uri = "mongodb://user:pass@somedomainthatdoesntexist" + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + await c.pymongo_test.test.find_one() + + @async_client_context.require_replica_set + @async_client_context.require_no_load_balancer + @async_client_context.require_tls + async def test_init_disconnected_with_srv(self): + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) + + # address causes client to block until connected + self.assertIsNotNone(await c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + await c.primary + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + await c.secondaries + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + await c.arbiters + self.assertIsNotNone(c._topology) 
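+ + # Illustrative sketch (an assumption for exposition, not part of the test + # suite): any of these topology-inspecting awaitables triggers the + # deferred connect, e.g.: + # c = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc", + # connect=False, tlsInsecure=True) + # await c.address # blocks until SRV resolution and discovery finish + # await c.close()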
+ + async def test_equality(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = await self.async_rs_or_single_client(seed, connect=False) + self.assertEqual(async_client_context.client, c) + # Explicitly test inequality + self.assertFalse(async_client_context.client != c) + + c = await self.async_rs_or_single_client("invalid.com", connect=False) + self.assertNotEqual(async_client_context.client, c) + self.assertTrue(async_client_context.client != c) + + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) + + # Seeds differ: + self.assertNotEqual(c1, c2) + + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], connect=False) + + # Same seeds but out of order still compares equal: + self.assertEqual(c1, c2) + + async def test_hashable(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = await self.async_rs_or_single_client(seed, connect=False) + self.assertIn(c, {async_client_context.client}) + c = await self.async_rs_or_single_client("invalid.com", connect=False) + self.assertNotIn(c, {async_client_context.client}) + + async def test_host_w_port(self): + with self.assertRaises(ValueError): + host = await async_client_context.host + await connected( + AsyncMongoClient( + f"{host}:1234567", + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) + + async def test_repr(self): + # Used to test 'eval' below. + import bson + + client = AsyncMongoClient( # type: ignore[type-var] + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) + + the_repr = repr(client) + self.assertIn("AsyncMongoClient(host=", the_repr) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + client = self.simple_client( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) + the_repr = repr(client) + self.assertIn("AsyncMongoClient(host=", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("sockettimeoutms=None", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + async def test_repr_srv_host(self): + client = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + await client.aconnect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + await client.close() + + async def test_getters(self): + await async_wait_until( + lambda: async_client_context.nodes == self.client.nodes, "find all nodes" + ) + + async def test_list_databases(self): + cmd_docs = (await self.client.admin.command("listDatabases"))["databases"] + cursor = await self.client.list_databases() + self.assertIsInstance(cursor, AsyncCommandCursor) + helper_docs = 
await cursor.to_list() + self.assertGreater(len(helper_docs), 0) + self.assertEqual(len(helper_docs), len(cmd_docs)) + # PYTHON-3529 Some fields may change between calls, just compare names. + for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): + self.assertIs(type(helper_doc), dict) + self.assertEqual(helper_doc.keys(), cmd_doc.keys()) + client = await self.async_rs_or_single_client(document_class=SON) + async for doc in await client.list_databases(): + self.assertIs(type(doc), dict) + + await self.client.pymongo_test.test.insert_one({}) + cursor = await self.client.list_databases(filter={"name": "admin"}) + docs = await cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(docs[0]["name"], "admin") + + cursor = await self.client.list_databases(nameOnly=True) + async for doc in cursor: + self.assertEqual(["name"], list(doc)) + + async def test_list_database_names(self): + await self.client.pymongo_test.test.insert_one({"dummy": "object"}) + await self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) + cmd_docs = (await self.client.admin.command("listDatabases"))["databases"] + cmd_names = [doc["name"] for doc in cmd_docs] + + db_names = await self.client.list_database_names() + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) + self.assertEqual(db_names, cmd_names) + + async def test_drop_database(self): + with self.assertRaises(TypeError): + await self.client.drop_database(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await self.client.drop_database(None) # type: ignore[arg-type] + + await self.client.pymongo_test.test.insert_one({"dummy": "object"}) + await self.client.pymongo_test2.test.insert_one({"dummy": "object"}) + dbs = await self.client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test2", dbs) + await self.client.drop_database("pymongo_test") + + if async_client_context.is_rs: + wc_client = await self.async_rs_or_single_client(w=len(async_client_context.nodes) + 1) + with self.assertRaises(WriteConcernError): + await wc_client.drop_database("pymongo_test2") + + await self.client.drop_database(self.client.pymongo_test2) + dbs = await self.client.list_database_names() + self.assertNotIn("pymongo_test", dbs) + self.assertNotIn("pymongo_test2", dbs) + + async def test_close(self): + test_client = await self.async_rs_or_single_client() + coll = test_client.pymongo_test.bar + await test_client.close() + with self.assertRaises(InvalidOperation): + await coll.count_documents({}) + + async def test_close_kills_cursors(self): + if sys.platform.startswith("java"): + # We can't figure out how to make this test reliable with Jython. + raise SkipTest("Can't test with Jython") + test_client = await self.async_rs_or_single_client() + # Kill any cursors possibly queued up by previous tests. + gc.collect() + await test_client._process_periodic_tasks() + + # Add some test data. + coll = test_client.pymongo_test.test_close_kills_cursors + docs_inserted = 1000 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + # Open a cursor and leave it open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(await anext(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + + # Open a command cursor and leave it open on the server. 
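+        # aggregate() also returns a server-side cursor; deleting the Python + # object without exhausting it relies on garbage collection to schedule + # the cursor for the killCursors cleanup exercised below.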
+ cursor = await coll.aggregate([], batchSize=10) + self.assertTrue(bool(await anext(cursor))) + del cursor + # Required for PyPy, Jython and other Python implementations that + # don't use reference counting garbage collection. + gc.collect() + + # Close the client and ensure the topology is closed. + self.assertTrue(test_client._topology._opened) + await test_client.close() + self.assertFalse(test_client._topology._opened) + test_client = await self.async_rs_or_single_client() + # The killCursors task should not need to re-open the topology. + await test_client._process_periodic_tasks() + self.assertTrue(test_client._topology._opened) + + async def test_close_stops_kill_cursors_thread(self): + client = await self.async_rs_client() + await client.test.test.find_one() + self.assertFalse(client._kill_cursors_executor._stopped) + + # Closing the client should stop the thread. + await client.close() + self.assertTrue(client._kill_cursors_executor._stopped) + + # Reusing the closed client should raise an InvalidOperation error. + with self.assertRaises(InvalidOperation): + await client.admin.command("ping") + # Thread is still stopped. + self.assertTrue(client._kill_cursors_executor._stopped) + + async def test_uri_connect_option(self): + # Ensure that topology is not opened if connect=False. + client = await self.async_rs_client(connect=False) + self.assertFalse(client._topology._opened) + + # Ensure kill cursors thread has not been started. + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) + # Using the client should open topology and start the thread. + await client.admin.command("ping") + self.assertTrue(client._topology._opened) + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) + + async def test_close_does_not_open_servers(self): + client = await self.async_rs_client(connect=False) + topology = client._topology + self.assertEqual(topology._servers, {}) + await client.close() + self.assertEqual(topology._servers, {}) + + async def test_close_closes_sockets(self): + client = await self.async_rs_client() + await client.test.test.find_one() + topology = client._topology + await client.close() + for server in topology._servers.values(): + self.assertFalse(server._pool.conns) + self.assertTrue(server._monitor._executor._stopped) + self.assertTrue(server._monitor._rtt_monitor._executor._stopped) + self.assertFalse(server._monitor._pool.conns) + self.assertFalse(server._monitor._rtt_monitor._pool.conns) + + def test_bad_uri(self): + with self.assertRaises(InvalidURI): + AsyncMongoClient("http://localhost") + + @async_client_context.require_auth + @async_client_context.require_no_fips + async def test_auth_from_uri(self): + host, port = await async_client_context.host, await async_client_context.port + await async_client_context.create_user("admin", "admin", "pass") + self.addAsyncCleanup(async_client_context.drop_user, "admin", "admin") + self.addAsyncCleanup(remove_all_users, self.client.pymongo_test) + + await async_client_context.create_user( + "pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"] + ) + + with self.assertRaises(OperationFailure): + await connected( + await self.async_rs_or_single_client_noauth("mongodb://a:b@%s:%d" 
% (host, port)) + ) + + # No error. + await connected( + await self.async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + ) + + # Wrong database. + uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) + with self.assertRaises(OperationFailure): + await connected(await self.async_rs_or_single_client_noauth(uri)) + + # No error. + await connected( + await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + ) + ) + + # Auth with lazy connection. + await ( + await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ) + ).pymongo_test.test.find_one() + + # Wrong password. + bad_client = await self.async_rs_or_single_client_noauth( + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + @async_client_context.require_auth + async def test_username_and_password(self): + await async_client_context.create_user("admin", "ad min", "pa/ss") + self.addAsyncCleanup(async_client_context.drop_user, "admin", "ad min") + + c = await self.async_rs_or_single_client_noauth(username="ad min", password="pa/ss") + + # Username and password aren't in strings that will likely be logged. + self.assertNotIn("ad min", repr(c)) + self.assertNotIn("ad min", str(c)) + self.assertNotIn("pa/ss", repr(c)) + self.assertNotIn("pa/ss", str(c)) + + # Auth succeeds. + await c.server_info() + + with self.assertRaises(OperationFailure): + await ( + await self.async_rs_or_single_client_noauth(username="ad min", password="foo") + ).server_info() + + @async_client_context.require_auth + @async_client_context.require_no_fips + async def test_lazy_auth_raises_operation_failure(self): + host = await async_client_context.host + lazy_client = await self.async_rs_or_single_client_noauth( + f"mongodb://user:wrong@{host}/pymongo_test", connect=False + ) + + await asyncAssertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) + + @async_client_context.require_no_tls + async def test_unix_socket(self): + if not hasattr(socket, "AF_UNIX"): + raise SkipTest("UNIX-sockets are not supported on this system") + + mongodb_socket = "/tmp/mongodb-%d.sock" % (await async_client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (await async_client_context.port,) + if not os.access(mongodb_socket, os.R_OK): + raise SkipTest("Socket file is not accessible") + + uri = "mongodb://%s" % encoded_socket + # Confirm we can do operations via the socket. + client = await self.async_rs_or_single_client(uri) + await client.pymongo_test.test.insert_one({"dummy": "object"}) + dbs = await client.list_database_names() + self.assertIn("pymongo_test", dbs) + + self.assertIn(mongodb_socket, repr(client)) + + # Confirm it fails with a missing socket. 
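+        # A URI pointing at a nonexistent socket path cannot complete server + # selection; the short serverSelectionTimeoutMS keeps the expected + # ConnectionFailure fast.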
+ with self.assertRaises(ConnectionFailure): + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 + ) + await connected(c) + + async def test_document_class(self): + c = self.client + db = c.pymongo_test + await db.test.insert_one({"x": 1}) + + self.assertEqual(dict, c.codec_options.document_class) + self.assertIsInstance(await db.test.find_one(), dict) + self.assertNotIsInstance(await db.test.find_one(), SON) + + c = await self.async_rs_or_single_client(document_class=SON) + + db = c.pymongo_test + + self.assertEqual(SON, c.codec_options.document_class) + self.assertIsInstance(await db.test.find_one(), SON) + + async def test_timeouts(self): + client = await self.async_rs_or_single_client( + connectTimeoutMS=10500, + socketTimeoutMS=10500, + maxIdleTimeMS=10500, + serverSelectionTimeoutMS=10500, + ) + self.assertEqual(10.5, (await async_get_pool(client)).opts.connect_timeout) + self.assertEqual(10.5, (await async_get_pool(client)).opts.socket_timeout) + self.assertEqual(10.5, (await async_get_pool(client)).opts.max_idle_time_seconds) + self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) + self.assertEqual(10.5, client.options.server_selection_timeout) + + async def test_socket_timeout_ms_validation(self): + c = await self.async_rs_or_single_client(socketTimeoutMS=10 * 1000) + self.assertEqual(10, (await async_get_pool(c)).opts.socket_timeout) + + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=None)) + self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) + + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS=-1): + pass + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS=1e10): + pass + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS="foo"): + pass + + async def test_socket_timeout(self): + no_timeout = self.client + timeout_sec = 1 + timeout = await self.async_rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + + await no_timeout.pymongo_test.drop_collection("test") + await no_timeout.pymongo_test.test.insert_one({"x": 1}) + + # A $where clause that takes a second longer than the timeout + where_func = delay(timeout_sec + 1) + + async def get_x(db): + doc = await anext(db.test.find().where(where_func)) + return doc["x"] + + self.assertEqual(1, await get_x(no_timeout.pymongo_test)) + with self.assertRaises(NetworkTimeout): + await get_x(timeout.pymongo_test) + + async def test_server_selection_timeout(self): + client = AsyncMongoClient(serverSelectionTimeoutMS=100, connect=False) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient(serverSelectionTimeoutMS=0, connect=False) + + self.assertAlmostEqual(0, client.options.server_selection_timeout) + + self.assertRaises( + ValueError, AsyncMongoClient, serverSelectionTimeoutMS="foo", connect=False + ) + self.assertRaises(ValueError, AsyncMongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, AsyncMongoClient, serverSelectionTimeoutMS=None, connect=False + ) + await client.close() + + client = AsyncMongoClient( + "mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False + ) + 
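+        # URI timeouts are specified in milliseconds but exposed in seconds on + # the options object, so 100ms becomes 0.1.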
self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) + await client.close() + + # Test invalid timeout in URI ignored and set to default. + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + + async def test_waitQueueTimeoutMS(self): + listener = CMAPListener() + client = await self.async_rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = await async_get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + async with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + await client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) + + async def test_socketKeepAlive(self): + pool = await async_get_pool(self.client) + async with pool.checkout() as conn: + keepalive = conn.conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + self.assertTrue(keepalive) + + @no_type_check + async def test_tz_aware(self): + self.assertRaises(ValueError, AsyncMongoClient, tz_aware="foo") + + aware = await self.async_rs_or_single_client(tz_aware=True) + self.addAsyncCleanup(aware.close) + naive = self.client + await aware.pymongo_test.drop_collection("test") + + now = datetime.datetime.now(tz=datetime.timezone.utc) + await aware.pymongo_test.test.insert_one({"x": now}) + + self.assertEqual(None, (await naive.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual(utc, (await aware.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual( + (await aware.pymongo_test.test.find_one())["x"].replace(tzinfo=None), + (await naive.pymongo_test.test.find_one())["x"], + ) + + @async_client_context.require_ipv6 + async def test_ipv6(self): + if async_client_context.tls: + if not HAVE_IPADDRESS: + raise SkipTest("Need the ipaddress module to test with SSL") + + if async_client_context.auth_enabled: + auth_str = f"{db_user}:{db_pwd}@" + else: + auth_str = "" + + uri = "mongodb://%s[::1]:%d" % (auth_str, await async_client_context.port) + if async_client_context.is_rs: + uri += "/?replicaSet=" + (async_client_context.replica_set_name or "") + + client = await self.async_rs_or_single_client_noauth(uri) + await client.pymongo_test.test.insert_one({"dummy": "object"}) + await client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) + + dbs = await client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) + + async def test_contextlib(self): + client = await self.async_rs_or_single_client() + await client.pymongo_test.drop_collection("test") + await client.pymongo_test.test.insert_one({"foo": "bar"}) + + # The socket used for the previous commands has been returned to the + # pool + self.assertEqual(1, len((await async_get_pool(client)).conns)) + + # contextlib async support was added in Python 3.10 + if _IS_SYNC or sys.version_info >= (3, 10): + async with contextlib.aclosing(client): + self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + await 
client.pymongo_test.test.find_one() + client = await self.async_rs_or_single_client() + async with client as client: + self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + await client.pymongo_test.test.find_one() + + @async_client_context.require_sync + def test_interrupt_signal(self): + if sys.platform.startswith("java"): + # We can't figure out how to raise an exception on a thread that's + # blocked on a socket, whether that's the main thread or a worker, + # without simply killing the whole thread in Jython. This suggests + # PYTHON-294 can't actually occur in Jython. + raise SkipTest("Can't test interrupts in Jython") + if is_greenthread_patched(): + raise SkipTest("Can't reliably test interrupts with green threads") + + # Test fix for PYTHON-294 -- make sure AsyncMongoClient closes its + # socket if it gets an interrupt while waiting to recv() from it. + db = self.client.pymongo_test + + # A $where clause which takes 1.5 sec to execute + where = delay(1.5) + + # Need exactly 1 document so find() will execute its $where clause once + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) + + old_signal_handler = None + try: + # Platform-specific hacks for raising a KeyboardInterrupt on the + # main thread while find() is in-progress: On Windows, SIGALRM is + # unavailable so we use a second thread. In our Evergreen setup on + # Linux, the thread technique causes an error in the test at + # conn.recv(): TypeError: 'int' object is not callable + # We don't know what causes this, so we hack around it. + + if sys.platform == "win32": + + def interrupter(): + # Raises KeyboardInterrupt in the main thread + time.sleep(0.25) + thread.interrupt_main() + + thread.start_new_thread(interrupter, ()) + else: + # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT + # for one second in the future, but easy to schedule SIGALRM. + def sigalarm(num, frame): + raise KeyboardInterrupt + + old_signal_handler = signal.signal(signal.SIGALRM, sigalarm) + signal.alarm(1) + + raised = False + try: + # Will be interrupted by a KeyboardInterrupt. + next(db.foo.find({"$where": where})) # type: ignore[call-overload] + except KeyboardInterrupt: + raised = True + + # Can't use self.assertRaises() because it doesn't catch system + # exceptions + self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") + + # Raises AssertionError due to PYTHON-294 -- Mongo's response to + # the previous find() is still waiting to be read on the socket, + # so the request id's don't match. + self.assertEqual({"_id": 1}, next(db.foo.find())) # type: ignore[call-overload] + finally: + if old_signal_handler: + signal.signal(signal.SIGALRM, old_signal_handler) + + async def test_operation_failure(self): + # Ensure AsyncMongoClient doesn't close socket after it gets an error + # response to getLastError. PYTHON-395. We need a new client here + # to avoid race conditions caused by replica set failover or idle + # socket reaping. 
+ client = await self.async_single_client() + await client.pymongo_test.test.find_one() + pool = await async_get_pool(client) + socket_count = len(pool.conns) + self.assertGreaterEqual(socket_count, 1) + old_conn = next(iter(pool.conns)) + await client.pymongo_test.test.drop() + await client.pymongo_test.test.insert_one({"_id": "foo"}) + with self.assertRaises(OperationFailure): + await client.pymongo_test.test.insert_one({"_id": "foo"}) + + self.assertEqual(socket_count, len(pool.conns)) + new_con = next(iter(pool.conns)) + self.assertEqual(old_conn, new_con) + + async def test_lazy_connect_w0(self): + # Ensure that connect-on-demand works when the first operation is + # an unacknowledged write. This exercises _writable_max_wire_version(). + + # Use a separate collection to avoid races where we're still + # completing an operation on a collection while the next test begins. + await async_client_context.client.drop_database("test_lazy_connect_w0") + self.addAsyncCleanup(async_client_context.client.drop_database, "test_lazy_connect_w0") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.insert_one({}) + + async def predicate(): + return await client.test_lazy_connect_w0.test.count_documents({}) == 1 + + await async_wait_until(predicate, "find one document") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) + + async def predicate(): + return (await client.test_lazy_connect_w0.test.find_one()).get("x") == 1 + + await async_wait_until(predicate, "update one document") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.delete_one({}) + + async def predicate(): + return await client.test_lazy_connect_w0.test.count_documents({}) == 0 + + await async_wait_until(predicate, "delete one document") + + @async_client_context.require_no_mongos + async def test_exhaust_network_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. + client = await self.async_rs_or_single_client(maxPoolSize=1, retryReads=False) + collection = client.pymongo_test.test + pool = await async_get_pool(client) + pool._check_interval_seconds = None # Never check. + + # Ensure a socket. + await connected(client) + + # Cause a network error. + conn = one(pool.conns) + await conn.conn.close() + cursor = collection.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(ConnectionFailure): + await anext(cursor) + + self.assertTrue(conn.closed) + + # The semaphore was decremented despite the error. + self.assertEqual(0, pool.requests) + + @async_client_context.require_auth + async def test_auth_network_error(self): + # Make sure there's no semaphore leak if we get a network error + # when authenticating a new socket with cached credentials. + + # Get a client with one socket so we detect if it's leaked. + c = await connected( + await self.async_rs_or_single_client( + maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False + ) + ) + + # Cause a network error on the actual socket. + pool = await async_get_pool(c) + conn = one(pool.conns) + await conn.conn.close() + + # AsyncConnection.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. + with self.assertRaises(AutoReconnect): + await c.test.collection.find_one() + + # No semaphore leak, the pool is allowed to make a new socket. 
+ await c.test.collection.find_one() + + @async_client_context.require_no_replica_set + async def test_connect_to_standalone_using_replica_set_name(self): + client = await self.async_single_client(replicaSet="anything", serverSelectionTimeoutMS=100) + with self.assertRaises(AutoReconnect): + await client.test.test.find_one() + + @async_client_context.require_replica_set + async def test_stale_getmore(self): + # A cursor is created, but its member goes down and is removed from + # the topology before the getMore message is sent. Test that + # AsyncMongoClient._run_operation_with_response handles the error. + with self.assertRaises(AutoReconnect): + client = await self.async_rs_client(connect=False, serverSelectionTimeoutMS=100) + await client._run_operation( + operation=message._GetMore( + "pymongo_test", + "collection", + 101, + 1234, + client.codec_options, + ReadPreference.PRIMARY, + None, + client, + None, + None, + False, + None, + ), + unpack_res=AsyncCursor(client.pymongo_test.collection)._unpack_response, + address=("not-a-member", 27017), + ) + + async def test_heartbeat_frequency_ms(self): + class HeartbeatStartedListener(ServerHeartbeatListener): + def __init__(self): + self.results = [] + + def started(self, event): + self.results.append(event) + + def succeeded(self, event): + pass + + def failed(self, event): + pass + + old_init = ServerHeartbeatStartedEvent.__init__ + heartbeat_times = [] + + def init(self, *args): + old_init(self, *args) + heartbeat_times.append(time.time()) + + try: + ServerHeartbeatStartedEvent.__init__ = init # type: ignore + listener = HeartbeatStartedListener() + uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( + await async_client_context.host, + await async_client_context.port, + ) + await self.async_single_client(uri, event_listeners=[listener]) + await async_wait_until( + lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" + ) + + # Default heartbeatFrequencyMS is 10 sec. Check the interval was + # closer to 0.5 sec with heartbeatFrequencyMS configured. 
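+ # delta=2 tolerates scheduling jitter but still rules out the 10 sec default.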
+ self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + + finally: + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore + + def test_small_heartbeat_frequency_ms(self): + uri = "mongodb://example/?heartbeatFrequencyMS=499" + with self.assertRaises(ConfigurationError) as context: + AsyncMongoClient(uri) + + self.assertIn("heartbeatFrequencyMS", str(context.exception)) + + async def test_compression(self): + def compression_settings(client): + pool_options = client.options.pool_options + return pool_options._compression_settings + + uri = "mongodb://localhost:27017/?compressors=zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, 4) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + # According to the connection string spec, unsupported values + # just raise a warning and are ignored. 
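+ # Valid zlib levels are -1 through 9, so 10 and -2 are dropped below.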
+ uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + if not _have_snappy(): + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy"]) + uri = "mongodb://localhost:27017/?compressors=snappy,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) + + if not _have_zstd(): + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd"]) + uri = "mongodb://localhost:27017/?compressors=zstd,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) + + options = async_client_context.default_client_options + if "compressors" in options and "zlib" in options["compressors"]: + for level in range(-1, 10): + client = await self.async_single_client(zlibcompressionlevel=level) + # No error + await client.pymongo_test.test.find_one() + + @async_client_context.require_sync + async def test_reset_during_update_pool(self): + client = await self.async_rs_or_single_client(minPoolSize=10) + await client.admin.command("ping") + pool = await async_get_pool(client) + generation = pool.gen.get_overall() + + # Continuously reset the pool. + class ResetPoolThread(threading.Thread): + def __init__(self, pool): + super().__init__() + self.running = True + self.pool = pool + + def stop(self): + self.running = False + + async def _run(self): + while self.running: + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) + await client._topology.handle_error(pool.address, ctx) + await asyncio.sleep(0.001) + + def run(self): + self._run() + + t = ResetPoolThread(pool) + t.start() + + # Ensure that update_pool completes without error even when the pool + # is reset concurrently. + try: + while True: + for _ in range(10): + await client._topology.update_pool() + if generation != pool.gen.get_overall(): + break + finally: + t.stop() + t.join() + await client.admin.command("ping") + + async def test_background_connections_do_not_hold_locks(self): + min_pool_size = 10 + client = await self.async_rs_or_single_client( + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) + # Create a single connection in the pool. + await client.admin.command("ping") + + # Cause new connections stall for a few seconds. 
+ pool = await async_get_pool(client) + original_connect = pool.connect + + async def stall_connect(*args, **kwargs): + await asyncio.sleep(2) + return await original_connect(*args, **kwargs) + + pool.connect = stall_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Wait for the background thread to start creating connections. + await async_wait_until(lambda: len(pool.conns) > 1, "start creating connections") + + # Assert that application operations do not block. + for _ in range(10): + start = time.monotonic() + await client.admin.command("ping") + total = time.monotonic() - start + # Each ping command should not take more than 2 seconds. + self.assertLess(total, 2) + + async def test_background_connections_log_on_error(self): + with self.assertLogs("pymongo.client", level="ERROR") as cm: + client = await self.async_rs_or_single_client(minPoolSize=1) + # Create a single connection in the pool. + await client.admin.command("ping") + + # Cause new connections to fail. + pool = await async_get_pool(client) + + async def fail_connect(*args, **kwargs): + raise Exception("failed to connect") + + pool.connect = fail_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + await pool.reset_without_pause() + + await async_wait_until( + lambda: "failed to connect" in "".join(cm.output), "start creating connections" + ) + self.assertIn("MongoClient background task encountered an error", "".join(cm.output)) + + @async_client_context.require_replica_set + async def test_direct_connection(self): + # direct_connection=True should result in Single topology. + client = await self.async_rs_or_single_client(directConnection=True) + await client.admin.command("ping") + self.assertEqual(len(client.nodes), 1) + self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) + + # direct_connection=False should result in RS topology. + client = await self.async_rs_or_single_client(directConnection=False) + await client.admin.command("ping") + self.assertGreaterEqual(len(client.nodes), 1) + self.assertIn( + client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], + ) + + # directConnection=True should error with multiple hosts as a list.
+ with self.assertRaises(ConfigurationError): + AsyncMongoClient(["host1", "host2"], directConnection=True) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") + async def test_continuous_network_errors(self): + def server_description_count(): + i = 0 + for obj in gc.get_objects(): + try: + if isinstance(obj, ServerDescription): + i += 1 + except ReferenceError: + pass + return i + + gc.collect() + with client_knobs(min_heartbeat_interval=0.002): + client = self.simple_client( + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 + ) + initial_count = server_description_count() + with self.assertRaises(ServerSelectionTimeoutError): + await client.test.test.find_one() + gc.collect() + final_count = server_description_count() + await client.close() + # If a bug like PYTHON-2433 is reintroduced then too many + # ServerDescriptions will be kept alive and this test will fail: + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) + + @async_client_context.require_failCommand_fail_point + async def test_network_error_message(self): + client = await self.async_single_client(retryReads=False) + await client.admin.command("ping") # connect + async with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + assert await client.address is not None + expected = "{}:{}: ".format(*(await client.address)) + with self.assertRaisesRegex(AutoReconnect, expected): + await client.pymongo_test.test.find_one({}) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") + async def test_process_periodic_tasks(self): + client = await self.async_rs_or_single_client() + coll = client.db.collection + await coll.insert_many([{} for _ in range(5)]) + cursor = coll.find(batch_size=2) + await cursor.next() + c_id = cursor.cursor_id + self.assertIsNotNone(c_id) + await client.close() + # Add cursor to kill cursors queue + del cursor + await async_wait_until( + lambda: client._kill_cursors_queue, + "waited for cursor to be added to queue", + ) + await client._process_periodic_tasks() # This must not raise or print any exceptions + with self.assertRaises(InvalidOperation): + await coll.insert_many([{} for _ in range(5)]) + + async def test_service_name_from_kwargs(self): + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + + async def test_srv_max_hosts_kwarg(self): + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + await client.aconnect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + await client.aconnect() + 
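+ # srvMaxHosts=1 caps SRV discovery at a single host.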
self.assertEqual(len(client.topology_description.server_descriptions()), 1) + client = self.simple_client( + "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 + ) + await client.aconnect() + self.assertEqual(len(client.topology_description.server_descriptions()), 2) + + @unittest.skipIf( + async_client_context.load_balancer, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + @async_client_context.require_sync + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, async_client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. + outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + + async def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" + if expected_env is not None: + metadata["env"] = expected_env + + if "AWS_REGION" not in env_vars: + os.environ["AWS_REGION"] = "" + client = await self.async_rs_or_single_client(serverSelectionTimeoutMS=10000) + await client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + + async def test_handshake_01_aws(self): + await self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + async def test_handshake_02_azure(self): + await self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + async def test_handshake_03_gcp(self): + await self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + await self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + async def test_handshake_04_vercel(self): + await self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + async def test_handshake_05_multiple(self): + await self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. 
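+ # Conflicting provider markers mean no env metadata should be sent.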
+ await self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + await self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + async def test_handshake_06_region_too_long(self): + await self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + async def test_handshake_07_memory_invalid_int(self): + await self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, + {"name": "aws.lambda"}, + ) + + async def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + await self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + + async def test_handshake_09_container_with_provider(self): + await self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) + + def test_dict_hints(self): + self.db.t.find(hint={"x": 1}) + + def test_dict_hints_sort(self): + result = self.db.t.find() + result.sort({"x": 1}) + + self.db.t.find(sort={"x": 1}) + + async def test_dict_hints_create_index(self): + await self.db.t.create_index({"x": pymongo.ASCENDING}) + + async def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + + async def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + + async def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = await self.async_single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + async def test_uuid_queries(self): + db = async_client_context.client.pymongo_test + coll = db.test + await coll.drop() + + uu = uuid.uuid4() + await coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, 
await coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, await coll.count_documents({"uuid": uu})) + await coll.insert_one({"uuid": uu}) + self.assertEqual(2, await coll.count_documents({})) + docs = await coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, await coll.count_documents(predicate)) + docs = await coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + await coll.drop() + + +class TestExhaustCursor(AsyncIntegrationTest): + """Test that clients properly handle errors from exhaust cursors.""" + + def setUp(self): + super().setUp() + if async_client_context.is_mongos: + raise SkipTest("mongos doesn't support exhaust, SERVER-2627") + + async def test_exhaust_query_server_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. + client = await connected(await self.async_rs_or_single_client(maxPoolSize=1)) + + collection = client.pymongo_test.test + pool = await async_get_pool(client) + conn = one(pool.conns) + + # This will cause OperationFailure in all mongo versions since + # the value for $orderby must be a document. + cursor = collection.find( + SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST + ) + + with self.assertRaises(OperationFailure): + await cursor.next() + self.assertFalse(conn.closed) + + # The socket was checked in and the semaphore was decremented. + self.assertIn(conn, pool.conns) + self.assertEqual(0, pool.requests) + + async def test_exhaust_getmore_server_error(self): + # When doing a getmore on an exhaust cursor, the socket stays checked + # out on success but it's checked in on error to avoid semaphore leaks. + client = await self.async_rs_or_single_client(maxPoolSize=1) + collection = client.pymongo_test.test + await collection.drop() + + await collection.insert_many([{} for _ in range(200)]) + self.addAsyncCleanup(async_client_context.client.pymongo_test.test.drop) + + pool = await async_get_pool(client) + pool._check_interval_seconds = None # Never check. + conn = one(pool.conns) + + cursor = collection.find(cursor_type=CursorType.EXHAUST) + + # Initial query succeeds. + await cursor.next() + + # Cause a server error on getmore. + async def receive_message(request_id): + # Discard the actual server response. + await AsyncConnection.receive_message(conn, request_id) + + # responseFlags bit 1 is QueryFailure. + msg = struct.pack("= _NAMESPACE_DOC_BYTES: + num_models += 1 + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + num_models, models = await self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + + # No batch splitting required. 
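+ # The extra operation still fits in the current batch, so a single bulkWrite is sent.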
+ result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_11_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + num_models, models = await self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + self.addAsyncCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): + client = await self.async_rs_or_single_client() + + # Document too large. + b_repeated = "b" * self.max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + # Namespace too large. 
+ c_repeated = "c" * self.max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(DocumentTooLarge) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + @async_client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_13_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_upserted_result(self): + client = await self.async_rs_or_single_client() + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = await client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. 
+ + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_15_unacknowledged_write_across_batches(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + await client.db.command({"create": "db.coll"}) + + b_repeated = "b" * (self.max_bson_object_size - 500) + models = [ + InsertOne(namespace="db.coll", document={"a": b_repeated}) + for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1) + ] + + listener.reset() + + res = await client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0)) + self.assertEqual(False, res.acknowledged) + + events = listener.started_events + self.assertEqual(2, len(events)) + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size), + len(events[0].command["ops"]), + ) + self.assertEqual(1, len(events[1].command["ops"])) + self.assertEqual(events[0].operation_id, events[1].operation_id) + self.assertEqual({"w": 0}, events[0].command["writeConcern"]) + self.assertEqual({"w": 0}, events[1].command["writeConcern"]) + + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size) + 1, + await collection.count_documents({}), + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites +class TestClientBulkWriteCSOT(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.max_write_batch_size = await async_client_context.max_write_batch_size + self.max_bson_object_size = await async_client_context.max_bson_size + self.max_message_size_bytes = await async_client_context.max_message_size_bytes + + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_failCommand_fail_point + @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True) + async def test_timeout_in_multi_batch_bulk_write(self): + if sys.platform != "linux" and "CI" in os.environ: + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and MacOS") + _OVERHEAD = 500 + + internal_client = await self.async_rs_or_single_client(timeoutMS=None) + + collection = internal_client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010}, + } + async with self.fail_point(fail_command): + models = [] + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + event_listeners=[listener], + readConcernLevel="majority", + readPreference="primary", + timeoutMS=2000, + w="majority", + ) + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(10): + await client.admin.command("ping") + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, NetworkTimeout) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + 
bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py new file mode 100644 index 0000000000..652b32e798 --- /dev/null +++ b/test/asynchronous/test_client_context.py @@ -0,0 +1,57 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncUnitTest, SkipTest, async_client_context, unittest + +_IS_SYNC = False + + +class TestAsyncClientContext(AsyncUnitTest): + def test_must_connect(self): + if not os.environ.get("PYMONGO_MUST_CONNECT"): + raise SkipTest("PYMONGO_MUST_CONNECT is not set") + + self.assertTrue( + async_client_context.connected, + "client context must be connected when " + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + async_client_context.connection_attempt_info() + ), + ) + + def test_enableTestCommands_is_disabled(self): + if not os.environ.get("DISABLE_TEST_COMMANDS"): + raise SkipTest("DISABLE_TEST_COMMANDS is not set") + + self.assertFalse( + async_client_context.test_commands_enabled, + "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.", + ) + + def test_free_threading_is_enabled(self): + if "free-threading build" not in sys.version: + raise SkipTest("this test requires the Python free-threading build") + + # If the GIL is enabled then pymongo or one of our deps does not support free-threading. + self.assertFalse(sys._is_gil_enabled()) # type: ignore[attr-defined] + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_client_metadata.py b/test/asynchronous/test_client_metadata.py new file mode 100644 index 0000000000..2f175cceed --- /dev/null +++ b/test/asynchronous/test_client_metadata.py @@ -0,0 +1,232 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import AsyncMongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addAsyncCleanup(self.server.stop) + + async def send_ping_and_get_metadata( + self, client: AsyncMongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + await client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + async def check_metadata_added( + self, + client: AsyncMongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + await asyncio.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = await self.send_ping_and_get_metadata( + client, True + ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + async def test_append_metadata(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await 
self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_append_metadata_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_append_metadata_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_append_metadata_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, None) + + async def test_multiple_successive_metadata_updates(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_multiple_successive_metadata_updates_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, None) + + async def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + await client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + async def test_duplicate_driver_name_no_op(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await 
self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + await asyncio.sleep(0.005) + # add same metadata again + await self.check_metadata_added(client, "Framework", None, None) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py new file mode 100644 index 0000000000..da810a2a9f --- /dev/null +++ b/test/asynchronous/test_collation.py @@ -0,0 +1,282 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collation module.""" +from __future__ import annotations + +import functools +import warnings +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import EventListener, OvertCommandListener +from typing import Any + +from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) +from pymongo.errors import ConfigurationError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollationObject(unittest.TestCase): + def test_constructor(self): + self.assertRaises(TypeError, Collation, locale=42) + # Fill in a locale to test the other options. + _Collation = functools.partial(Collation, "en_US") + # No error. + _Collation(caseFirst=CollationCaseFirst.UPPER) + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") + self.assertRaises(TypeError, _Collation, alternate=5) + self.assertRaises(TypeError, _Collation, maxVariable=2) + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") + + # No errors. 
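+ # Unrecognized options are passed through verbatim for forward compatibility.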
+ Collation("en_US", future_option="bar", another_option=42) + collation = Collation( + "en_US", + caseLevel=True, + caseFirst=CollationCaseFirst.UPPER, + strength=CollationStrength.QUATERNARY, + numericOrdering=True, + alternate=CollationAlternate.SHIFTED, + maxVariable=CollationMaxVariable.SPACE, + normalization=True, + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) + + +class TestCollation(AsyncIntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + async def asyncTearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None + self.listener.reset() + await super().asyncTearDown() + + def last_command_started(self): + return self.listener.started_events[-1].command + + def assertCollationInLastCommand(self): + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) + + async def test_create_collection(self): + await self.db.test.drop() + await self.db.create_collection("test", collation=self.collation) + self.assertCollationInLastCommand() + + # Test passing collation as a dict as well. + await self.db.test.drop() + self.listener.reset() + await self.db.create_collection("test", collation=self.collation.document) + self.assertCollationInLastCommand() + + def test_index_model(self): + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) + + async def test_create_index(self): + await self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.started_events[0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) + + async def test_aggregate(self): + await self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) + self.assertCollationInLastCommand() + + async def test_count_documents(self): + await self.db.test.count_documents({}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_distinct(self): + await self.db.test.distinct("foo", collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find(collation=self.collation).distinct("foo") + self.assertCollationInLastCommand() + + async def test_find_command(self): + await self.db.test.insert_one({"is this thing on?": True}) + self.listener.reset() + await anext(self.db.test.find(collation=self.collation)) + self.assertCollationInLastCommand() + + async def test_explain_command(self): + self.listener.reset() + await self.db.test.find(collation=self.collation).explain() + # The collation should be part of the explained command. 
+ self.assertEqual( + self.collation.document, self.last_command_started()["explain"]["collation"] + ) + + async def test_delete(self): + await self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + self.listener.reset() + await self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + async def test_update(self): + await self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + async def test_find_and(self): + await self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_bulk_write(self): + await self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command + + def check_ops(ops): + for op in ops: + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) + else: + self.assertEqual(self.collation.document, op["collation"]) + + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) + + async def test_indexes_same_keys_different_collations(self): + await self.db.test.drop() + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + await self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) + indexes = await self.db.test.index_information() + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + 
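# Dropping one index by name leaves the other indexes on the same key in place.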
await self.db.test.drop_index("fieldname_1") + indexes = await self.db.test.index_information() + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) + + async def test_unacknowledged_write(self): + unacknowledged = WriteConcern(w=0) + collection = self.db.get_collection("test", write_concern=unacknowledged) + with self.assertRaises(ConfigurationError): + await collection.update_one( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + with self.assertRaises(ConfigurationError): + await collection.bulk_write([update_one]) + + async def test_cursor_collation(self): + await self.db.test.insert_one({"hello": "world"}) + await anext(self.db.test.find().collation(self.collation)) + self.assertCollationInLastCommand() diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py new file mode 100644 index 0000000000..498563fe83 --- /dev/null +++ b/test/asynchronous/test_collection.py @@ -0,0 +1,2263 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection module.""" +from __future__ import annotations + +import asyncio +import contextlib +import re +import sys +from codecs import utf_8_decode +from collections import defaultdict +from test.asynchronous.utils import async_get_pool, async_is_mongos +from typing import Any, Iterable, no_type_check + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import ( # TODO: fix sync imports in PYTHON-4528 + AsyncIntegrationTest, + AsyncUnitTest, + async_client_context, +) +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + OvertCommandListener, + async_wait_until, +) +from test.version import Version + +from bson import encode +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex +from bson.son import SON +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT +from pymongo.asynchronous.collection import AsyncCollection, ReturnDocument +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.bulk_shared import BulkWriteError +from pymongo.cursor_shared import CursorType +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.operations import * +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + DeleteResult, + InsertManyResult, + 
InsertOneResult, + UpdateResult, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollectionNoConnect(AsyncUnitTest): + """Test Collection features on a client that does not connect.""" + + db: AsyncDatabase + client: AsyncMongoClient + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test + + def test_collection(self): + self.assertRaises(TypeError, AsyncCollection, self.db, 5) + + def make_col(base, name): + return base[name] + + self.assertRaises(InvalidName, make_col, self.db, "") + self.assertRaises(InvalidName, make_col, self.db, "te$t") + self.assertRaises(InvalidName, make_col, self.db, ".test") + self.assertRaises(InvalidName, make_col, self.db, "test.") + self.assertRaises(InvalidName, make_col, self.db, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "") + self.assertRaises(InvalidName, make_col, self.db.test, "te$t") + self.assertRaises(InvalidName, make_col, self.db.test, ".test") + self.assertRaises(InvalidName, make_col, self.db.test, "test.") + self.assertRaises(InvalidName, make_col, self.db.test, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t") + + def test_getattr(self): + coll = self.db.test + self.assertIsInstance(coll["_does_not_exist"], AsyncCollection) + + with self.assertRaises(AttributeError) as context: + coll._does_not_exist + + # Message should be: + # "AttributeError: Collection has no attribute '_does_not_exist'. To + # access the test._does_not_exist collection, use + # database['test._does_not_exist']." + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + coll2 = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertEqual(coll2.write_concern, WriteConcern(w=0)) + self.assertNotEqual(coll.write_concern, coll2.write_concern) + coll3 = coll2.subcoll + self.assertEqual(coll2.write_concern, coll3.write_concern) + coll4 = coll2["subcoll"] + self.assertEqual(coll2.write_concern, coll4.write_concern) + + def test_iteration(self): + coll = self.db.coll + msg = "'AsyncCollection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, msg): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, msg): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) + + +class AsyncTestCollection(AsyncIntegrationTest): + w: int + + async def asyncSetUp(self): + await super().asyncSetUp() + self.w = async_client_context.w # type: ignore + + async def asyncTearDown(self): + await self.db.test.drop() + await self.db.drop_collection("test_large_limit") + await super().asyncTearDown() + + @contextlib.contextmanager + def write_concern_collection(self): + if async_client_context.is_rs: + with self.assertRaises(WriteConcernError): + # Unsatisfiable write concern. 
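+ # w is set to one more than the number of members, so the server can never satisfy it.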
+ yield AsyncCollection( + self.db, + "test", + write_concern=WriteConcern(w=len(async_client_context.nodes) + 1), + ) + else: + yield self.db.test + + async def test_equality(self): + self.assertIsInstance(self.db.test, AsyncCollection) + self.assertEqual(self.db.test, self.db["test"]) + self.assertEqual(self.db.test, AsyncCollection(self.db, "test")) + self.assertEqual(self.db.test.mike, self.db["test.mike"]) + self.assertEqual(self.db.test["mike"], self.db["test.mike"]) + + async def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + + async def test_create(self): + # No Exception. + db = async_client_context.client.pymongo_test + await db.create_test_no_wc.drop() + + async def lambda_test(): + return "create_test_no_wc" not in await db.list_collection_names() + + async def lambda_test_2(): + return "create_test_no_wc" in await db.list_collection_names() + + await async_wait_until( + lambda_test, + "drop create_test_no_wc collection", + ) + await db.create_collection("create_test_no_wc") + await async_wait_until( + lambda_test_2, + "create create_test_no_wc collection", + ) + # SERVER-33317 + if not async_client_context.is_mongos or not async_client_context.version.at_least(3, 7, 0): + with self.assertRaises(OperationFailure): + await db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) + + async def test_drop_nonexistent_collection(self): + await self.db.drop_collection("test") + self.assertNotIn("test", await self.db.list_collection_names()) + + # No exception + await self.db.drop_collection("test") + + async def test_create_indexes(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.test.create_indexes(["foo"]) # type: ignore[list-item] + self.assertRaises(TypeError, IndexModel, 5) + self.assertRaises(ValueError, IndexModel, []) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_indexes([IndexModel("hello")]) + await db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) + + # Tuple instead of list. 
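+ # IndexModel accepts any sequence of (key, direction) pairs, not just a list.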
+ await db.test.create_indexes([IndexModel((("world", ASCENDING),))]) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) + self.assertEqual(names, ["hello_world"]) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_indexes([IndexModel("hello")]) + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) + info = await db.test.index_information() + for name in names: + self.assertIn(name, info) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_indexes([IndexModel("a", unique=True)]) + + with self.write_concern_collection() as coll: + await coll.create_indexes([IndexModel("hello")]) + + @async_client_context.require_version_max(4, 3, -1) + async def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + await db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + @async_client_context.require_no_standalone + @async_client_context.require_version_min(4, 4, -1) + async def test_create_indexes_commitQuorum(self): + await self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + async def test_create_index(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + await db.test.create_index([]) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_index("hello") + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + + # Tuple instead of list. 
+ await db.test.create_index((("world", ASCENDING),)) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + ix = await db.test.create_index( + [("hello", DESCENDING), ("world", ASCENDING)], name="hello_world" + ) + self.assertEqual(ix, "hello_world") + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index("hello") + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + self.assertIn("hello_-1_world_1", await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertIn("hello_-1_world_1", await db.test.index_information()) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_index("a", unique=True) + + with self.write_concern_collection() as coll: + await coll.create_index([("hello", DESCENDING)]) + + await db.test.create_index(["hello", "world"]) + await db.test.create_index(["hello", ("world", DESCENDING)]) + await db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + + async def test_drop_index(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index(name) + + # Drop it again. + if async_client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + await db.test.drop_index(name) + else: + await db.test.drop_index(name) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index([("goodbye", ASCENDING)]) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertIn("hello_1", await db.test.index_information()) + + with self.write_concern_collection() as coll: + await coll.drop_index("hello_1") + + @async_client_context.require_no_mongos + @async_client_context.require_test_commands + async def test_index_management_max_time_ms(self): + coll = self.db.test + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + with self.assertRaises(ExecutionTimeout): + await coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_indexes(maxTimeMS=1) + finally: + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off" + ) + + async def test_list_indexes(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + + def map_indexes(indexes): + return {index["name"]: index for index in indexes} + + indexes = await (await db.test.list_indexes()).to_list() + 
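# list_indexes returns an AsyncCommandCursor: one await gets the cursor, a second drains it via to_list.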
self.assertEqual(len(indexes), 1) + self.assertIn("_id_", map_indexes(indexes)) + + await db.test.create_index("hello") + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 2) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 3) + index_map = map_indexes(indexes) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) + self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) + + # List indexes on a collection that does not exist. + indexes = await (await db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + # List indexes on a database that does not exist. + indexes = await (await db.client.db_does_not_exist.coll.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + async def test_index_info(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + self.assertEqual(len(await db.test.index_information()), 1) + self.assertIn("_id_", await db.test.index_information()) + + await db.test.create_index("hello") + self.assertEqual(len(await db.test.index_information()), 2) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual( + [("hello", DESCENDING), ("world", ASCENDING)], + (await db.test.index_information())["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, (await db.test.index_information())["hello_-1_world_1"]["unique"]) + + async def test_index_geo2d(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("loc_2d", await db.test.create_index([("loc", GEO2D)])) + index_info = (await db.test.index_information())["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) + + # geoSearch was deprecated in 4.4 and removed in 5.0 + @async_client_context.require_version_max(4, 5) + @async_client_context.require_no_mongos + async def test_index_haystack(self): + db = self.db + await db.test.drop() + _id = ( + await db.test.insert_one({"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}) + ).inserted_id + await db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + await db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + await db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = ( + await db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + ) + )["results"] + + self.assertEqual(2, len(results)) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) + + @async_client_context.require_no_mongos + async def test_index_text(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("t_text", await db.test.create_index([("t", TEXT)])) + index_info = (await db.test.index_information())["t_text"] + self.assertIn("weights", index_info) + + await db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t":
"spam"}, {"t": "egg sausage and bacon"}] + ) + + # MongoDB 2.6 text search. Create 'score' field in projection. + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) + + # Sort by 'score' field. + cursor.sort([("score", {"$meta": "textScore"})]) + results = await cursor.to_list() + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) + + await db.test.drop_indexes() + + async def test_index_2dsphere(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("geo_2dsphere", await db.test.create_index([("geo", GEOSPHERE)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": + break + else: + self.fail("2dsphere index not found.") + + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + query = {"geo": {"$within": {"$geometry": poly}}} + + # This query will error without a 2dsphere index. + db.test.find(query) + await db.test.drop_indexes() + + async def test_index_hashed(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("a_hashed", await db.test.create_index([("a", HASHED)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": + break + else: + self.fail("hashed index not found.") + + await db.test.drop_indexes() + + async def test_index_sparse(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue((await db.test.index_information())["key_1"]["sparse"]) + + async def test_index_background(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("keya", ASCENDING)]) + await db.test.create_index([("keyb", ASCENDING)], background=False) + await db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertNotIn("background", (await db.test.index_information())["keya_1"]) + self.assertFalse((await db.test.index_information())["keyb_1"]["background"]) + self.assertTrue((await db.test.index_information())["keyc_1"]["background"]) + + async def _drop_dups_setup(self, db): + await db.drop_collection("test") + await db.test.insert_one({"i": 1}) + await db.test.insert_one({"i": 2}) + await db.test.insert_one({"i": 2}) # duplicate + await db.test.insert_one({"i": 3}) + + async def test_index_dont_drop_dups(self): + # Try *not* dropping duplicates + db = self.db + await self._drop_dups_setup(db) + + # There's a duplicate + async def _test_create(): + await db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + + with self.assertRaises(DuplicateKeyError): + await _test_create() + + # Duplicate wasn't dropped + self.assertEqual(4, await db.test.count_documents({})) + + # Index wasn't created, only the default index on _id + self.assertEqual(1, len(await db.test.index_information())) + + # Get the plan dynamically because the explain format will change. + def get_plan_stage(self, root, stage): + if root.get("stage") == stage: + return root + elif "inputStage" in root: + return self.get_plan_stage(root["inputStage"], stage) + elif "inputStages" in root: + for i in root["inputStages"]: + stage = self.get_plan_stage(i, stage) + if stage: + return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. 
+ return self.get_plan_stage(root["queryPlan"], stage) + elif "shards" in root: + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) + if stage: + return stage + return {} + + async def test_index_filter(self): + db = self.db + await db.drop_collection("test") + + # Test bad filter spec on create. + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"$and": 5}) + + self.assertEqual( + "x_1", + await db.test.create_index( + [("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}} + ), + ) + await db.test.insert_one({"x": 5, "a": 2}) + await db.test.insert_one({"x": 6, "a": 1}) + + # Operations that use the partial index. + explain = await db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await db.test.find({"x": {"$gt": 1}, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await db.test.find({"x": 6, "a": {"$lte": 1}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + # Operations that do not use the partial index. + explain = await db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + explain = await db.test.find({"x": 6}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + # Test drop_index.
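+ # With the partial index gone, the same queries fall back to collection scans.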
+ await db.test.drop_index("x_1") + explain = await db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + async def test_field_selection(self): + db = self.db + await db.drop_collection("test") + + doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}} + await db.test.insert_one(doc) + + # Test field inclusion + doc = await anext(db.test.find({}, ["_id"])) + self.assertEqual(list(doc), ["_id"]) + doc = await anext(db.test.find({}, ["a"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "a"]) + doc = await anext(db.test.find({}, ["b"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b"]) + doc = await anext(db.test.find({}, ["c"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + doc = await anext(db.test.find({}, ["a"])) + self.assertEqual(doc["a"], 1) + doc = await anext(db.test.find({}, ["b"])) + self.assertEqual(doc["b"], 5) + doc = await anext(db.test.find({}, ["c"])) + self.assertEqual(doc["c"], {"d": 5, "e": 10}) + + # Test inclusion of fields with dots + doc = await anext(db.test.find({}, ["c.d"])) + self.assertEqual(doc["c"], {"d": 5}) + doc = await anext(db.test.find({}, ["c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + doc = await anext(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + + doc = await anext(db.test.find({}, ["b", "c.e"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b", "c"]) + doc = await anext(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["b"], 5) + + # Test field exclusion + doc = await anext(db.test.find({}, {"a": False, "b": 0})) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + + doc = await anext(db.test.find({}, {"_id": False})) + l = list(doc) + self.assertNotIn("_id", l) + + async def test_options(self): + db = self.db + await db.drop_collection("test") + await db.create_collection("test", capped=True, size=4096) + result = await db.test.options() + self.assertEqual(result, {"capped": True, "size": 4096}) + await db.drop_collection("test") + + async def test_insert_one(self): + db = self.db + await db.test.drop() + + document: dict[str, Any] = {"_id": 1000} + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(1, await db.test.count_documents({})) + + document = {"foo": "bar"} + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(2, await db.test.count_documents({})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertFalse(result.acknowledged) + # The insert failed duplicate key... 
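+ # The document kept the _id assigned by the earlier insert, so this w=0 write hits a duplicate key error that is never reported; the count stays at 2.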
+ + async def async_lambda(): + return await db.test.count_documents({}) == 2 + + await async_wait_until(async_lambda, "forcing duplicate key error") + + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(result.inserted_id, None) + + async def test_insert_many(self): + db = self.db + await db.test.drop() + + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, ObjectId) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [{"_id": i} for i in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual([], result.inserted_ids) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertFalse(result.acknowledged) + self.assertEqual(20, await db.test.count_documents({})) + + async def test_insert_many_generator(self): + coll = self.db.test + await coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many({}) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many([]) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(1) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) + + async def test_delete_one(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"z": 1}) + + result = await self.db.test.delete_one({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(2, await self.db.test.count_documents({})) + + result = await self.db.test.delete_one({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await self.db.test.count_documents({})) + + db = self.db.client.get_database(self.db.name, 
write_concern=WriteConcern(w=0)) + result = await db.test.delete_one({"z": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 1 documents") + + async def test_delete_many(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"y": 1}) + + result = await self.db.test.delete_many({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(2, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(0, await self.db.test.count_documents({"x": 1})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = await db.test.delete_many({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 2 documents") + + async def test_command_document_too_large(self): + large = "*" * (await async_client_context.max_bson_size + _COMMAND_OVERHEAD) + coll = self.db.test + with self.assertRaises(DocumentTooLarge): + await coll.insert_one({"data": large}) + # update_one and update_many are the same + with self.assertRaises(DocumentTooLarge): + await coll.replace_one({}, {"data": large}) + with self.assertRaises(DocumentTooLarge): + await coll.delete_one({"data": large}) + + async def test_write_large_document(self): + max_size = await async_client_context.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + with self.assertRaises(OperationFailure): + await self.db.test.insert_one({"foo": max_str}) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({}, {"foo": max_str}, upsert=True) + with self.assertRaises(OperationFailure): + await self.db.test.insert_many([{"x": 1}, {"foo": max_str}]) + await self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + await self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(DocumentTooLarge): + await unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)}) + await self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) + + async def test_insert_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test insert_one + with self.assertRaises(OperationFailure): + await db.test.insert_one({"_id": 1, "x": 100}) + result = await db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(1, result.inserted_id) + result = await db.test.insert_one({"_id": 2, "a": 0}) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(2, result.inserted_id) + + await db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + + async def 
async_lambda(): + return await db_w0.test.find_one({"y": 1}) + + await async_wait_until(async_lambda, "find w:0 inserted document") + + # Test insert_many + docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] + with self.assertRaises(OperationFailure): + await db.test.insert_many(docs) + result = await db.test.insert_many(docs, bypass_document_validation=True) + self.assertIsInstance(result, InsertManyResult) + self.assertEqual(97, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"x": doc["x"]})) + self.assertTrue(result.acknowledged) + docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertEqual(100, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"a": doc["a"]})) + self.assertTrue(result.acknowledged) + + with self.assertRaises(OperationFailure): + await db_w0.test.insert_many( + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) + + async def test_replace_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test replace_one + await db.test.insert_one({"a": 101}) + with self.assertRaises(OperationFailure): + await db.test.replace_one({"a": 101}, {"y": 1}) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(1, await db.test.count_documents({"a": 101})) + await db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"a": 101})) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + await db.test.replace_one({"y": 1}, {"a": 102}) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(0, await db.test.count_documents({"a": 101})) + self.assertEqual(1, await db.test.count_documents({"a": 102})) + + await db.test.insert_one({"y": 1}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.replace_one({"y": 1}, {"x": 101}) + self.assertEqual(0, await db.test.count_documents({"x": 101})) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + await db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(1, await db.test.count_documents({"x": 101})) + await db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) + self.assertEqual(0, await db.test.count_documents({"x": 101})) + self.assertEqual(1, await db.test.count_documents({"a": 103})) + + await db.test.insert_one({"y": 1}, bypass_document_validation=True) + await db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + + async def predicate(): + return await db_w0.test.find_one({"x": 1}) + + await async_wait_until(predicate, "find w:0 replaced document") + + async def test_update_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.test.insert_one({"z": 5}) + await db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0)) + + # Test update_one + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + self.assertEqual(1, await db.test.count_documents({"z": 5})) + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"z": 5})) + self.assertEqual(1, await db.test.count_documents({"z": -5})) + await db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + + await db.test.insert_one({"z": -10}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) + self.assertEqual(1, await db.test.count_documents({"z": -9})) + self.assertEqual(0, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": 0})) + + await db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.find_one({"y": 1, "x": 1}) + + await async_wait_until(async_lambda, "find w:0 updated document") + + # Test update_many + await db.test.insert_many([{"z": i} for i in range(3, 101)]) + await db.test.insert_one({"y": 0}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": -100}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + self.assertEqual(0, await db.test.count_documents({"y": 0, "z": -100})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": 1}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 1}})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(150, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(150, await db.test.count_documents({"z": 
{"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.count_documents({"m": 1, "x": 1}) == 2 + + await async_wait_until(async_lambda, "find w:0 updated documents") + + async def test_bypass_document_validation_bulk_write(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$gte": 0}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] + await db.test.bulk_write(ops, bypass_document_validation=True) + + self.assertEqual(3, await db.test.count_documents({})) + self.assertEqual(1, await db.test.count_documents({"a": -11})) + self.assertEqual(1, await db.test.count_documents({"a": -1})) + self.assertEqual(1, await db.test.count_documents({"a": -9})) + + # Assert that the operations would fail without bypass_doc_val + for op in ops: + with self.assertRaises(BulkWriteError): + await db.test.bulk_write([op]) + + with self.assertRaises(OperationFailure): + await db_w0.test.bulk_write(ops, bypass_document_validation=True) + + async def test_find_by_default_dct(self): + db = self.db + await db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + self.assertIsNotNone(await db.test.find_one(dct)) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) + + async def test_find_w_fields(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one( + {"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"} + ) + self.assertEqual(1, await db.test.count_documents({})) + doc = await anext(db.test.find({})) + self.assertIn("x", doc) + doc = await anext(db.test.find({})) + self.assertIn("mike", doc) + doc = await anext(db.test.find({})) + self.assertIn("extra thing", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertIn("x", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertIn("mike", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertNotIn("extra thing", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertNotIn("x", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertIn("mike", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertNotIn("extra thing", doc) + + @no_type_check + async def test_fields_specifier_as_dict(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) + + self.assertEqual([1, 2, 3], (await db.test.find_one())["x"]) + self.assertEqual([2, 3], (await db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertNotIn("x", await db.test.find_one(projection={"x": 0})) + self.assertIn("mike", await db.test.find_one(projection={"x": 0})) + + async def test_find_w_regex(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": "hello_world"}) + await db.test.insert_one({"x": "hello_mike"}) + await db.test.insert_one({"x": "hello_mikey"}) + 
await db.test.insert_one({"x": "hello_test"}) + + self.assertEqual(len(await db.test.find().to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello.*")}).to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("ello")}).to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello$")}).to_list()), 0) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello_mi.*$")}).to_list()), 2) + + async def test_id_can_be_anything(self): + db = self.db + + await db.test.delete_many({}) + auto_id = {"hello": "world"} + await db.test.insert_one(auto_id) + self.assertIsInstance(auto_id["_id"], ObjectId) + + numeric = {"_id": 240, "hello": "world"} + await db.test.insert_one(numeric) + self.assertEqual(numeric["_id"], 240) + + obj = {"_id": numeric, "hello": "world"} + await db.test.insert_one(obj) + self.assertEqual(obj["_id"], numeric) + + async for x in db.test.find(): + self.assertEqual(x["hello"], "world") + self.assertIn("_id", x) + + async def test_unique_index(self): + db = self.db + await db.drop_collection("test") + await db.test.create_index("hello") + + # No error. + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + await db.drop_collection("test") + await db.test.create_index("hello", unique=True) + + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + async def test_duplicate_key_error(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("x", unique=True) + + await db.test.insert_one({"_id": 1, "x": 1}) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + self.assertEqual(1, await db.test.count_documents({})) + + async def test_write_error_text_handling(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("text", unique=True) + + # Test workaround for SERVER-24007 + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + 
b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) + + text = utf_8_decode(data, None, True) + await db.test.insert_one({"text": text}) + + # Should raise DuplicateKeyError, not InvalidBSON + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"text": text}) + + with self.assertRaises(DuplicateKeyError): + await db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) + + # Should raise BulkWriteError, not InvalidBSON + with self.assertRaises(BulkWriteError): + await db.test.insert_many([{"text": text}]) + + async def test_write_error_unicode(self): + coll = self.db.test + self.addAsyncCleanup(coll.drop) + + await coll.create_index("a", unique=True) + await coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + await coll.insert_one({"a": "unicode \U0001f40d"}) + + # Once more for good measure. + self.assertIn("E11000 duplicate key error", str(ctx.exception)) + + async def test_wtimeout(self): + # Ensure setting wtimeout doesn't disable write concern altogether. + # See SERVER-12596. + collection = self.db.test + await collection.drop() + await collection.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + async def test_error_code(self): + try: + await self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) + except OperationFailure as exc: + self.assertIn(exc.code, (9, 10147, 16840, 17009)) + # Just check that we set the error document. Fields + # vary by MongoDB version. 
+ self.assertIsNotNone(exc.details) + else: + self.fail("OperationFailure was not raised") + + async def test_index_on_subfield(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + await db.drop_collection("test") + await db.test.create_index("hello.a", unique=True) + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + async def test_replace_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.replace_one({}, {"$set": {"x": 1}}) + + id1 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.replace_one({"x": 1}, {"y": 1}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + self.assertEqual(0, await db.test.count_documents({"x": 1})) + self.assertEqual((await db.test.find_one(id1))["y"], 1) # type: ignore + + replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) + result = await db.test.replace_one({"y": 1}, replacement, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual((await db.test.find_one(id1))["z"], 1) # type: ignore + + result = await db.test.replace_one({"x": 2}, {"y": 2}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 2})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.replace_one({"x": 0}, {"y": 0}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_one({}, {"x": 1}) + + id1 = (await db.test.insert_one({"x": 5})).inserted_id + result = await db.test.update_one({}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 6) # type: ignore + + id2 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + 
self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((await db.test.find_one(id2))["x"], 1) # type: ignore + + result = await db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_result(self): + db = self.db + await db.drop_collection("test") + + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) + + async def test_update_many(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_many({}, {"x": 1}) + + await db.test.insert_one({"x": 4, "y": 3}) + await db.test.insert_one({"x": 5, "y": 5}) + await db.test.insert_one({"x": 4, "y": 4}) + + result = await db.test.update_many({"x": 4}, {"$set": {"y": 5}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(2, result.matched_count) + self.assertIn(result.modified_count, (None, 2)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(3, await db.test.count_documents({"y": 5})) + + result = await db.test.update_many({"x": 5}, {"$set": {"y": 6}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 6})) + + result = await db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_check_keys(self): + await self.db.drop_collection("test") + self.assertTrue(await self.db.test.insert_one({"hello": "world"})) + + # Modify shouldn't check keys... 
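+        # (update documents whose first key begins with '$' are treated as
+        # modifiers, so dotted field names like "foo.bar" pass through to
+        # the server without client-side key validation).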
+ self.assertTrue( + await self.db.test.update_one( + {"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True + ) + ) + + # I know this seems like testing the server but I'd like to be notified + # by CI if the server's behavior changes here. + doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) + with self.assertRaises(OperationFailure): + await self.db.test.update_one({"hello": "world"}, doc, upsert=True) + + # This is going to cause keys to be checked and raise InvalidDocument. + # That's OK assuming the server's behavior in the previous assert + # doesn't change. If the behavior changes checking the first key for + # '$' in update won't be good enough anymore. + doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({"hello": "world"}, doc, upsert=True) + + # Replace with empty document + self.assertNotEqual( + 0, (await self.db.test.replace_one({"hello": "world"}, {})).matched_count + ) + + async def test_acknowledged_delete(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"x": 1}, {"x": 1}]) + self.assertEqual(2, (await db.test.delete_many({})).deleted_count) + self.assertEqual(0, (await db.test.delete_many({})).deleted_count) + + @async_client_context.require_version_max(4, 9) + async def test_manual_last_error(self): + coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) + await coll.insert_one({"x": 1}) + await self.db.command("getlasterror", w=1, wtimeout=1) + + async def test_count_documents(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.count_documents({}), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 2) + await db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(await db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(await db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) + + async def test_estimated_document_count(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 2) + + async def test_aggregate(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertIsInstance(result, AsyncCommandCursor) + self.assertEqual([{"foo": [1, 2]}], await result.to_list()) + + # Test write concern. 
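+        # $out makes the aggregation a write operation, so the collection's
+        # write concern applies (write_concern_collection is a test helper
+        # that is expected to verify the writeConcern sent with the command).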
+ with self.write_concern_collection() as coll: + await coll.aggregate([{"$out": "output-collection"}]) + + async def test_aggregate_raw_bson(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = await coll.aggregate([pipeline]) + self.assertIsInstance(result, AsyncCommandCursor) + first_result = await anext(result) + self.assertIsInstance(first_result, RawBSONDocument) + self.assertEqual([1, 2], list(first_result["foo"])) + + async def test_aggregation_cursor_validation(self): + db = self.db + projection = {"$project": {"_id": "$_id"}} + cursor = await db.test.aggregate([projection], cursor={}) + self.assertIsInstance(cursor, AsyncCommandCursor) + + async def test_aggregation_cursor(self): + db = self.db + if await async_client_context.has_secondaries: + # Test that getMore messages are sent to the right server. + db = self.client.get_database( + db.name, + read_preference=ReadPreference.SECONDARY, + write_concern=WriteConcern(w=self.w), + ) + + for collection_size in (10, 1000): + await db.drop_collection("test") + await db.test.insert_many([{"_id": i} for i in range(collection_size)]) + expected_sum = sum(range(collection_size)) + # Use batchSize to ensure multiple getMore messages + cursor = await db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) + + self.assertEqual(expected_sum, sum(doc["_id"] for doc in await cursor.to_list())) + + # Test that batchSize is handled properly. + cursor = await db.test.aggregate([], batchSize=5) + self.assertEqual(5, len(cursor._data)) + # Force a getMore + cursor._data.clear() + await anext(cursor) + # batchSize - 1 + self.assertEqual(4, len(cursor._data)) + # Exhaust the cursor. There shouldn't be any errors. 
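+        # Iterating to completion drains the remaining batches via getMore
+        # and lets the server close the cursor cleanly.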
+ async for _doc in cursor: + pass + + async def test_aggregation_cursor_alive(self): + await self.db.test.delete_many({}) + await self.db.test.insert_many([{} for _ in range(3)]) + self.addAsyncCleanup(self.db.test.delete_many, {}) + cursor = await self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) + n = 0 + while True: + await cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + async def test_invalid_session_parameter(self): + async def try_invalid_session(): + with await self.db.test.aggregate([], {}): # type:ignore + pass + + with self.assertRaisesRegex(ValueError, "must be an AsyncClientSession"): + await try_invalid_session() + + async def test_large_limit(self): + db = self.db + await db.drop_collection("test_large_limit") + await db.test_large_limit.create_index([("x", 1)]) + my_str = "mongomongo" * 1000 + + await db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) + + i = 0 + y = 0 + async for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): + i += 1 + y += doc["x"] + + self.assertEqual(1900, i) + self.assertEqual((1900 * 1899) / 2, y) + + async def test_find_kwargs(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + total = 0 + async for x in db.test.find({}, skip=4, limit=2): + total += x["x"] + + self.assertEqual(9, total) + + async def test_rename(self): + db = self.db + await db.drop_collection("test") + await db.drop_collection("foo") + + with self.assertRaises(TypeError): + await db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + await db.test.rename("") + with self.assertRaises(InvalidName): + await db.test.rename("te$t") + with self.assertRaises(InvalidName): + await db.test.rename(".test") + with self.assertRaises(InvalidName): + await db.test.rename("test.") + with self.assertRaises(InvalidName): + await db.test.rename("tes..t") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(0, await db.foo.count_documents({})) + + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + await db.test.rename("foo") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(10, await db.foo.count_documents({})) + + x = 0 + async for doc in db.foo.find(): + self.assertEqual(x, doc["x"]) + x += 1 + + await db.test.insert_one({}) + with self.assertRaises(OperationFailure): + await db.foo.rename("test") + await db.foo.rename("test", dropTarget=True) + + with self.write_concern_collection() as coll: + await coll.rename("foo") + + @no_type_check + async def test_find_one(self): + db = self.db + await db.drop_collection("test") + + _id = (await db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id + + self.assertEqual("world", (await db.test.find_one())["hello"]) + self.assertEqual(await db.test.find_one(_id), await db.test.find_one()) + self.assertEqual(await db.test.find_one(None), await db.test.find_one()) + self.assertEqual(await db.test.find_one({}), await db.test.find_one()) + self.assertEqual(await db.test.find_one({"hello": "world"}), await db.test.find_one()) + + self.assertIn("hello", await db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", await db.test.find_one(projection=["foo"])) + + self.assertIn("hello", await db.test.find_one(projection=("hello",))) + 
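+        # Any iterable of field names works as a projection: list, tuple,
+        # set, or frozenset, as well as a mapping of field name to bool.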
self.assertNotIn("hello", await db.test.find_one(projection=("foo",))) + + self.assertIn("hello", await db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", await db.test.find_one(projection={"foo"})) + + self.assertIn("hello", await db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", await db.test.find_one(projection=frozenset(["foo"]))) + + self.assertEqual(["_id"], list(await db.test.find_one(projection={"_id": True}))) + self.assertIn("hello", list(await db.test.find_one(projection={}))) + self.assertIn("hello", list(await db.test.find_one(projection=[]))) + + self.assertEqual(None, await db.test.find_one({"hello": "foo"})) + self.assertEqual(None, await db.test.find_one(ObjectId())) + + async def test_find_one_non_objectid(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"_id": 5}) + + self.assertTrue(await db.test.find_one(5)) + self.assertFalse(await db.test.find_one(6)) + + async def test_find_one_with_find_args(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": i} for i in range(1, 4)]) + + self.assertEqual(1, (await db.test.find_one())["x"]) + self.assertEqual(2, (await db.test.find_one(skip=1, limit=2))["x"]) + + async def test_find_with_sort(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) + + self.assertEqual(2, (await db.test.find_one())["x"]) + self.assertEqual(1, (await db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (await db.test.find_one(sort=[("x", -1)]))["x"]) + + async def to_list(things): + return [thing["x"] async for thing in things] + + self.assertEqual([2, 1, 3], await to_list(db.test.find())) + self.assertEqual([1, 2, 3], await to_list(db.test.find(sort=[("x", 1)]))) + self.assertEqual([3, 2, 1], await to_list(db.test.find(sort=[("x", -1)]))) + + with self.assertRaises(TypeError): + await db.test.find(sort=5) + with self.assertRaises(TypeError): + await db.test.find(sort="hello") + with self.assertRaises(TypeError): + await db.test.find(sort=["hello", 1]) + + # TODO doesn't actually test functionality, just that it doesn't blow up + async def test_cursor_timeout(self): + await self.db.test.find(no_cursor_timeout=True).to_list() + await self.db.test.find(no_cursor_timeout=False).to_list() + + async def test_exhaust(self): + if await async_is_mongos(self.db.client): + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST)) + return + + # Limit is incompatible with exhaust. + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5)) + cur = self.db.test.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + cur.limit(5) + await cur.next() + cur = self.db.test.find(limit=5) + with self.assertRaises(InvalidOperation): + await cur.add_option(64) + cur = self.db.test.find() + await cur.add_option(64) + with self.assertRaises(InvalidOperation): + cur.limit(5) + + await self.db.drop_collection("test") + # Insert enough documents to require more than one batch + await self.db.test.insert_many([{"i": i} for i in range(150)]) + + client = await self.async_rs_or_single_client(maxPoolSize=1) + pool = await async_get_pool(client) + + # Make sure the socket is returned after exhaustion. 
+ cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) + await anext(cur) + self.assertEqual(0, len(pool.conns)) + async for _ in cur: + pass + self.assertEqual(1, len(pool.conns)) + + # Same as previous but don't call next() + async for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): + pass + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if async_client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. + for _ in range(3): + await anext(cur) + else: + await anext(cur) + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + await cur.close() + cur = None + # Wait until the background thread returns the socket. + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # The socket should be discarded. + self.assertEqual(0, len(pool.conns)) + + async def test_distinct(self): + await self.db.drop_collection("test") + + test = self.db.test + await test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = await test.distinct("a") + distinct.sort() + + self.assertEqual([1, 2, 3], distinct) + + distinct = await test.find({"a": {"$gt": 1}}).distinct("a") + distinct.sort() + self.assertEqual([2, 3], distinct) + + distinct = await test.distinct("a", {"a": {"$gt": 1}}) + distinct.sort() + self.assertEqual([2, 3], distinct) + + await self.db.drop_collection("test") + + await test.insert_one({"a": {"b": "a"}, "c": 12}) + await test.insert_one({"a": {"b": "b"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + + distinct = await test.distinct("a.b") + distinct.sort() + + self.assertEqual(["a", "b", "c"], distinct) + + async def test_query_on_query_field(self): + await self.db.drop_collection("test") + await self.db.test.insert_one({"query": "foo"}) + await self.db.test.insert_one({"bar": "foo"}) + + self.assertEqual(1, await self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(await self.db.test.find({"query": {"$ne": None}}).to_list())) + + async def test_min_query(self): + await self.db.drop_collection("test") + await self.db.test.insert_many([{"x": 1}, {"x": 2}]) + await self.db.test.create_index("x") + + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") + + docs = await cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(2, docs[0]["x"]) + + async def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + await self.db.test.drop() + n_docs = await async_client_context.max_write_batch_size + 100 + await self.db.test.insert_many([{} for _ in range(n_docs)]) + self.assertEqual(n_docs, await self.db.test.count_documents({})) + await self.db.test.drop() + + async def test_insert_many_large_batch(self): + # Tests legacy insert. 
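+        # (a bulk insert whose payload exceeds one write-command batch, so
+        # the driver has to split it into multiple batches itself).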
+ db = self.client.test_insert_large_batch + self.addAsyncCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = await async_client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + await db.collection_0.insert_many(successful_insert) + self.assertEqual(4, await db.collection_0.count_documents({})) + + await db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] + + with self.assertRaises(BulkWriteError): + await db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, await db.collection_1.count_documents({})) + + await db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_second_fails) + + async def async_lambda(): + return await db.collection_2.count_documents({}) == 1 + + await async_wait_until(async_lambda, "insert 1 document", timeout=60) + + await db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + await db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, await db.collection_3.count_documents({})) + + await db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_two_failures, ordered=False) + + async def async_lambda(): + return await db.collection_4.count_documents({}) == 2 + + # Only the first and third documents are inserted. + await async_wait_until(async_lambda, "insert 2 documents", timeout=60) + + await db.collection_4.drop() + + async def test_messages_with_unicode_collection_names(self): + db = self.db + + await db["Employés"].insert_one({"x": 1}) + await db["Employés"].replace_one({"x": 1}, {"x": 2}) + await db["Employés"].delete_many({}) + await db["Employés"].find_one() + await db["Employés"].find().to_list() + + async def test_drop_indexes_non_existent(self): + await self.db.drop_collection("test") + await self.db.test.drop_indexes() + + # This is really a bson test but easier to just reproduce it here... + # (Shame on me) + async def test_bad_encode(self): + c = self.db.test + await c.drop() + with self.assertRaises(InvalidDocument): + await c.insert_one({"x": c}) + + class BadGetAttr(dict): + def __getattr__(self, name): + pass + + bad = BadGetAttr([("foo", "bar")]) + await c.insert_one({"bad": bad}) + self.assertEqual("bar", (await c.find_one())["bad"]["foo"]) # type: ignore + + async def test_array_filters_validation(self): + # array_filters must be a list. 
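+        # Anything other than a list (a dict here) should raise TypeError
+        # client-side, before any command is sent to the server.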
+ c = self.db.test + with self.assertRaises(TypeError): + await c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + update = {"$set": {"a": 1}} + await c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] + + async def test_array_filters_unacknowledged(self): + c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(ConfigurationError): + await c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.find_one_and_update( + {}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}] + ) + + async def test_find_one_and(self): + c = self.db.test + await c.drop() + await c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual( + {"_id": 1, "i": 1}, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}}) + ) + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, await c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, await c.find_one({"_id": 1})) + + self.assertEqual(None, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + await c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) + + await c.drop() + for j in range(5): + await c.insert_one({"j": j, "i": 0}) + + sort = [("j", DESCENDING)] + self.assertEqual(4, (await c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) + + async def test_find_one_and_write_concern(self): + listener = OvertCommandListener() + db = (await self.async_single_client(event_listeners=[listener]))[self.db.name] + # non-default WriteConcern. + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) + # default WriteConcern. + c_default = db.get_collection("test", write_concern=WriteConcern()) + # Authenticate the client and throw out auth commands from the listener. + await db.command("ping") + listener.reset() + await c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + # Test write concern errors. 
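+        # Requesting one more acknowledgement than there are members
+        # (w=len(nodes) + 1) can never be satisfied, so each helper should
+        # surface a WriteConcernError from the server.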
+ if async_client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(async_client_context.nodes) + 1) + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() + + await c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + async def test_find_with_nested(self): + c = self.db.test + await c.drop() + await c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] + self.assertEqual( + [2], + [ + i["i"] + async for i in c.find( + { + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, + ] + } + ) + ], + ) + + self.assertEqual( + [0, 1, 2], + [ + i["i"] + async for i in c.find( + { + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, + ] + } + ) + ], + ) + + async def test_find_regex(self): + c = self.db.test + await c.drop() + await c.insert_one({"r": re.compile(".*")}) + + self.assertIsInstance((await c.find_one())["r"], Regex) # type: ignore + async for doc in c.find(): + self.assertIsInstance(doc["r"], Regex) + + def test_find_command_generation(self): + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(AsyncCollection(self.db, "test")) + + @async_client_context.require_version_min(5, 0, 0) + async def test_helpers_with_let(self): + c = self.db.test + + async def afind(*args, **kwargs): + return c.find(*args, **kwargs) + + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (afind, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([],)), + ] + for let in [10, "str", [], False]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): + await helper(*args, let=let) # type: ignore + for helper, args in helpers: + await helper(*args, let={}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_collection_management.py b/test/asynchronous/test_collection_management.py new file mode 100644 index 0000000000..c0edf91581 --- /dev/null +++ b/test/asynchronous/test_collection_management.py @@ -0,0 +1,41 @@ +# Copyright 2021-present MongoDB, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the collection management unified spec tests."""
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+
+sys.path[0:0] = [""]
+
+from test import unittest
+from test.asynchronous.unified_format import generate_test_classes
+
+_IS_SYNC = False
+
+# Location of JSON test specifications.
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management")
+else:
+    _TEST_PATH = os.path.join(
+        pathlib.Path(__file__).resolve().parent.parent, "collection_management"
+    )
+
+# Generate unified tests.
+globals().update(generate_test_classes(_TEST_PATH, module=__name__))
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_command_logging.py b/test/asynchronous/test_command_logging.py
new file mode 100644
index 0000000000..f9b459c152
--- /dev/null
+++ b/test/asynchronous/test_command_logging.py
@@ -0,0 +1,44 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the command logging unified format spec tests."""
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+
+sys.path[0:0] = [""]
+
+from test import unittest
+from test.asynchronous.unified_format import generate_test_classes
+
+_IS_SYNC = False
+
+# Location of JSON test specifications.
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging")
+else:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging")
+
+
+globals().update(
+    generate_test_classes(
+        _TEST_PATH,
+        module=__name__,
+    )
+)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_command_monitoring.py b/test/asynchronous/test_command_monitoring.py
new file mode 100644
index 0000000000..311fd1fdc1
--- /dev/null
+++ b/test/asynchronous/test_command_monitoring.py
@@ -0,0 +1,45 @@
+# Copyright 2015-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_comment.py b/test/asynchronous/test_comment.py new file mode 100644 index 0000000000..2d6d0f5f1e --- /dev/null +++ b/test/asynchronous/test_comment.py @@ -0,0 +1,159 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +from __future__ import annotations + +import inspect +import sys + +sys.path[0:0] = [""] +from inspect import iscoroutinefunction +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import OvertCommandListener + +from bson.dbref import DBRef +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.operations import IndexModel + +_IS_SYNC = False + + +class AsyncTestComment(AsyncIntegrationTest): + async def _test_ops( + self, + helpers, + already_supported, + listener, + ): + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + listener.reset() + kwargs = {"comment": cc} + try: + maybe_cursor = await h(*args, **kwargs) + except Exception: + maybe_cursor = None + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" + ) + if isinstance(maybe_cursor, AsyncCommandCursor): + await maybe_cursor.close() + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": + self.assertIn( + ":param comment:", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + listener.reset() + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_database_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener])).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), 
+ (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_client_helpers(self): + listener = OvertCommandListener() + cli = await self.async_rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + async def test_collection_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener]))[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + await self._test_ops(helpers, already_supported, listener) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_common.py b/test/asynchronous/test_common.py new file mode 100644 index 0000000000..00495e7c30 --- /dev/null +++ b/test/asynchronous/test_common.py @@ -0,0 +1,185 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the pymongo common module.""" +from __future__ import annotations + +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.errors import OperationFailure +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCommon(AsyncIntegrationTest): + async def test_uuid_representation(self): + coll = self.db.uuid + await coll.drop() + + # Test property + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) + + # Test basic query + uu = uuid.uuid4() + # Insert as binary subtype 3 + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options + await coll.insert_one({"uu": uu}) + self.assertEqual(uu, (await coll.find_one({"uu": uu}))["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual(STANDARD, coll.codec_options.uuid_representation) + self.assertEqual(None, await coll.find_one({"uu": uu})) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uul, (await coll.find_one({"uu": uul}))["uu"]) # type: ignore + + # Test count_documents + self.assertEqual(0, await coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, await coll.count_documents({"uu": uu})) + + # Test delete + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.delete_one({"uu": uu}) + self.assertEqual(1, await coll.count_documents({})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + await coll.delete_one({"uu": uu}) + self.assertEqual(0, await coll.count_documents({})) + + # Test update_one + await coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test Cursor.distinct + self.assertEqual([2], await coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], await coll.find({"_id": uu}).distinct("i")) + + # Test findAndModify + self.assertEqual(None, await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, (await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test command + self.assertEqual( + 5, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) + )["value"]["i"], + ) + self.assertEqual( + 6, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) + )["value"]["i"], + ) + + async def 
test_write_concern(self): + c = await self.async_rs_or_single_client(connect=False) + self.assertEqual(WriteConcern(), c.write_concern) + + c = await self.async_rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + wc = WriteConcern(w=2, wtimeout=1000) + self.assertEqual(wc, c.write_concern) + + # Can we override back to the server default? + db = c.get_database("pymongo_test", write_concern=WriteConcern()) + self.assertEqual(db.write_concern, WriteConcern()) + + db = c.pymongo_test + self.assertEqual(wc, db.write_concern) + coll = db.test + self.assertEqual(wc, coll.write_concern) + + cwc = WriteConcern(j=True) + coll = db.get_collection("test", write_concern=cwc) + self.assertEqual(cwc, coll.write_concern) + self.assertEqual(wc, db.write_concern) + + async def test_mongo_client(self): + pair = await async_client_context.pair + m = await self.async_rs_or_single_client(w=0) + coll = m.pymongo_test.write_concern_test + await coll.drop() + doc = {"_id": ObjectId()} + await coll.insert_one(doc) + self.assertTrue(await coll.insert_one(doc)) + coll = coll.with_options(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client() + coll = m.pymongo_test.write_concern_test + new_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertTrue(await new_coll.insert_one(doc)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + await coll.insert_one(doc) + + # Equality tests + direct = await connected(await self.async_single_client(w=0)) + direct2 = await connected( + await self.async_single_client(f"mongodb://{pair}/?w=0", **self.credentials) + ) + self.assertEqual(direct, direct2) + self.assertFalse(direct != direct2) + + async def test_validate_boolean(self): + await self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + await self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_concurrency.py b/test/asynchronous/test_concurrency.py new file mode 100644 index 0000000000..65ea90c03f --- /dev/null +++ b/test/asynchronous/test_concurrency.py @@ -0,0 +1,54 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests to ensure that the async API is properly concurrent with asyncio.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils_shared import delay + +_IS_SYNC = False + + +class TestAsyncConcurrency(AsyncIntegrationTest): + async def _task(self, client): + await client.db.test.find_one({"$where": delay(0.20)}) + + async def test_concurrency(self): + tasks = [] + iterations = 5 + + client = await self.async_single_client() + await client.db.test.drop() + await client.db.test.insert_one({"x": 1}) + + start = time.time() + + for _ in range(iterations): + await self._task(client) + + sequential_time = time.time() - start + start = time.time() + + for i in range(iterations): + tasks.append(self._task(client)) + + await asyncio.gather(*tasks) + concurrent_time = time.time() - start + + percent_faster = (sequential_time - concurrent_time) / concurrent_time * 100 + # We expect the concurrent tasks to be at least 50% faster on all platforms as a conservative benchmark + self.assertGreaterEqual(percent_faster, 50) diff --git a/test/asynchronous/test_connection_logging.py b/test/asynchronous/test_connection_logging.py new file mode 100644 index 0000000000..945c6c59b5 --- /dev/null +++ b/test/asynchronous/test_connection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py new file mode 100644 index 0000000000..c6dc6f0a69 --- /dev/null +++ b/test/asynchronous/test_connection_monitoring.py @@ -0,0 +1,472 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +from pathlib import Path +from test.asynchronous.utils import async_get_pool, async_get_pools + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator, SpecRunnerTask +from test.utils_shared import ( + CMAPListener, + async_wait_until, + camel_to_snake, +) + +from bson.objectid import ObjectId +from bson.son import SON +from pymongo.asynchronous.pool import PoolState, _PoolClosedError +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_preferences import ReadPreference +from pymongo.topology_description import updated_topology_description + +_IS_SYNC = False + +OBJECT_TYPES = { + # Event types. + "ConnectionCheckedIn": ConnectionCheckedInEvent, + "ConnectionCheckedOut": ConnectionCheckedOutEvent, + "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent, + "ConnectionClosed": ConnectionClosedEvent, + "ConnectionCreated": ConnectionCreatedEvent, + "ConnectionReady": ConnectionReadyEvent, + "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent, + "ConnectionPoolCreated": PoolCreatedEvent, + "ConnectionPoolReady": PoolReadyEvent, + "ConnectionPoolCleared": PoolClearedEvent, + "ConnectionPoolClosed": PoolClosedEvent, + # Error types. + "PoolClosedError": _PoolClosedError, + "WaitQueueTimeoutError": WaitQueueTimeoutError, +} + + +class AsyncTestCMAP(AsyncIntegrationTest): + # Location of JSON test specifications. + if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") + + # Test operations: + + async def start(self, op): + """Run the 'start' thread operation.""" + target = op["target"] + thread = SpecRunnerTask(target) + await thread.start() + self.targets[target] = thread + + async def wait(self, op): + """Run the 'wait' operation.""" + await asyncio.sleep(op["ms"] / 1000.0) + + async def wait_for_thread(self, op): + """Run the 'waitForThread' operation.""" + target = op["target"] + thread = self.targets[target] + await thread.stop() + await thread.join() + if thread.exc: + raise thread.exc + self.assertFalse(thread.ops) + + async def wait_for_event(self, op): + """Run the 'waitForEvent' operation.""" + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + await async_wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) + + async def check_out(self, op): + """Run the 'checkOut' operation.""" + label = op["label"] + async with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. 
+ conn.pin_cursor() + if label: + self.labels[label] = conn + else: + self.addAsyncCleanup(conn.close_conn, None) + + async def check_in(self, op): + """Run the 'checkIn' operation.""" + label = op["connection"] + conn = self.labels[label] + await self.pool.checkin(conn) + + async def ready(self, op): + """Run the 'ready' operation.""" + await self.pool.ready() + + async def clear(self, op): + """Run the 'clear' operation.""" + if "interruptInUseConnections" in op: + await self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + await self.pool.reset() + + async def close(self, op): + """Run the 'close' operation.""" + await self.pool.close() + + async def run_operation(self, op): + """Run a single operation in a test.""" + op_name = camel_to_snake(op["name"]) + thread = op["thread"] + meth = getattr(self, op_name) + if thread: + await self.targets[thread].schedule(lambda: meth(op)) + else: + await meth(op) + + async def run_operations(self, ops): + """Run a test's operations.""" + for op in ops: + self._ops.append(op) + await self.run_operation(op) + + def check_object(self, actual, expected): + """Assert that the actual object matches the expected object.""" + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) + for attr, expected_val in expected.items(): + if attr == "type": + continue + c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" + actual_val = getattr(actual, c2s) + if expected_val == 42: + self.assertIsNotNone(actual_val) + else: + self.assertEqual(actual_val, expected_val) + + def check_event(self, actual, expected): + """Assert that the actual event matches the expected event.""" + self.check_object(actual, expected) + + def actual_events(self, ignore): + """Return all the non-ignored events.""" + ignore = tuple(OBJECT_TYPES[name] for name in ignore) + return [event for event in self.listener.events if not isinstance(event, ignore)] + + def check_events(self, events, ignore): + """Check the events of a test.""" + actual_events = self.actual_events(ignore) + for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") + self.check_event(actual, expected) + + if len(events) > len(actual_events): + self.fail(f"missing events: {events[len(actual_events) :]!r}") + + def check_error(self, actual, expected): + message = expected.pop("message") + self.check_object(actual, expected) + self.assertIn(message, str(actual)) + + async def set_fail_point(self, command_args): + if not async_client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + await self.configure_fail_point(self.client, command_args) + + async def run_scenario(self, scenario_def, test): + """Run a CMAP spec test.""" + self.logs: list = [] + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) + self.listener = CMAPListener() + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + await self.set_fail_point(fp) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. 
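+        # A negative interval means the background thread must never run, so
+        # it is mapped to an effectively infinite kill-cursor period below.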
+ interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = await self.async_single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = async_client_context.client._topology.description + sd = td.server_descriptions()[ + (await async_client_context.host, await async_client_context.port) + ] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + await client._topology.open() + else: + await client._get_topology() + self.pool = list(client._topology._servers.values())[0].pool + + # Map of target names to Thread objects. + self.targets: dict = {} + # Map of label names to AsyncConnection objects + self.labels: dict = {} + + async def cleanup(): + for t in self.targets.values(): + await t.stop() + for t in self.targets.values(): + await t.join(5) + for conn in self.labels.values(): + await conn.close_conn(None) + + self.addAsyncCleanup(cleanup) + + try: + if test["error"]: + with self.assertRaises(PyMongoError) as ctx: + await self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) + else: + await self.run_operations(test["operations"]) + + self.check_events(test["events"], test["ignore"]) + except Exception: + # Print the events after a test failure. + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") + for op in self._ops: + print(op) + print("Threads:") + print(self.targets) + print("AsyncConnections:") + print(self.labels) + print("Events:") + for event in self.listener.events: + print(event) + print("Log:") + for log in self.logs: + print(log) + raise + + POOL_OPTIONS = { + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, + } + + # + # Prose tests. Numbers correspond to the prose test number in the spec. + # + async def test_1_client_connection_pool_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_2_all_client_pools_have_same_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + await client.admin.command("ping") + # Discover at least one secondary. 
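+        # Reading from a secondary forces the client to open a second pool,
+        # so the comparison below covers more than one server when possible.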
+ if await async_client_context.has_secondaries: + await client.admin.command("ping", read_preference=ReadPreference.SECONDARY) + pools = await async_get_pools(client) + pool_opts = pools[0].opts + + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + for pool in pools[1:]: + self.assertEqual(pool.opts, pool_opts) + + async def test_3_uri_connection_pool_options(self): + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{await async_client_context.pair}/?{opts}" + client = await self.async_rs_or_single_client(uri) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_4_subscribe_to_events(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + self.assertEqual(listener.event_count(PoolCreatedEvent), 1) + + # Creates a new connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) + self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) + self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) + + # Uses the existing connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) + + await client.close() + self.assertEqual(listener.event_count(PoolClosedEvent), 1) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) + + async def test_5_check_out_fails_connection_error(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + pool = await async_get_pool(client) + + def mock_connect(*args, **kwargs): + raise ConnectionFailure("connect failed") + + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Attempt to create a new connection. + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) + + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + @async_client_context.require_no_fips + async def test_5_check_out_fails_auth_error(self): + listener = CMAPListener() + client = await self.async_single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) + + # Attempt to create a new connection. + with self.assertRaisesRegex(OperationFailure, "failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) + # Error happens here. 
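+ # The failed auth handshake closes the newly created connection, so a
+ # ConnectionClosedEvent is observed before the ConnectionCheckOutFailedEvent.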
+ self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + # + # Extra non-spec tests + # + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + async def test_events_repr(self): + host = ("localhost", 27017) + self.assertRepr(ConnectionCheckedInEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) + self.assertRepr( + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr(ConnectionCreatedEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) + self.assertRepr(ConnectionCheckOutStartedEvent(host)) + self.assertRepr(PoolCreatedEvent(host, {})) + self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) + self.assertRepr(PoolClosedEvent(host)) + + async def test_close_leaves_pool_unpaused(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + await client.admin.command("ping") + pool = await async_get_pool(client) + await client.close() + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + async with pool.checkout(): + pass + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +class CMAPSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + CMAP tests do not have a 'tests' field. The whole file represents + a single test case. + """ + return [scenario_def] + + +test_creator = CMAPSpecTestCreator(create_test, AsyncTestCMAP, AsyncTestCMAP.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py new file mode 100644 index 0000000000..aed3c1ce7b --- /dev/null +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -0,0 +1,137 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Test compliance with the connections survive primary step down spec."""
+from __future__ import annotations
+
+import sys
+from test.asynchronous.utils import async_ensure_all_connected
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import (
+ AsyncIntegrationTest,
+ async_client_context,
+ unittest,
+)
+from test.asynchronous.helpers import async_repl_set_step_down
+from test.utils_shared import (
+ CMAPListener,
+)
+
+from bson import SON
+from pymongo import monitoring
+from pymongo.asynchronous.collection import AsyncCollection
+from pymongo.errors import NotPrimaryError
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = False
+
+
+class TestAsyncConnectionsSurvivePrimaryStepDown(AsyncIntegrationTest):
+ listener: CMAPListener
+ coll: AsyncCollection
+
+ @async_client_context.require_replica_set
+ async def asyncSetUp(self):
+ await super().asyncSetUp()
+ self.listener = CMAPListener()
+ self.client = await self.async_rs_or_single_client(
+ event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500
+ )
+
+ # Ensure connections to all servers in the replica set. This is to test
+ # that the is_writable flag is properly updated for connections that
+ # survive a replica set election.
+ await async_ensure_all_connected(self.client)
+ self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority"))
+ self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority"))
+ # Note that all ops use the same write concern as self.db (majority).
+ await self.db.drop_collection("step-down")
+ await self.db.create_collection("step-down")
+ self.listener.reset()
+
+ async def set_fail_point(self, command_args):
+ cmd = SON([("configureFailPoint", "failCommand")])
+ cmd.update(command_args)
+ await self.client.admin.command(cmd)
+
+ def verify_pool_cleared(self):
+ self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1)
+
+ def verify_pool_not_cleared(self):
+ self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0)
+
+ @async_client_context.require_version_min(4, 2, -1)
+ async def test_get_more_iteration(self):
+ # Insert 5 documents with WC majority.
+ await self.coll.insert_many([{"data": k} for k in range(5)])
+ # Start a find operation and retrieve the first batch of results.
+ batch_size = 2
+ cursor = self.coll.find(batch_size=batch_size)
+ for _ in range(batch_size):
+ await cursor.next()
+ # Force step-down the primary.
+ await async_repl_set_step_down(self.client, replSetStepDown=5, force=True)
+ # Get the next batch of results.
+ for _ in range(batch_size):
+ await cursor.next()
+ # Verify pool not cleared.
+ self.verify_pool_not_cleared()
+ # Attempt insertion to mark server description as stale and prevent a
+ # NotPrimaryError on the subsequent operation.
+ try:
+ await self.coll.insert_one({})
+ except NotPrimaryError:
+ pass
+ # Next insert should succeed on the new primary without clearing pool.
+ await self.coll.insert_one({})
+ self.verify_pool_not_cleared()
+
+ async def run_scenario(self, error_code, retry, pool_status_checker):
+ # Set fail point.
+ await self.set_fail_point(
+ {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}}
+ )
+ self.addAsyncCleanup(self.set_fail_point, {"mode": "off"})
+ # Insert record and verify failure.
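+ # The failCommand fail point set above makes the next insert fail with the
+ # given error code, which the driver surfaces as NotPrimaryError.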
+ with self.assertRaises(NotPrimaryError) as exc:
+ await self.coll.insert_one({"test": 1})
+ self.assertEqual(exc.exception.details["code"], error_code) # type: ignore[call-overload]
+ # Retry before the CMAPListener assertion if retry is True.
+ if retry:
+ await self.coll.insert_one({"test": 1})
+ # Verify pool cleared/not cleared.
+ pool_status_checker()
+ # Always retry here to ensure discovery of the new primary.
+ await self.coll.insert_one({"test": 1})
+
+ @async_client_context.require_version_min(4, 2, -1)
+ @async_client_context.require_test_commands
+ async def test_not_primary_keep_connection_pool(self):
+ await self.run_scenario(10107, True, self.verify_pool_not_cleared)
+
+ @async_client_context.require_version_min(4, 2, 0)
+ @async_client_context.require_test_commands
+ async def test_shutdown_in_progress(self):
+ await self.run_scenario(91, False, self.verify_pool_cleared)
+
+ @async_client_context.require_version_min(4, 2, 0)
+ @async_client_context.require_test_commands
+ async def test_interrupted_at_shutdown(self):
+ await self.run_scenario(11600, False, self.verify_pool_cleared)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/asynchronous/test_create_entities.py b/test/asynchronous/test_create_entities.py
new file mode 100644
index 0000000000..1f68cf6ddc
--- /dev/null
+++ b/test/asynchronous/test_create_entities.py
@@ -0,0 +1,134 @@
+# Copyright 2021-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import UnifiedSpecTestMixinV1 + +_IS_SYNC = False + + +class TestCreateEntities(AsyncIntegrationTest): + async def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], + } + }, + ], + "tests": [{"description": "foo", "operations": []}], + } + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() + + async def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": {"retryReads": True}, + } + }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat", + } + }, + ], + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 2, "x": 44}}, + }, + ], + }, + } + ], + } + ], + } + + await self.client.dat.dat.delete_many({}) + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_crud_unified.py b/test/asynchronous/test_crud_unified.py new file mode 100644 index 0000000000..8b1f9b8e38 --- /dev/null +++ b/test/asynchronous/test_crud_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_csot.py b/test/asynchronous/test_csot.py new file mode 100644 index 0000000000..a978d1ccc0 --- /dev/null +++ b/test/asynchronous/test_csot.py @@ -0,0 +1,116 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils import flaky + +import pymongo +from pymongo import _csot +from pymongo.errors import PyMongoError + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestCSOT(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + @flaky(reason="PYTHON-3522") + async def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + # Capped at the original 10 deadline. 
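+ # A larger nested timeout replaces the timeout value but cannot extend the
+ # outer deadline: get_deadline() stays capped at the original 10s deadline.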
+ with pymongo.timeout(15): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertEqual(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + with pymongo.timeout(5): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + @async_client_context.require_change_streams + @flaky(reason="PYTHON-3522") + async def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + await coll.insert_one({}) + async with await coll.watch() as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + await stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if async_client_context.version < (4, 0): + await stream.try_next() + await coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(await stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. + with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py new file mode 100644 index 0000000000..906f78cc97 --- /dev/null +++ b/test/asynchronous/test_cursor.py @@ -0,0 +1,1867 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the cursor module.""" +from __future__ import annotations + +import copy +import gc +import itertools +import os +import random +import re +import sys +import threading +import time +from typing import Any + +import pymongo + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import flaky +from test.utils_shared import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + async_wait_until, + delay, + ignore_deprecations, +) + +from bson import decode_all +from bson.code import Code +from bson.raw_bson import RawBSONDocument +from pymongo import ASCENDING, DESCENDING +from pymongo.asynchronous.cursor import AsyncCursor, CursorType +from pymongo.collation import Collation +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError +from pymongo.operations import _IndexList +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCursor(AsyncIntegrationTest): + async def test_deepcopy_cursor_littered_with_regexes(self): + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": [re.compile("^hm.*")]}, + } + ) + + cursor2 = copy.deepcopy(cursor) + self.assertEqual(cursor._spec, cursor2._spec) + + async def test_add_remove_option(self): + cursor = self.db.test.find() + self.assertEqual(0, cursor._query_flags) + await cursor.add_option(2) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + await cursor.add_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + await cursor.add_option(128) + cursor2 = await self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + self.assertEqual(162, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + + self.assertEqual(162, cursor._query_flags) + await cursor.add_option(128) + self.assertEqual(162, cursor._query_flags) + + cursor.remove_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + + self.assertEqual(2, cursor._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Timeout + cursor = self.db.test.find(no_cursor_timeout=True) + self.assertEqual(16, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(16) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(16) + self.assertEqual(0, cursor._query_flags) + + # Tailable / Await data + cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(34) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Partial + cursor = 
self.db.test.find(allow_partial_results=True) + self.assertEqual(128, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(128) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(128) + self.assertEqual(0, cursor._query_flags) + + async def test_add_remove_option_exhaust(self): + # Exhaust - which mongos doesn't support + if async_client_context.is_mongos: + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST)) + else: + cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) + self.assertEqual(64, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(64) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + self.assertTrue(cursor._exhaust) + cursor.remove_option(64) + self.assertEqual(0, cursor._query_flags) + self.assertFalse(cursor._exhaust) + + async def test_allow_disk_use(self): + db = self.db + await db.pymongo_test.drop() + coll = db.pymongo_test + + with self.assertRaises(TypeError): + coll.find().allow_disk_use("baz") # type: ignore[arg-type] + + cursor = coll.find().allow_disk_use(True) + self.assertEqual(True, cursor._allow_disk_use) + cursor = coll.find().allow_disk_use(False) + self.assertEqual(False, cursor._allow_disk_use) + + async def test_max_time_ms(self): + db = self.db + await db.pymongo_test.drop() + coll = db.pymongo_test + with self.assertRaises(TypeError): + coll.find().max_time_ms("foo") # type: ignore[arg-type] + await coll.insert_one({"amalia": 1}) + await coll.insert_one({"amalia": 2}) + + coll.find().max_time_ms(None) + coll.find().max_time_ms(1) + + cursor = coll.find().max_time_ms(999) + self.assertEqual(999, cursor._max_time_ms) + cursor = coll.find().max_time_ms(10).max_time_ms(1000) + self.assertEqual(1000, cursor._max_time_ms) + + cursor = coll.find().max_time_ms(999) + c2 = cursor.clone() + self.assertEqual(999, c2._max_time_ms) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) + + self.assertTrue(await coll.find_one(max_time_ms=1000)) + + client = self.client + if not async_client_context.is_mongos and async_client_context.test_commands_enabled: + # Cursor parses server timeout error in response to initial query. 
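+ # The maxTimeAlwaysTimeOut fail point makes the server report that maxTimeMS
+ # expired without actually waiting, so the timeout path is exercised quickly.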
+ await client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + cursor = coll.find().max_time_ms(1) + try: + await anext(cursor) + except ExecutionTimeout: + pass + else: + self.fail("ExecutionTimeout not raised") + with self.assertRaises(ExecutionTimeout): + await coll.find_one(max_time_ms=1) + finally: + await client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + + async def test_maxtime_ms_message(self): + db = self.db + await db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + client = await self.async_rs_client(document_class=RawBSONDocument) + await client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + async def test_max_await_time_ms(self): + db = self.db + await db.pymongo_test.drop() + coll = await db.create_collection("pymongo_test", capped=True, size=4096) + + with self.assertRaises(TypeError): + coll.find().max_await_time_ms("foo") # type: ignore[arg-type] + await coll.insert_one({"amalia": 1}) + await coll.insert_one({"amalia": 2}) + + coll.find().max_await_time_ms(None) + coll.find().max_await_time_ms(1) + + # When cursor is not tailable_await + cursor = coll.find() + self.assertEqual(None, cursor._max_await_time_ms) + cursor = coll.find().max_await_time_ms(99) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is unset + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is set + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + self.assertEqual(99, cursor._max_await_time_ms) + + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) + self.assertEqual(90, cursor._max_await_time_ms) + + listener = AllowListEventListener("find", "getMore") + coll = (await self.async_rs_or_single_client(event_listeners=[listener]))[ + self.db.name + ].pymongo_test + + # Tailable_await defaults. + await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() + # find + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Tailable_await with max_await_time_ms set. 
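+ # max_await_time_ms applies only to getMore on tailable-await cursors: the
+ # initial find should omit maxTimeMS while each getMore should include it.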
+ await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() + + # Tailable_await with max_time_ms and make sure list() works on synchronous cursors + if _IS_SYNC: + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # type: ignore[call-overload] + else: + await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Tailable_await with both max_time_ms and max_await_time_ms + await ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_time_ms(99) + .max_await_time_ms(99) + .to_list() + ) + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() + + # Non tailable_await with max_await_time_ms + await coll.find(batch_size=1).max_await_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Non tailable_await with max_time_ms + await coll.find(batch_size=1).max_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + + # Non tailable_await with both max_time_ms and max_await_time_ms + await coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + + @async_client_context.require_test_commands + @async_client_context.require_no_mongos + async def test_max_time_ms_getmore(self): + # Test that Cursor handles server timeout error in response to getmore. 
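+ # Strategy: fetch the first batch before enabling the fail point so that
+ # only the getMore is subject to the forced timeout.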
+ coll = self.db.pymongo_test
+ await coll.insert_many([{} for _ in range(200)])
+ cursor = coll.find().max_time_ms(100)
+
+ # Send the initial query before turning on the fail point.
+ await anext(cursor)
+ await self.client.admin.command(
+ "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn"
+ )
+ try:
+ try:
+ # Iterate up to the first getMore.
+ await cursor.to_list()
+ except ExecutionTimeout:
+ pass
+ else:
+ self.fail("ExecutionTimeout not raised")
+ finally:
+ await self.client.admin.command(
+ "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off"
+ )
+
+ async def test_explain(self):
+ a = self.db.test.find()
+ await a.explain()
+ async for _ in a:
+ break
+ b = await a.explain()
+ self.assertIn("executionStats", b)
+
+ async def test_explain_with_read_concern(self):
+ # Do not add the readConcern level to explain.
+ listener = AllowListEventListener("explain")
+ client = await self.async_rs_or_single_client(event_listeners=[listener])
+ coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local"))
+ self.assertTrue(await coll.find().explain())
+ started = listener.started_events
+ self.assertEqual(len(started), 1)
+ self.assertNotIn("readConcern", started[0].command)
+
+ # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems
+ async def test_explain_csot(self):
+ # Create a MongoClient with command monitoring enabled (referred to as client).
+ listener = AllowListEventListener("explain")
+ client = await self.async_rs_or_single_client(event_listeners=[listener])
+
+ # Create a collection, referred to as collection, with the namespace explain-test.collection.
+ # Workaround for SERVER-108463.
+ names = await client["explain-test"].list_collection_names()
+ if "collection" not in names:
+ collection = await client["explain-test"].create_collection("collection")
+ else:
+ collection = client["explain-test"]["collection"]
+
+ # Run an explained find on collection. The find will have the query predicate
+ # { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain.
+ with pymongo.timeout(2.0):
+ self.assertTrue(await collection.find({"name": "john doe"}).explain())
+
+ # Obtain the command started event for the explain. Confirm that the
+ # top-level explain command has a maxTimeMS value of 2000.
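+ # pymongo.timeout(2.0) converts the remaining client-side budget into the
+ # command's maxTimeMS, so the observed value is at most 2000ms.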
+ started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 + + async def test_hint(self): + db = self.db + with self.assertRaises(TypeError): + db.test.find().hint(5.5) # type: ignore[arg-type] + await db.test.drop() + + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain() + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() + + spec: list[Any] = [("num", DESCENDING)] + _ = await db.test.create_index(spec) + + first = await anext(db.test.find()) + self.assertEqual(0, first.get("num")) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(99, first.get("num")) + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() + + a = db.test.find({"num": 17}) + a.hint(spec) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.hint, spec) + + await db.test.drop() + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + await db.test.create_index(spec) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + await db.test.drop() + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + await db.test.create_index(spec) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + + async def test_hint_by_name(self): + db = self.db + await db.test.drop() + + await db.test.insert_many([{"i": i} for i in range(100)]) + + await db.test.create_index([("i", DESCENDING)], name="fooindex") + first = await anext(db.test.find()) + self.assertEqual(0, first.get("i")) + first = await anext(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) + + async def test_limit(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().limit(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit(5.5) # type: ignore[arg-type] + self.assertTrue((db.test.find()).limit(5)) + + await db.test.drop() + await db.test.insert_many([{"x": i} for i in range(100)]) + + count = 0 + async for _ in db.test.find(): + count += 1 + self.assertEqual(count, 100) + + count = 0 + async for _ in db.test.find().limit(20): + count += 1 + self.assertEqual(count, 20) + + count = 0 + async for _ in db.test.find().limit(99): + count += 1 + self.assertEqual(count, 99) + + count = 0 + async for _ in db.test.find().limit(1): + count += 1 + self.assertEqual(count, 1) + + count = 0 + async for _ in db.test.find().limit(0): + count += 1 + self.assertEqual(count, 100) + + count = 0 + async for _ in db.test.find().limit(0).limit(50).limit(10): + count += 1 + self.assertEqual(count, 10) + + a = db.test.find() + a.limit(10) + async for _ in a: + break + with self.assertRaises(InvalidOperation): + a.limit(5) + + async def test_max(self): + db = self.db + await db.test.drop() + j_index = [("j", ASCENDING)] + await db.test.create_index(j_index) + + await db.test.insert_many([{"j": j, "k": j} for j in range(10)]) + + def find(max_spec, expected_index): + return db.test.find().max(max_spec).hint(expected_index) + + cursor = 
find([("j", 3)], j_index) + self.assertEqual(len(await cursor.to_list()), 3) + + # Tuple. + cursor = find((("j", 3),), j_index) + self.assertEqual(len(await cursor.to_list()), 3) + + # Compound index. + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + await db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(await cursor.to_list()), 3) + + # Wrong order. + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + await cursor.to_list() + + # No such index. + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + await cursor.to_list() + with self.assertRaises(TypeError): + db.test.find().max(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().max({"j": 10}) # type: ignore[arg-type] + + async def test_min(self): + db = self.db + await db.test.drop() + j_index = [("j", ASCENDING)] + await db.test.create_index(j_index) + + await db.test.insert_many([{"j": j, "k": j} for j in range(10)]) + + def find(min_spec, expected_index): + return db.test.find().min(min_spec).hint(expected_index) + + cursor = find([("j", 3)], j_index) + self.assertEqual(len(await cursor.to_list()), 7) + + # Tuple. + cursor = find((("j", 3),), j_index) + self.assertEqual(len(await cursor.to_list()), 7) + + # Compound index. + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + await db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(await cursor.to_list()), 7) + + # Wrong order. + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + await cursor.to_list() + + # No such index. + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + await cursor.to_list() + + with self.assertRaises(TypeError): + db.test.find().min(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().min({"j": 10}) # type: ignore[arg-type] + + async def test_min_max_without_hint(self): + coll = self.db.test + j_index = [("j", ASCENDING)] + await coll.create_index(j_index) + + with self.assertRaises(InvalidOperation): + await coll.find().min([("j", 3)]).to_list() + with self.assertRaises(InvalidOperation): + await coll.find().max([("j", 3)]).to_list() + + async def test_batch_size(self): + db = self.db + await db.test.drop() + await db.test.insert_many([{"x": x} for x in range(200)]) + + with self.assertRaises(TypeError): + db.test.find().batch_size(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().batch_size(-1) + self.assertTrue((db.test.find()).batch_size(5)) + a = db.test.find() + async for _ in a: + break + self.assertRaises(InvalidOperation, a.batch_size, 5) + + async def cursor_count(cursor, expected_count): + count = 0 + async for _ in cursor: + count += 1 + self.assertEqual(expected_count, count) + + await cursor_count((db.test.find()).batch_size(0), 200) + await cursor_count((db.test.find()).batch_size(1), 200) + await cursor_count((db.test.find()).batch_size(2), 200) + await cursor_count((db.test.find()).batch_size(5), 200) + await cursor_count((db.test.find()).batch_size(100), 200) + await cursor_count((db.test.find()).batch_size(500), 200) + + await cursor_count((db.test.find()).batch_size(0).limit(1), 1) + await 
cursor_count((db.test.find()).batch_size(1).limit(1), 1) + await cursor_count((db.test.find()).batch_size(2).limit(1), 1) + await cursor_count((db.test.find()).batch_size(5).limit(1), 1) + await cursor_count((db.test.find()).batch_size(100).limit(1), 1) + await cursor_count((db.test.find()).batch_size(500).limit(1), 1) + + await cursor_count((db.test.find()).batch_size(0).limit(10), 10) + await cursor_count((db.test.find()).batch_size(1).limit(10), 10) + await cursor_count((db.test.find()).batch_size(2).limit(10), 10) + await cursor_count((db.test.find()).batch_size(5).limit(10), 10) + await cursor_count((db.test.find()).batch_size(100).limit(10), 10) + await cursor_count((db.test.find()).batch_size(500).limit(10), 10) + + cur = db.test.find().batch_size(1) + await anext(cur) + # find command batchSize should be 1 + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + + async def test_limit_and_batch_size(self): + db = self.db + await db.test.drop() + await db.test.insert_many([{"x": x} for x in range(500)]) + + curs = db.test.find().limit(0).batch_size(10) + await anext(curs) + self.assertEqual(10, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=10) + await anext(curs) + self.assertEqual(10, curs._retrieved) + + curs = db.test.find().limit(-2).batch_size(0) + await anext(curs) + self.assertEqual(2, curs._retrieved) + + curs = db.test.find(limit=-2, batch_size=0) + await anext(curs) + self.assertEqual(2, curs._retrieved) + + curs = db.test.find().limit(-4).batch_size(5) + await anext(curs) + self.assertEqual(4, curs._retrieved) + + curs = db.test.find(limit=-4, batch_size=5) + await anext(curs) + self.assertEqual(4, curs._retrieved) + + curs = db.test.find().limit(50).batch_size(500) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50, batch_size=500) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find().batch_size(500) + await anext(curs) + self.assertEqual(500, curs._retrieved) + + curs = db.test.find(batch_size=500) + await anext(curs) + self.assertEqual(500, curs._retrieved) + + curs = db.test.find().limit(50) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + # these two might be shaky, as the default + # is set by the server. 
as of 2.0.0-rc0, 101 + # or 1MB (whichever is smaller) is default + # for queries without ntoreturn + curs = db.test.find() + await anext(curs) + self.assertEqual(101, curs._retrieved) + + curs = db.test.find().limit(0).batch_size(0) + await anext(curs) + self.assertEqual(101, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=0) + await anext(curs) + self.assertEqual(101, curs._retrieved) + + async def test_skip(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().skip(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().skip(-5) + self.assertTrue((db.test.find()).skip(5)) + + await db.drop_collection("test") + + await db.test.insert_many([{"x": i} for i in range(100)]) + + async for i in db.test.find(): + self.assertEqual(i["x"], 0) + break + + async for i in db.test.find().skip(20): + self.assertEqual(i["x"], 20) + break + + async for i in db.test.find().skip(99): + self.assertEqual(i["x"], 99) + break + + async for i in db.test.find().skip(1): + self.assertEqual(i["x"], 1) + break + + async for i in db.test.find().skip(0): + self.assertEqual(i["x"], 0) + break + + async for i in db.test.find().skip(0).skip(50).skip(10): + self.assertEqual(i["x"], 10) + break + + async for _ in db.test.find().skip(1000): + self.fail() + + a = db.test.find() + a.skip(10) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.skip, 5) + + async def test_sort(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().sort(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().sort([]) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([], ASCENDING) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([("hello", DESCENDING)], DESCENDING) # type: ignore[arg-type] + + await db.test.drop() + + unsort = list(range(10)) + random.shuffle(unsort) + + await db.test.insert_many([{"x": i} for i in unsort]) + + asc = [i["x"] async for i in db.test.find().sort("x", ASCENDING)] + self.assertEqual(asc, list(range(10))) + asc = [i["x"] async for i in db.test.find().sort("x")] + self.assertEqual(asc, list(range(10))) + asc = [i["x"] async for i in db.test.find().sort([("x", ASCENDING)])] + self.assertEqual(asc, list(range(10))) + + expect = list(reversed(range(10))) + desc = [i["x"] async for i in db.test.find().sort("x", DESCENDING)] + self.assertEqual(desc, expect) + desc = [i["x"] async for i in db.test.find().sort([("x", DESCENDING)])] + self.assertEqual(desc, expect) + desc = [i["x"] async for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + self.assertEqual(desc, expect) + + expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] + shuffled = list(expected) + random.shuffle(shuffled) + + await db.test.drop() + for a, b in shuffled: + await db.test.insert_one({"a": a, "b": b}) + + result = [ + (i["a"], i["b"]) + async for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] + self.assertEqual(result, expected) + result = [(i["a"], i["b"]) async for i in db.test.find().sort([("b", DESCENDING), "a"])] + self.assertEqual(result, expected) + + a = db.test.find() + a.sort("x", ASCENDING) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) + + async def test_where(self): + db 
= self.db
+ await db.test.drop()
+
+ a = db.test.find()
+ with self.assertRaises(TypeError):
+ a.where(5) # type: ignore[arg-type]
+ with self.assertRaises(TypeError):
+ a.where(None) # type: ignore[arg-type]
+ with self.assertRaises(TypeError):
+ a.where({}) # type: ignore[arg-type]
+
+ await db.test.insert_many([{"x": i} for i in range(10)])
+
+ self.assertEqual(3, len(await db.test.find().where("this.x < 3").to_list()))
+ self.assertEqual(3, len(await db.test.find().where(Code("this.x < 3")).to_list()))
+
+ code_with_scope = Code("this.x < i", {"i": 3})
+ if async_client_context.version.at_least(4, 3, 3):
+ # MongoDB 4.4 removed support for Code with scope.
+ with self.assertRaises(OperationFailure):
+ await db.test.find().where(code_with_scope).to_list()
+
+ code_with_empty_scope = Code("this.x < 3", {})
+ with self.assertRaises(OperationFailure):
+ await db.test.find().where(code_with_empty_scope).to_list()
+ else:
+ self.assertEqual(3, len(await db.test.find().where(code_with_scope).to_list()))
+
+ self.assertEqual(10, len(await db.test.find().to_list()))
+ self.assertEqual([0, 1, 2], [a["x"] async for a in db.test.find().where("this.x < 3")])
+ self.assertEqual([], [a["x"] async for a in db.test.find({"x": 5}).where("this.x < 3")])
+ self.assertEqual([5], [a["x"] async for a in db.test.find({"x": 5}).where("this.x > 3")])
+
+ cursor = db.test.find().where("this.x < 3").where("this.x > 7")
+ self.assertEqual([8, 9], [a["x"] async for a in cursor])
+
+ a = db.test.find()
+ _ = a.where("this.x > 3")
+ async for _ in a:
+ break
+ self.assertRaises(InvalidOperation, a.where, "this.x < 3")
+
+ async def test_rewind(self):
+ await self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+
+ cursor = self.db.test.find().limit(2)
+
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(0, count)
+
+ await cursor.rewind()
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+
+ await cursor.rewind()
+ count = 0
+ async for _ in cursor:
+ break
+ await cursor.rewind()
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+
+ self.assertEqual(cursor, await cursor.rewind())
+
+ # max_scan, oplog_replay, and snapshot are all deprecated.
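+ # ignore_deprecations suppresses the DeprecationWarnings those options would
+ # otherwise emit while clone() is exercised with them below.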
+ @ignore_deprecations
+ async def test_clone(self):
+ await self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+
+ cursor = self.db.test.find().limit(2)
+
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(0, count)
+
+ cursor = cursor.clone()
+ cursor2 = cursor.clone()
+ count = 0
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+ async for _ in cursor2:
+ count += 1
+ self.assertEqual(4, count)
+
+ await cursor.rewind()
+ count = 0
+ async for _ in cursor:
+ break
+ cursor = cursor.clone()
+ async for _ in cursor:
+ count += 1
+ self.assertEqual(2, count)
+
+ self.assertNotEqual(cursor, cursor.clone())
+
+ # Just test attributes
+ cursor = (
+ self.db.test.find(
+ {"x": re.compile("^hello.*")},
+ projection={"_id": False},
+ skip=1,
+ no_cursor_timeout=True,
+ cursor_type=CursorType.TAILABLE_AWAIT,
+ sort=[("x", 1)],
+ allow_partial_results=True,
+ oplog_replay=True,
+ batch_size=123,
+ collation={"locale": "en_US"},
+ hint=[("_id", 1)],
+ max_scan=100,
+ max_time_ms=1000,
+ return_key=True,
+ show_record_id=True,
+ snapshot=True,
+ allow_disk_use=True,
+ )
+ ).limit(2)
+ cursor.min([("a", 1)]).max([("b", 3)])
+ await cursor.add_option(128)
+ cursor.comment("hi!")
+
+ # Every attribute should be the same.
+ cursor2 = cursor.clone()
+ self.assertEqual(cursor.__dict__, cursor2.__dict__)
+
+ # Shallow copies share state, so mutating the copy also mutates the original.
+ cursor2 = copy.copy(cursor)
+ cursor2._projection["cursor2"] = False
+ self.assertIsNotNone(cursor._projection)
+ self.assertIn("cursor2", cursor._projection.keys())
+
+ # Deepcopies don't share state, so mutating the copy shouldn't mutate the original.
+ cursor3 = copy.deepcopy(cursor)
+ cursor3._projection["cursor3"] = False
+ self.assertIsNotNone(cursor._projection)
+ self.assertNotIn("cursor3", cursor._projection.keys())
+
+ cursor4 = cursor.clone()
+ cursor4._projection["cursor4"] = False
+ self.assertIsNotNone(cursor._projection)
+ self.assertNotIn("cursor4", cursor._projection.keys())
+
+ # Test memo when deepcopying queries
+ query = {"hello": "world"}
+ query["reflexive"] = query
+ cursor = self.db.test.find(query)
+
+ cursor2 = copy.deepcopy(cursor)
+
+ self.assertNotEqual(id(cursor._spec), id(cursor2._spec))
+ self.assertEqual(id(cursor2._spec["reflexive"]), id(cursor2._spec))
+ self.assertEqual(len(cursor2._spec), 2)
+
+ # Ensure hints are cloned as the correct type
+ cursor = self.db.test.find().hint([("z", 1), ("a", 1)])
+ cursor2 = copy.deepcopy(cursor)
+ # Internal types are now dict rather than SON by default
+ self.assertIsInstance(cursor2._hint, dict)
+ self.assertEqual(cursor._hint, cursor2._hint)
+
+ @async_client_context.require_sync
+ def test_clone_empty(self):
+ self.db.test.delete_many({})
+ self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+ cursor = self.db.test.find()[2:2]
+ cursor2 = cursor.clone()
+ self.assertRaises(StopIteration, cursor.next)
+ self.assertRaises(StopIteration, cursor2.next)
+
+ # AsyncCursors don't support slicing
+ @async_client_context.require_sync
+ def test_bad_getitem(self):
+ self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello")
+ self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5)
+ self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None)
+
+ # AsyncCursors don't support slicing
+ @async_client_context.require_sync
+ def test_getitem_slice_index(self):
+ self.db.drop_collection("test")
+ self.db.test.insert_many([{"i": i} for i in range(100)])
+
+ count = itertools.count
+
+ 
self.assertRaises(IndexError, lambda: self.db.test.find()[-1:]) + self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) + + for a, b in zip(count(0), self.db.test.find()): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(100, len(list(self.db.test.find()[0:]))) # type: ignore[call-overload] + for a, b in zip(count(0), self.db.test.find()[0:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + for a, b in zip(count(99), self.db.test.find()[99:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + for _i in self.db.test.find()[1000:]: + self.fail() + + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:25]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45][20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(1, len(list(self.db.test.find()[:1]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[:5]))) # type: ignore[call-overload] + + self.assertEqual(1, len(list(self.db.test.find()[99:100]))) # type: ignore[call-overload] + self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[10:10]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[:0]))) # type: ignore[call-overload] + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) # type: ignore[call-overload] + + self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) + + # AsyncCursors don't support slicing + @async_client_context.require_sync + def test_getitem_numeric_index(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"i": i} for i in range(100)]) + + self.assertEqual(0, self.db.test.find()[0]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) + + self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) + self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) + + @async_client_context.require_sync + def test_iteration_with_list(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"i": i} for i in range(100)]) + + cur = self.db.test.find().batch_size(10) 
+ + self.assertEqual(100, len(list(cur))) # type: ignore[call-overload] + + def test_len(self): + with self.assertRaises(TypeError): + len(self.db.test.find()) # type: ignore[arg-type] + + def test_properties(self): + self.assertEqual(self.db.test, self.db.test.find().collection) + + with self.assertRaises(AttributeError): + self.db.test.find().collection = "hello" # type: ignore + + async def test_get_more(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"i": i} for i in range(10)]) + self.assertEqual(10, len(await db.test.find().batch_size(5).to_list())) + + async def test_tailable(self): + db = self.db + await db.drop_collection("test") + await db.create_collection("test", capped=True, size=1000, max=3) + self.addAsyncCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) + + await db.test.insert_one({"x": 1}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(1, doc["x"]) + self.assertEqual(1, count) + + await db.test.insert_one({"x": 2}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(2, doc["x"]) + self.assertEqual(1, count) + + await db.test.insert_one({"x": 3}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(3, doc["x"]) + self.assertEqual(1, count) + + # Capped rollover - the collection can never + # have more than 3 documents. Just make sure + # this doesn't raise... + await db.test.insert_many([{"x": i} for i in range(4, 7)]) + self.assertEqual(0, len(await cursor.to_list())) + + # and that the cursor doesn't think it's still alive. + self.assertFalse(cursor.alive) + + self.assertEqual(3, await db.test.count_documents({})) + + # __getitem__(index) + if _IS_SYNC: + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): + self.assertEqual(4, cursor[0]["x"]) + self.assertEqual(5, cursor[1]["x"]) + self.assertEqual(6, cursor[2]["x"]) + + cursor.rewind() + self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) + cursor.rewind() + self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) + cursor.rewind() + self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) + cursor.rewind() + self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) + cursor.rewind() + self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) + cursor.rewind() + self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) + + # The Async API does not support threading + @async_client_context.require_sync + def test_concurrent_close(self): + """Ensure a tailable can be closed from another thread.""" + db = self.db + db.drop_collection("test") + db.create_collection("test", capped=True, size=1000, max=3) + self.addCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) + + def iterate_cursor(): + while cursor.alive: + try: + for _doc in cursor: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise + + t = threading.Thread(target=iterate_cursor) + t.start() + time.sleep(1) + cursor.close() + self.assertFalse(cursor.alive) + t.join(3) + self.assertFalse(t.is_alive()) + + async def test_distinct(self): + await self.db.drop_collection("test") + + await self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = await self.db.test.find({"a": {"$lt": 3}}).distinct("a") + distinct.sort() + + self.assertEqual([1, 2], distinct) + + await self.db.drop_collection("test") + + await 
self.db.test.insert_one({"a": {"b": "a"}, "c": 12}) + await self.db.test.insert_one({"a": {"b": "b"}, "c": 8}) + await self.db.test.insert_one({"a": {"b": "c"}, "c": 12}) + await self.db.test.insert_one({"a": {"b": "c"}, "c": 8}) + + distinct = await self.db.test.find({"c": 8}).distinct("a.b") + distinct.sort() + + self.assertEqual(["b", "c"], distinct) + + async def test_with_statement(self): + await self.db.drop_collection("test") + await self.db.test.insert_many([{} for _ in range(100)]) + + c1 = self.db.test.find() + async with self.db.test.find() as c2: + self.assertTrue(c2.alive) + self.assertFalse(c2.alive) + + async with self.db.test.find() as c2: + self.assertEqual(100, len(await c2.to_list())) + self.assertFalse(c2.alive) + self.assertTrue(c1.alive) + + @async_client_context.require_no_mongos + async def test_comment(self): + await self.client.drop_database(self.db) + await self.db.command("profile", 2) # Profile ALL commands. + try: + await self.db.test.find().comment("foo").to_list() + count = await self.db.system.profile.count_documents( + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) + self.assertEqual(count, 1) + + await self.db.test.find().comment("foo").distinct("type") + count = await self.db.system.profile.count_documents( + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) + self.assertEqual(count, 1) + finally: + await self.db.command("profile", 0) # Turn off profiling. + await self.db.system.profile.drop() + + await self.db.test.insert_many([{}, {}]) + cursor = self.db.test.find() + await anext(cursor) + self.assertRaises(InvalidOperation, cursor.comment, "hello") + + async def test_alive(self): + await self.db.test.delete_many({}) + await self.db.test.insert_many([{} for _ in range(3)]) + self.addAsyncCleanup(self.db.test.delete_many, {}) + cursor = self.db.test.find().batch_size(2) + n = 0 + while True: + await cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + async def test_close_kills_cursor_synchronously(self): + # Kill any cursors possibly queued up by previous tests. + gc.collect() + await self.client._process_periodic_tasks() + + listener = AllowListEventListener("killCursors") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_close_kills_cursors + + # Add some test data. + docs_inserted = 1000 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + # Close a cursor while it's still open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(await anext(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + await cursor.close() + + def assertCursorKilled(): + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + # Close a command cursor while it's still open on the server. + cursor = await coll.aggregate([], batchSize=10) + self.assertTrue(bool(await anext(cursor))) + await cursor.close() + + # The cursor should be killed if it had a non-zero id. 
+ if cursor.cursor_id: + assertCursorKilled() + else: + self.assertEqual(0, len(listener.started_events)) + + @async_client_context.require_failCommand_appName + async def test_timeout_kills_cursor_asynchronously(self): + listener = AllowListEventListener("killCursors") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_timeout_kills_cursor + + # Add some test data. + docs_inserted = 10 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + cursor = coll.find({}, batch_size=1) + await cursor.next() + + # Mock getMore commands timing out. + mock_timeout_errors = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "errorCode": 50, + "failCommands": ["getMore"], + }, + } + + async with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + await cursor.next() + + async def assertCursorKilled(): + await async_wait_until( + lambda: len(listener.succeeded_events), + "find successful killCursors command", + ) + + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + await assertCursorKilled() + listener.reset() + + cursor = await coll.aggregate([], batchSize=1) + await cursor.next() + + async with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + await cursor.next() + + await assertCursorKilled() + + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__, eg test.find(invalidKwarg=1). + cursor = AsyncCursor.__new__(AsyncCursor) # Skip calling __init__ + cursor.__del__() # no error + + async def test_getMore_does_not_send_readPreference(self): + listener = AllowListEventListener("find", "getMore") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # We never send primary read preference so override the default. + coll = client[self.db.name].get_collection( + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) + + await coll.delete_many({}) + await coll.insert_many([{} for _ in range(5)]) + self.addAsyncCleanup(coll.drop) + + await coll.find(batch_size=3).to_list() + started = listener.started_events + self.assertEqual(2, len(started)) + self.assertEqual("find", started[0].command_name) + if async_client_context.is_rs or async_client_context.is_mongos: + self.assertIn("$readPreference", started[0].command) + else: + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) + + @async_client_context.require_replica_set + async def test_to_list_tailable(self): + oplog = self.client.local.oplog.rs + last = await oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() + ts = last["ts"] + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. + c = oplog.find( + {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True + ).max_await_time_ms(1) + self.addAsyncCleanup(c.close) + # Wait for the change to be read. 
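+        # Editorial aside (illustrative, not part of the upstream change): with
+        # TAILABLE_AWAIT the server holds each getMore open for up to
+        # max_await_time_ms before replying, and a reply may legitimately carry
+        # an empty batch, so the loop below polls until the insert above is
+        # visible in the oplog.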
+ docs = [] + while not docs: + docs = await c.to_list() + self.assertGreaterEqual(len(docs), 1) + + async def test_to_list_empty(self): + c = self.db.does_not_exist.find() + docs = await c.to_list() + self.assertEqual([], docs) + + async def test_to_list_length(self): + coll = self.db.test + await coll.insert_many([{} for _ in range(5)]) + self.addAsyncCleanup(coll.drop) + c = coll.find() + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + docs = await c.to_list(3) + self.assertEqual(len(docs), 2) + + @flaky(reason="PYTHON-3522") + async def test_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + await coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) + docs = await c.to_list() + self.assertGreaterEqual(len(docs), 0) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list_empty(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) + docs = await c.to_list() + self.assertEqual([], docs) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list_length(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list()), 2) + + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list(1)), 1) + + @async_client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") + async def test_command_cursor_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + await coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = await coll.aggregate([], batchSize=1) + async with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + +class TestRawBatchCursor(AsyncIntegrationTest): + async def test_find_raw(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + batches = await c.find_raw_batches().sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_transactions + async def test_find_raw_transaction(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = 
OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + async with client.start_session() as session: + async with await session.start_transaction(): + batches = await ( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ).to_list() + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_sessions + @async_client_context.require_failCommand_fail_point + async def test_find_raw_retryable_reads(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + async with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = await client[self.db.name].test.find_raw_batches().sort("_id").to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: + self.assertEqual(cmd.command_name, "find") + + @async_client_context.require_version_min(5, 0, 0) + @async_client_context.require_no_standalone + async def test_find_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + async with client.start_session(snapshot=True) as session: + await db.test.distinct("x", {}, session=session) + batches = await db.test.find_raw_batches(session=session).sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + async def test_explain(self): + c = self.db.test + await c.insert_one({}) + explanation = await c.find_raw_batches().explain() + self.assertIsInstance(explanation, dict) + + async def test_empty(self): + await self.db.test.drop() + cursor = self.db.test.find_raw_batches() + with self.assertRaises(StopAsyncIteration): + await anext(cursor) + + async def test_clone(self): + await self.db.test.insert_one({}) + cursor = self.db.test.find_raw_batches() + # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. 
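+        # Editorial aside (illustrative, not part of the upstream change): a
+        # raw batch cursor yields undecoded BSON blobs, e.g.
+        #
+        #     batch = await anext(cursor)   # -> bytes, one whole server batch
+        #     decode_all(batch)             # -> list of documents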
+ self.assertIsInstance(await anext(cursor.clone()), bytes) + self.assertIsInstance(await anext(copy.copy(cursor)), bytes) + + @async_client_context.require_no_mongos + async def test_exhaust(self): + c = self.db.test + await c.drop() + await c.insert_many({"_id": i} for i in range(200)) + result = b"".join(await c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) + + async def test_server_error(self): + with self.assertRaises(OperationFailure) as exc: + await anext(self.db.test.find_raw_batches({"x": {"$bad": 1}})) + + # The server response was decoded, not left raw. + self.assertIsInstance(exc.exception.details, dict) + + async def test_get_item(self): + with self.assertRaises(InvalidOperation): + self.db.test.find_raw_batches()[0] + + async def test_collation(self): + await anext(self.db.test.find_raw_batches(collation=Collation("en_US"))) + + async def test_read_concern(self): + await self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one( + {} + ) + c = self.db.get_collection("test", read_concern=ReadConcern("majority")) + await anext(c.find_raw_batches()) + + async def test_monitoring(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.drop() + await c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = c.find_raw_batches(batch_size=4) + + # First raw batch of 4 documents. + await anext(cursor) + + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # The batch is a list of one raw bytes object. + self.assertEqual(len(csr["firstBatch"]), 1) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(4)]) + + listener.reset() + + # Next raw batch of 4 documents. + await anext(cursor) + try: + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) + finally: + # Finish the cursor. 
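+            # Editorial aside (illustrative, not part of the upstream change):
+            # explicitly closing here sends killCursors for the still-open
+            # server-side cursor, so a failed assertion above cannot leak a
+            # cursor into later tests.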
+ await cursor.close() + + +class TestRawBatchCommandCursor(AsyncIntegrationTest): + async def test_aggregate_raw(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + batches = await (await c.aggregate_raw_batches([{"$sort": {"_id": 1}}])).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_transactions + async def test_aggregate_raw_transaction(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + async with client.start_session() as session: + async with await session.start_transaction(): + batches = await ( + await client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ).to_list() + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_sessions + @async_client_context.require_failCommand_fail_point + async def test_aggregate_raw_retryable_reads(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + async with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = await ( + await client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}]) + ).to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") + + @async_client_context.require_version_min(5, 0, -1) + @async_client_context.require_no_standalone + async def test_aggregate_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + async with client.start_session(snapshot=True) as session: + await db.test.distinct("x", {}, session=session) + batches = await ( + await db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session) + ).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + async def test_server_error(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + 
await c.insert_many(docs) + await c.insert_one({"_id": 10, "x": "not a number"}) + + with self.assertRaises(OperationFailure) as exc: + await ( + await self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, + ], + batchSize=4, + ) + ).to_list() + + # The server response was decoded, not left raw. + self.assertIsInstance(exc.exception.details, dict) + + async def test_get_item(self): + with self.assertRaises(InvalidOperation): + (await self.db.test.aggregate_raw_batches([]))[0] + + async def test_collation(self): + await anext(await self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) + + async def test_monitoring(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.drop() + await c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = await c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) + + # Start cursor, no initial batch. + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # First batch is empty. + self.assertEqual(len(csr["firstBatch"]), 0) + listener.reset() + + # Batches of 4 documents. + n = 0 + async for batch in cursor: + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(csr["nextBatch"][0], batch) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) + + n += 4 + listener.reset() + + @async_client_context.require_version_min(5, 0, -1) + @async_client_context.require_no_mongos + @async_client_context.require_sync + async def test_exhaust_cursor_db_set(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.delete_many({}) + await c.insert_many([{"_id": i} for i in range(3)]) + + listener.reset() + + result = list(await c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) + + self.assertEqual(len(result), 3) + + self.assertEqual( + listener.started_command_names(), ["find", "getMore", "getMore", "getMore"] + ) + for cmd in listener.started_events: + self.assertEqual(cmd.command["$db"], "pymongo_test") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py new file mode 100644 index 0000000000..f8fa51ba76 --- /dev/null +++ b/test/asynchronous/test_custom_types.py @@ -0,0 +1,976 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations + +import datetime +import sys +import tempfile +from collections import OrderedDict +from decimal import Decimal +from random import random +from typing import Any, Tuple, Type, no_type_check + +from bson.decimal128 import DecimalDecoder, DecimalEncoder +from gridfs.asynchronous.grid_file import AsyncGridIn, AsyncGridOut + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) +from bson.errors import InvalidDocument +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from pymongo.asynchronous.collection import ReturnDocument +from pymongo.errors import DuplicateKeyError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) + + +class UndecipherableInt64Type: + def __init__(self, value): + self.value = value + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.value == other.value + # Does not compare equal to integers. 
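+        # Editorial aside (illustrative, not part of the upstream change):
+        # returning False here means equality only ever holds between two
+        # UndecipherableInt64Type instances; comparing against a plain int or
+        # Int64 is always unequal, which the decoder round-trip tests below
+        # depend on.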
+ return False + + +class UndecipherableIntDecoder(TypeDecoder): + bson_type = Int64 + + def transform_bson(self, value): + return UndecipherableInt64Type(value) + + +class UndecipherableIntEncoder(TypeEncoder): + python_type = UndecipherableInt64Type + + def transform_python(self, value): + return Int64(value.value) + + +UNINT_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) + + +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) + + +class UppercaseTextDecoder(TypeDecoder): + bson_type = str + + def transform_bson(self, value): + return value.upper() + + +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) + + +def type_obfuscating_decoder_factory(rt_type): + class ResumeTokenToNanDecoder(TypeDecoder): + bson_type = rt_type + + def transform_bson(self, value): + return "NaN" + + return ResumeTokenToNanDecoder + + +class CustomBSONTypeTests: + @no_type_check + def roundtrip(self, doc): + bsonbytes = encode(doc, codec_options=self.codecopts) + rt_document = decode(bsonbytes, codec_options=self.codecopts) + self.assertEqual(doc, rt_document) + + def test_encode_decode_roundtrip(self): + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) + + @no_type_check + def test_decode_all(self): + documents = [] + for dec in range(3): + documents.append({"average": Decimal(f"56.4{dec}")}) + + bsonstream = b"" + for doc in documents: + bsonstream += encode(doc, codec_options=self.codecopts) + + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) + + @no_type_check + def test__bson_to_dict(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + decoded_document = _bson_to_dict(rawbytes, self.codecopts) + self.assertEqual(document, decoded_document) + + @no_type_check + def test__dict_to_bson(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + encoded_document = _dict_to_bson(document, False, self.codecopts) + self.assertEqual(encoded_document, rawbytes) + + def _generate_multidocument_bson_stream(self): + inp_num = [str(random() * 100)[:4] for _ in range(10)] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] + bsonstream = b"" + for doc in docs: + bsonstream += encode(doc) + return edocs, bsonstream + + @no_type_check + def test_decode_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + @no_type_check + def test_decode_file_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + fileobj = tempfile.TemporaryFile() + fileobj.write(bson_data) + fileobj.seek(0) + + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + fileobj.close() + + +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.codecopts = DECIMAL_CODECOPTS + + +class 
TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + codec_options = CodecOptions( + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) + cls.codecopts = codec_options + + +class TestBSONFallbackEncoder(unittest.TestCase): + def _get_codec_options(self, fallback_encoder): + type_registry = TypeRegistry(fallback_encoder=fallback_encoder) + return CodecOptions(type_registry=type_registry) + + def test_simple(self): + codecopts = self._get_codec_options(lambda x: Decimal128(x)) + document = {"average": Decimal("56.47")} + bsonbytes = encode(document, codec_options=codecopts) + + exp_document = {"average": Decimal128("56.47")} + exp_bsonbytes = encode(exp_document) + self.assertEqual(bsonbytes, exp_bsonbytes) + + def test_erroring_fallback_encoder(self): + codecopts = self._get_codec_options(lambda _: 1 / 0) + + # fallback converter should not be invoked when encoding known types. + encode( + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) + + # expect an error when encoding a custom type. + document = {"average": Decimal("56.47")} + with self.assertRaises(ZeroDivisionError): + encode(document, codec_options=codecopts) + + def test_noop_fallback_encoder(self): + codecopts = self._get_codec_options(lambda x: x) + document = {"average": Decimal("56.47")} + with self.assertRaises(InvalidDocument): + encode(document, codec_options=codecopts) + + def test_type_unencodable_by_fallback_encoder(self): + def fallback_encoder(value): + try: + return Decimal128(value) + except: + raise TypeError("cannot encode type %s" % (type(value))) + + codecopts = self._get_codec_options(fallback_encoder) + document = {"average": Decimal} + with self.assertRaises(TypeError): + encode(document, codec_options=codecopts) + + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + + +class TestBSONTypeEnDeCodecs(unittest.TestCase): + def test_instantiation(self): + msg = "Can't instantiate abstract class" + + def run_test(base, attrs, fail): + codec = type("testcodec", (base,), attrs) + if fail: + with self.assertRaisesRegex(TypeError, msg): + codec() + else: + codec() + + class MyType: + pass + + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) + + def test_type_checks(self): 
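+        # Editorial aside (hypothetical sketch, not part of the upstream
+        # change): because TypeCodec subclasses both hooks, a single class can
+        # round-trip one Python type, e.g.
+        #
+        #     class DecimalCodec(TypeCodec):
+        #         python_type = Decimal      # encoder half
+        #         bson_type = Decimal128     # decoder half
+        #         def transform_python(self, value): return Decimal128(value)
+        #         def transform_bson(self, value): return value.to_decimal()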
+ self.assertTrue(issubclass(TypeCodec, TypeEncoder)) + self.assertTrue(issubclass(TypeCodec, TypeDecoder)) + self.assertFalse(issubclass(TypeDecoder, TypeEncoder)) + self.assertFalse(issubclass(TypeEncoder, TypeDecoder)) + + +class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + + @classmethod + def setUpClass(cls): + class TypeA: + def __init__(self, x): + self.value = x + + class TypeB: + def __init__(self, x): + self.value = x + + # transforms A, and only A into B + def fallback_encoder_A2B(value): + assert isinstance(value, TypeA) + return TypeB(value.value) + + # transforms A, and only A into something encodable + def fallback_encoder_A2BSON(value): + assert isinstance(value, TypeA) + return value.value + + # transforms B into something encodable + class B2BSON(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return value.value + + # transforms A into B + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class A2B(TypeEncoder): + python_type = TypeA + + def transform_python(self, value): + return TypeB(value.value) + + # transforms B into A + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class B2A(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return TypeA(value.value) + + cls.TypeA = TypeA + cls.TypeB = TypeB + cls.fallback_encoder_A2B = staticmethod(fallback_encoder_A2B) + cls.fallback_encoder_A2BSON = staticmethod(fallback_encoder_A2BSON) + cls.B2BSON = B2BSON + cls.B2A = B2A + cls.A2B = A2B + + def test_encode_fallback_then_custom(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_encode_custom_then_fallback(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_chaining_encoders_fails(self): + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) + + with self.assertRaises(InvalidDocument): + encode({"x": self.TypeA(123)}, codec_options=codecopts) + + def test_infinite_loop_exceeds_max_recursion_depth(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) + + # Raises max recursion depth exceeded error + with self.assertRaises(RuntimeError): + encode({"x": self.TypeA(100)}, codec_options=codecopts) + + +class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + + @classmethod + def setUpClass(cls): + class MyIntType: + def __init__(self, x): + assert isinstance(x, int) + self.x = x + + class MyStrType: + def __init__(self, x): + assert isinstance(x, str) + self.x = x + + class MyIntCodec(TypeCodec): + @property + def python_type(self): + return MyIntType + + @property + def bson_type(self): + return int + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): 
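+                # (Editorial comment, not upstream: the decode hook re-wraps
+                # the plain BSON int in the custom type, mirroring the unwrap
+                # done by transform_python above.)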
+ return MyIntType(value) + + class MyStrCodec(TypeCodec): + @property + def python_type(self): + return MyStrType + + @property + def bson_type(self): + return str + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyStrType(value) + + def fallback_encoder(value): + return value + + cls.types = (MyIntType, MyStrType) + cls.codecs = (MyIntCodec, MyStrCodec) + cls.fallback_encoder = fallback_encoder + + def test_simple(self): + codec_instances = [codec() for codec in self.codecs] + + def assert_proper_initialization(type_registry, codec_instances): + self.assertEqual( + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) + + type_registry = TypeRegistry(codec_instances, self.fallback_encoder) + assert_proper_initialization(type_registry, codec_instances) + + type_registry = TypeRegistry( + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) + assert_proper_initialization(type_registry, codec_instances) + + # Ensure codec list held by the type registry doesn't change if we + # mutate the initial list. + codec_instances_copy = list(codec_instances) + codec_instances.pop(0) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) + + def test_simple_separate_codecs(self): + class MyIntEncoder(TypeEncoder): + python_type = self.types[0] + + def transform_python(self, value): + return value.x + + class MyIntDecoder(TypeDecoder): + bson_type = int + + def transform_bson(self, value): + return self.types[0](value) + + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] + type_registry = TypeRegistry(codec_instances) + + self.assertEqual( + type_registry._encoder_map, + {MyIntEncoder.python_type: codec_instances[1].transform_python}, + ) + self.assertEqual( + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, + ) + + def test_initialize_fail(self): + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(self.codecs) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([type("AnyType", (object,), {})()]) + + err_msg = f"fallback_encoder {True!r} is not a callable" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([], True) # type: ignore[arg-type] + + err_msg = "fallback_encoder {!r} is not a callable".format("hello") + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + + def test_type_registry_repr(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" + self.assertEqual(r, repr(type_registry)) + + def 
test_type_registry_eq(self): + codec_instances = [codec() for codec in self.codecs] + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + + codec_instances_2 = [codec() for codec in self.codecs] + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + + def test_builtin_types_override_fails(self): + def run_test(base, attrs): + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) + for pytype in _BUILT_IN_TYPES: + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + # Test only some subtypes as not all can be subclassed. + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: + continue + + class MyType(pytype): # type: ignore + pass + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + run_test(TypeEncoder, {}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) + + +class TestCollectionWCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.drop() + + async def asyncTearDown(self): + await self.db.test.drop() + + async def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + await collection.insert_one({"_id": 1, "data": 2**520}) + ret = await collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + + async def test_command_errors_w_custom_type_decoder(self): + db = self.db + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + result = await test.insert_one(test_doc) + self.assertEqual(result.inserted_id, test_doc["_id"]) + with self.assertRaises(DuplicateKeyError): + await test.insert_one(test_doc) + + async def test_find_w_custom_type_decoder(self): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + async def test_find_w_custom_type_decoder_and_document_class(self): + async def run_test(doc_cls): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc, doc_cls) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + for doc_cls in [RawBSONDocument, OrderedDict]: + await run_test(doc_cls) + + async def test_aggregate_w_custom_type_decoder(self): + db = self.db + await db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in 
progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + pipeline: list = [ + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] + result = await test.aggregate(pipeline) + + res = (await result.to_list())[0] + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) + + async def test_distinct_w_custom_type(self): + await self.db.drop_collection("test") + + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) + values = [ + UndecipherableInt64Type(1), + UndecipherableInt64Type(2), + UndecipherableInt64Type(3), + {"b": UndecipherableInt64Type(3)}, + ] + await test.insert_many({"a": val} for val in values) + + self.assertEqual(values, await test.distinct("a")) + + async def test_find_one_and__w_custom_type_decoder(self): + db = self.db + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + await c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = await c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = await c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = await c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertIsNone(await c.find_one()) + + +class TestGridFileCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + + async def test_grid_out_custom_opts(self): + db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) + one = AsyncGridIn( + db.fs, + _id=5, + filename="my_file", + chunkSize=1000, + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(db.fs, 5) + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual(1000, two.chunk_size) + self.assertIsInstance(two.upload_date, datetime.datetime) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) + self.assertEqual(3, two.bar) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + +class ChangeStreamsWCustomTypesTestMixin: + @no_type_check + async def change_stream(self, *args, **kwargs): + stream = await self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addAsyncCleanup(stream.close) + return stream + + @no_type_check + async def insert_and_check(self, change_stream, insert_doc, expected_doc): + await self.input_target.insert_one(insert_doc) + change = await anext(change_stream) + self.assertEqual(change["fullDocument"], expected_doc) + + @no_type_check + async def kill_change_stream_cursor(self, change_stream): + # Cause a 
cursor not found error on the next getMore. + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.input_target.database.client + await client._close_cursor_now(cursor.cursor_id, address) + + @no_type_check + async def test_simple(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [ + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] + + change_stream = await self.change_stream() + + await self.insert_and_check(change_stream, input_docs[0], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + + @no_type_check + async def test_custom_type_in_pipeline(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] + + # UndecipherableInt64Type should be encoded with the TypeRegistry. + change_stream = await self.change_stream( + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) + + await self.input_target.insert_one(input_docs[0]) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + + @no_type_check + async def test_break_resume_token(self): + # Get one document from a change stream to determine resumeToken type. + await self.create_targets() + change_stream = await self.change_stream() + await self.input_target.insert_one({"data": "test"}) + change = await anext(change_stream) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) + + # Custom-decoding the resumeToken type breaks resume tokens. + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) + + # Re-create targets, change stream and proceed. 
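+        # Editorial aside (interpretive, not part of the upstream change): the
+        # obfuscating decoder turns every decoded resume token into the string
+        # "NaN", so the resumes exercised below can only succeed if the driver
+        # tracks raw tokens internally instead of re-reading the user-decoded
+        # change["_id"].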
+ await self.create_targets(codec_options=codecopts) + + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] + + change_stream = await self.change_stream() + await self.insert_and_check(change_stream, docs[0], docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[1], docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[2], docs[2]) + + @no_type_check + async def test_document_class(self): + async def run_test(doc_cls): + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) + + await self.create_targets(codec_options=codecopts) + change_stream = await self.change_stream() + + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} + await self.input_target.insert_one(doc) + change = await anext(change_stream) + + self.assertIsInstance(change, doc_cls) + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") + + for doc_cls in [OrderedDict, RawBSONDocument]: + await run_test(doc_cls) + + +class TestCollectionChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.db.get_collection("test", *args, **kwargs) + self.input_target = self.watched_target + # Ensure the collection exists and is empty. + await self.input_target.insert_one({}) + await self.input_target.delete_many({}) + + +class TestDatabaseChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.watched_target) + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) + self.input_target = self.watched_target.test + # Insert a record to ensure db, coll are created. + await self.input_target.insert_one({"data": "dummy"}) + + +class TestClusterChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.db) + + async def create_targets(self, *args, **kwargs): + codec_options = kwargs.pop("codec_options", None) + if codec_options: + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class + self.watched_target = await self.async_rs_client(*args, **kwargs) + self.input_target = self.watched_target[self.db.name].test + # Insert a record to ensure db, coll are created. 
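+        # (Editorial note, not upstream: the dummy insert below forces lazy
+        # creation of the namespace before it is watched, and the client
+        # constructor takes no codec_options bundle, which is why
+        # create_targets unpacks it above into the separate type_registry and
+        # document_class keyword arguments.)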
+ await self.input_target.insert_one({"data": "dummy"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py new file mode 100644 index 0000000000..b49183a852 --- /dev/null +++ b/test/asynchronous/test_database.py @@ -0,0 +1,773 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import re +import sys +from typing import Any, Iterable, List, Mapping, Union + +from pymongo.asynchronous.command_cursor import AsyncCommandCursor + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + OvertCommandListener, + async_wait_until, +) + +from bson.codec_options import CodecOptions +from bson.dbref import DBRef +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.regex import Regex +from bson.son import SON +from pymongo import helpers_shared +from pymongo.asynchronous import auth +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + CollectionInvalid, + ExecutionTimeout, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestDatabaseNoConnect(unittest.TestCase): + """Test Database features on a client that does not connect.""" + + client: AsyncMongoClient + + @classmethod + def setUpClass(cls): + cls.client = AsyncMongoClient(connect=False) + + def test_name(self): + self.assertRaises(TypeError, AsyncDatabase, self.client, 4) + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my db") + self.assertRaises(InvalidName, AsyncDatabase, self.client, 'my"db') + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my\x00db") + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my\u0000db") + self.assertEqual("name", AsyncDatabase(self.client, "name").name) + + def test_get_collection(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + read_concern = ReadConcern("majority") + coll = self.client.pymongo_test.get_collection( + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) + self.assertEqual(codec_options, coll.codec_options) + self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) + self.assertEqual(write_concern, coll.write_concern) + self.assertEqual(read_concern, coll.read_concern) + + def test_getattr(self): + db = self.client.pymongo_test + self.assertIsInstance(db["_does_not_exist"], 
AsyncCollection) + + with self.assertRaises(AttributeError) as context: + db._does_not_exist + + # Message should be: "AttributeError: Database has no attribute + # '_does_not_exist'. To access the _does_not_exist collection, + # use database['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + def test_iteration(self): + db = self.client.pymongo_test + msg = "'AsyncDatabase' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in db: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = db[0] + # next fails + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): + _ = next(db) + # .next() fails + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): + _ = db.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(db, Iterable) + + +class TestDatabase(AsyncIntegrationTest): + def test_equality(self): + self.assertNotEqual(AsyncDatabase(self.client, "test"), AsyncDatabase(self.client, "mike")) + self.assertEqual(AsyncDatabase(self.client, "test"), AsyncDatabase(self.client, "test")) + + # Explicitly test inequality + self.assertFalse(AsyncDatabase(self.client, "test") != AsyncDatabase(self.client, "test")) + + def test_hashable(self): + self.assertIn(self.client.test, {AsyncDatabase(self.client, "test")}) + + def test_get_coll(self): + db = AsyncDatabase(self.client, "pymongo_test") + self.assertEqual(db.test, db["test"]) + self.assertEqual(db.test, AsyncCollection(db, "test")) + self.assertNotEqual(db.test, AsyncCollection(db, "mike")) + self.assertEqual(db.test.mike, db["test.mike"]) + + def test_repr(self): + name = "AsyncDatabase" + self.assertEqual( + repr(AsyncDatabase(self.client, "pymongo_test")), + "{}({!r}, {})".format(name, self.client, repr("pymongo_test")), + ) + + async def test_create_collection(self): + db = AsyncDatabase(self.client, "pymongo_test") + + await db.test.insert_one({"hello": "world"}) + with self.assertRaises(CollectionInvalid): + await db.create_collection("test") + + await db.drop_collection("test") + + with self.assertRaises(TypeError): + await db.create_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.create_collection(None) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + await db.create_collection("coll..ection") # type: ignore[arg-type] + + test = await db.create_collection("test") + self.assertIn("test", await db.list_collection_names()) + await test.insert_one({"hello": "world"}) + self.assertEqual((await db.test.find_one())["hello"], "world") + + await db.drop_collection("test.foo") + await db.create_collection("test.foo") + self.assertIn("test.foo", await db.list_collection_names()) + with self.assertRaises(CollectionInvalid): + await db.create_collection("test.foo") + + async def test_list_collection_names(self): + db = AsyncDatabase(self.client, "pymongo_test") + await db.test.insert_one({"dummy": "object"}) + await db.test.mike.insert_one({"dummy": "object"}) + + colls = await db.list_collection_names() + self.assertIn("test", colls) + self.assertIn("test.mike", colls) + for coll in colls: + self.assertNotIn("$", coll) + + await db.systemcoll.test.insert_one({}) + no_system_collections = await db.list_collection_names( + filter={"name": {"$regex": r"^(?!system\.)"}} + ) + for coll in no_system_collections: + self.assertFalse(coll.startswith("system.")) + 
self.assertIn("systemcoll.test", no_system_collections) + + # Force more than one batch. + db = self.client.many_collections + for i in range(101): + await db["coll" + str(i)].insert_one({}) + # No Error + try: + await db.list_collection_names() + finally: + await self.client.drop_database("many_collections") + + async def test_list_collection_names_filter(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + await db.capped.drop() + await db.create_collection("capped", capped=True, size=4096) + await db.capped.insert_one({}) + await db.non_capped.insert_one({}) + self.addAsyncCleanup(client.drop_database, db.name) + filter: Union[None, Mapping[str, Any]] + # Should not send nameOnly. + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): + listener.reset() + names = await db.list_collection_names(filter=filter) + self.assertEqual(names, ["capped"]) + self.assertNotIn("nameOnly", listener.started_events[0].command) + + # Should send nameOnly (except on 2.6). + for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): + listener.reset() + names = await db.list_collection_names(filter=filter) + self.assertIn("capped", names) + self.assertIn("non_capped", names) + command = listener.started_events[0].command + self.assertIn("nameOnly", command) + self.assertTrue(command["nameOnly"]) + + async def test_check_exists(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + await db.drop_collection("unique") + await db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + await db.drop_collection("unique") + await db.create_collection("unique", check_exists=False) + self.assertGreater(len(listener.started_events), 0) + self.assertNotIn("listCollections", listener.started_command_names()) + + async def test_list_collections(self): + await self.client.drop_database("pymongo_test") + db = AsyncDatabase(self.client, "pymongo_test") + await db.test.insert_one({"dummy": "object"}) + await db.test.mike.insert_one({"dummy": "object"}) + + results = await db.list_collections() + colls = [result["name"] async for result in results] + + # All the collections present. + self.assertIn("test", colls) + self.assertIn("test.mike", colls) + + # No collection containing a '$'. + for coll in colls: + self.assertNotIn("$", coll) + + # Duplicate check. + coll_cnt: dict = {} + for coll in colls: + try: + # Found duplicate. + coll_cnt[coll] += 1 + self.fail("Found duplicate") + except KeyError: + coll_cnt[coll] = 1 + coll_cnt: dict = {} + + # Check if there are any collections which don't exist. 
+        self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"})
+
+        colls = await (await db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list()
+        self.assertEqual(1, len(colls))
+
+        colls = await (
+            await db.list_collections(filter={"name": {"$regex": "^test.mike$"}})
+        ).to_list()
+        self.assertEqual(1, len(colls))
+
+        await db.drop_collection("test")
+
+        await db.create_collection("test", capped=True, size=4096)
+        results = await db.list_collections(filter={"options.capped": True})
+        colls = [result["name"] async for result in results]
+
+        # Check that only capped collections are present.
+        self.assertIn("test", colls)
+        self.assertNotIn("test.mike", colls)
+
+        # No collection contains a '$'.
+        for coll in colls:
+            self.assertNotIn("$", coll)
+
+        # Check for duplicates, as above.
+        coll_cnt = {}
+        for coll in colls:
+            try:
+                coll_cnt[coll] += 1
+                self.fail("Found duplicate")
+            except KeyError:
+                coll_cnt[coll] = 1
+
+        # Check that no unexpected collections are present.
+        self.assertLessEqual(set(colls), {"test", "system.indexes"})
+
+        await self.client.drop_database("pymongo_test")
+
+    async def test_list_collection_names_single_socket(self):
+        client = await self.async_rs_or_single_client(maxPoolSize=1)
+        await client.drop_database("test_collection_names_single_socket")
+        db = client.test_collection_names_single_socket
+        for i in range(200):
+            await db.create_collection(str(i))
+
+        await db.list_collection_names()  # Must not hang.
+        await client.drop_database("test_collection_names_single_socket")
+
+    async def test_drop_collection(self):
+        db = AsyncDatabase(self.client, "pymongo_test")
+
+        with self.assertRaises(TypeError):
+            await db.drop_collection(5)  # type: ignore[arg-type]
+        with self.assertRaises(TypeError):
+            await db.drop_collection(None)  # type: ignore[arg-type]
+
+        await db.test.insert_one({"dummy": "object"})
+        self.assertIn("test", await db.list_collection_names())
+        await db.drop_collection("test")
+        self.assertNotIn("test", await db.list_collection_names())
+
+        await db.test.insert_one({"dummy": "object"})
+        self.assertIn("test", await db.list_collection_names())
+        await db.drop_collection("test")
+        self.assertNotIn("test", await db.list_collection_names())
+
+        await db.test.insert_one({"dummy": "object"})
+        self.assertIn("test", await db.list_collection_names())
+        await db.drop_collection(db.test)
+        self.assertNotIn("test", await db.list_collection_names())
+
+        await db.test.insert_one({"dummy": "object"})
+        self.assertIn("test", await db.list_collection_names())
+        await db.test.drop()
+        self.assertNotIn("test", await db.list_collection_names())
+        await db.test.drop()
+
+        # Dropping a nonexistent collection must not raise.
+        await db.drop_collection(db.test.doesnotexist)
+
+        if async_client_context.is_rs:
+            db_wc = AsyncDatabase(
+                self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN
+            )
+            with self.assertRaises(WriteConcernError):
+                await db_wc.drop_collection("test")
+
+    async def test_validate_collection(self):
+        db = self.client.pymongo_test
+
+        with self.assertRaises(TypeError):
+            await db.validate_collection(5)  # type: ignore[arg-type]
+        with self.assertRaises(TypeError):
+            await db.validate_collection(None)  # type: ignore[arg-type]
+
+        await db.test.insert_one({"dummy": "object"})
+
+        with self.assertRaises(OperationFailure):
+            await db.validate_collection("test.doesnotexist")
+        with self.assertRaises(OperationFailure):
+            await db.validate_collection(db.test.doesnotexist)
+
+        self.assertTrue(await db.validate_collection("test"))
+        
self.assertTrue(await db.validate_collection(db.test)) + self.assertTrue(await db.validate_collection(db.test, full=True)) + self.assertTrue(await db.validate_collection(db.test, scandata=True)) + self.assertTrue(await db.validate_collection(db.test, scandata=True, full=True)) + self.assertTrue(await db.validate_collection(db.test, True, True)) + + @async_client_context.require_version_min(4, 3, 3) + @async_client_context.require_no_standalone + async def test_validate_collection_background(self): + db = self.client.pymongo_test.with_options(write_concern=WriteConcern(w="majority")) + await db.test.insert_one({"dummy": "object"}) + coll = db.test + self.assertTrue(await db.validate_collection(coll, background=False)) + # The inMemory storage engine does not support background=True. + if async_client_context.storage_engine != "inMemory": + # background=True requires the collection exist in a checkpoint. + await self.client.admin.command("fsync") + self.assertTrue(await db.validate_collection(coll, background=True)) + self.assertTrue(await db.validate_collection(coll, scandata=True, background=True)) + # The server does not support background=True with full=True. + # Assert that we actually send the background option by checking + # that this combination fails. + with self.assertRaises(OperationFailure): + await db.validate_collection(coll, full=True, background=True) + + async def test_command(self): + self.maxDiff = None + db = self.client.admin + first = await db.command("buildinfo") + second = await db.command({"buildinfo": 1}) + third = await db.command("buildinfo", 1) + self.assertEqualReply(first, second) + self.assertEqualReply(second, third) + + # We use 'aggregate' as our example command, since it's an easy way to + # retrieve a BSON regex from a collection using a command. + async def test_command_with_regex(self): + db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one({"r": re.compile(".*")}) + await db.test.insert_one({"r": Regex(".*")}) + + result = await db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertIsInstance(doc["r"], Regex) + + async def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). 
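+        # The server-wide bulkWrite command requires MongoDB 8.0+; the
+        # per-collection insert/update/delete commands below work on all
+        # supported server versions.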
+        if async_client_context.version.at_least(8, 0):
+            await self.client.admin.command(
+                {
+                    "bulkWrite": 1,
+                    "nsInfo": [{"ns": self.db.test.full_name}],
+                    "ops": [{"insert": 0, "document": {}}],
+                }
+            )
+        await self.db.command({"insert": "test", "documents": [{}]})
+        await self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]})
+        await self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]})
+        await self.db.test.drop()
+
+    async def test_cursor_command(self):
+        db = self.client.pymongo_test
+        await db.test.drop()
+
+        docs = [{"_id": i, "doc": i} for i in range(3)]
+        await db.test.insert_many(docs)
+
+        cursor = await db.cursor_command("find", "test")
+
+        self.assertIsInstance(cursor, AsyncCommandCursor)
+
+        result_docs = await cursor.to_list()
+        self.assertEqual(docs, result_docs)
+
+    async def test_cursor_command_invalid(self):
+        with self.assertRaises(InvalidOperation):
+            await self.db.cursor_command("usersInfo", "test")
+
+    @async_client_context.require_no_fips
+    def test_password_digest(self):
+        with self.assertRaises(TypeError):
+            auth._password_digest(5)  # type: ignore[arg-type, call-arg]
+        with self.assertRaises(TypeError):
+            auth._password_digest(True)  # type: ignore[arg-type, call-arg]
+        with self.assertRaises(TypeError):
+            auth._password_digest(None)  # type: ignore[arg-type, call-arg]
+
+        self.assertIsInstance(auth._password_digest("mike", "password"), str)
+        self.assertEqual(
+            auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00"
+        )
+        self.assertEqual(
+            auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73"
+        )
+
+    async def test_id_ordering(self):
+        # PyMongo attempts to have _id show up first when you iterate
+        # key/value pairs in a document. This isn't reliable since Python
+        # dicts don't guarantee any particular order. This will never work
+        # right in Jython, or in any Python environment with hash
+        # randomization enabled (e.g. tox).
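+        # SON is an ordered dict, so decoding with document_class=SON below
+        # makes the server's field order observable to the assertions.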
+ db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) + + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + cursor = db.test.find() + async for x in cursor: + for k, _v in x.items(): + self.assertEqual(k, "_id") + break + + async def test_deref(self): + db = self.client.pymongo_test + await db.test.drop() + + with self.assertRaises(TypeError): + await db.dereference(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.dereference("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.dereference(None) # type: ignore[arg-type] + + self.assertEqual(None, await db.dereference(DBRef("test", ObjectId()))) + obj: dict[str, Any] = {"x": True} + key = (await db.test.insert_one(obj)).inserted_id + self.assertEqual(obj, await db.dereference(DBRef("test", key))) + self.assertEqual(obj, await db.dereference(DBRef("test", key, "pymongo_test"))) + with self.assertRaises(ValueError): + await db.dereference(DBRef("test", key, "foo")) + + self.assertEqual(None, await db.dereference(DBRef("test", 4))) + obj = {"_id": 4} + await db.test.insert_one(obj) + self.assertEqual(obj, await db.dereference(DBRef("test", 4))) + + async def test_deref_kwargs(self): + db = self.client.pymongo_test + await db.test.drop() + + await db.test.insert_one({"_id": 4, "foo": "bar"}) + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + self.assertEqual( + SON([("foo", "bar")]), await db.dereference(DBRef("test", 4), projection={"_id": False}) + ) + + # TODO some of these tests belong in the collection level testing. + async def test_insert_find_one(self): + db = self.client.pymongo_test + await db.test.drop() + + a_doc = SON({"hello": "world"}) + a_key = (await db.test.insert_one(a_doc)).inserted_id + self.assertIsInstance(a_doc["_id"], ObjectId) + self.assertEqual(a_doc["_id"], a_key) + self.assertEqual(a_doc, await db.test.find_one({"_id": a_doc["_id"]})) + self.assertEqual(a_doc, await db.test.find_one(a_key)) + self.assertEqual(None, await db.test.find_one(ObjectId())) + self.assertEqual(a_doc, await db.test.find_one({"hello": "world"})) + self.assertEqual(None, await db.test.find_one({"hello": "test"})) + + b = await db.test.find_one() + assert b is not None + b["hello"] = "mike" + await db.test.replace_one({"_id": b["_id"]}, b) + + self.assertNotEqual(a_doc, await db.test.find_one(a_key)) + self.assertEqual(b, await db.test.find_one(a_key)) + self.assertEqual(b, await db.test.find_one()) + + count = 0 + async for _ in db.test.find(): + count += 1 + self.assertEqual(count, 1) + + async def test_long(self): + db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one({"x": 9223372036854775807}) + retrieved = (await db.test.find_one())["x"] + self.assertEqual(Int64(9223372036854775807), retrieved) + self.assertIsInstance(retrieved, Int64) + await db.test.delete_many({}) + await db.test.insert_one({"x": Int64(1)}) + retrieved = (await db.test.find_one())["x"] + self.assertEqual(Int64(1), retrieved) + self.assertIsInstance(retrieved, Int64) + + async def test_delete(self): + db = self.client.pymongo_test + await db.test.drop() + + await db.test.insert_one({"x": 1}) + await db.test.insert_one({"x": 2}) + await db.test.insert_one({"x": 3}) + length = 0 + async for _ in db.test.find(): + length += 1 + self.assertEqual(length, 3) + + await db.test.delete_one({"x": 1}) + length 
= 0 + async for _ in db.test.find(): + length += 1 + self.assertEqual(length, 2) + + await db.test.delete_one(await db.test.find_one()) # type: ignore[arg-type] + await db.test.delete_one(await db.test.find_one()) # type: ignore[arg-type] + self.assertEqual(await db.test.find_one(), None) + + await db.test.insert_one({"x": 1}) + await db.test.insert_one({"x": 2}) + await db.test.insert_one({"x": 3}) + + self.assertTrue(await db.test.find_one({"x": 2})) + await db.test.delete_one({"x": 2}) + self.assertFalse(await db.test.find_one({"x": 2})) + + self.assertTrue(await db.test.find_one()) + await db.test.delete_many({}) + self.assertFalse(await db.test.find_one()) + + def test_command_response_without_ok(self): + # Sometimes (SERVER-10891) the server's response to a badly-formatted + # command document will have no 'ok' field. We should raise + # OperationFailure instead of KeyError. + with self.assertRaises(OperationFailure): + helpers_shared._check_command_response({}, None) + + try: + helpers_shared._check_command_response({"$err": "foo"}, None) + except OperationFailure as e: + self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") + else: + self.fail("_check_command_response didn't raise OperationFailure") + + def test_mongos_response(self): + error_document = { + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("inner", str(context.exception)) + + # If a shard has no primary and you run a command like dbstats, which + # cannot be run on a secondary, mongos's response includes empty "raw" + # errors. See SERVER-15428. + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("outer", str(context.exception)) + + # Raw error has ok: 0 but no errmsg. Not a known case, but test it. + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("outer", str(context.exception)) + + @async_client_context.require_test_commands + @async_client_context.require_no_mongos + async def test_command_max_time_ms(self): + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + db = self.client.pymongo_test + await db.command("count", "test") + with self.assertRaises(ExecutionTimeout): + await db.command("count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] + # Database command helper. + await db.command("aggregate", "test", pipeline=pipeline, cursor={}) + with self.assertRaises(ExecutionTimeout): + await db.command( + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) + # Collection helper. + await db.test.aggregate(pipeline=pipeline) + with self.assertRaises(ExecutionTimeout): + await db.test.aggregate(pipeline, maxTimeMS=1) + finally: + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off" + ) + + def test_with_options(self): + codec_options = DECIMAL_CODECOPTS + read_preference = ReadPreference.SECONDARY_PREFERRED + write_concern = WriteConcern(j=True) + read_concern = ReadConcern(level="majority") + + # List of all options to compare. 
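+        # with_options() returns a new AsyncDatabase sharing the same name and
+        # client; only the four inheritable options below may be overridden.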
+ allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] + + db1 = self.client.get_database( + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) + + # Case 1: swap no options + db2 = db1.with_options() + for opt in allopts: + self.assertEqual(getattr(db1, opt), getattr(db2, opt)) + + # Case 2: swap all options + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] + for opt in newopts: + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) + + +class TestDatabaseAggregation(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] + self.result = {"dummy": "dummy field"} + self.admin = self.client.admin + + async def test_database_aggregation(self): + async with await self.admin.aggregate(self.pipeline) as cursor: + result = await anext(cursor) + self.assertEqual(result, self.result) + + @async_client_context.require_no_mongos + async def test_database_aggregation_fake_cursor(self): + coll_name = "test_output" + write_stage: dict + if async_client_context.version < (4, 3): + db_name = "admin" + write_stage = {"$out": coll_name} + else: + # SERVER-43287 disallows writing with $out to the admin db, use + # $merge instead. + db_name = "pymongo_test" + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} + output_coll = self.client[db_name][coll_name] + await output_coll.drop() + self.addAsyncCleanup(output_coll.drop) + + admin = self.admin.with_options(write_concern=WriteConcern(w=0)) + pipeline = self.pipeline[:] + pipeline.append(write_stage) + async with await admin.aggregate(pipeline) as cursor: + with self.assertRaises(StopAsyncIteration): + await anext(cursor) + + async def lambda_fn(): + return await output_coll.find_one() + + result = await async_wait_until(lambda_fn, "read unacknowledged write") + self.assertEqual(result["dummy"], self.result["dummy"]) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(AsyncDatabase(self.client, "test")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py new file mode 100644 index 0000000000..5820d00c48 --- /dev/null +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -0,0 +1,587 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import asyncio +import os +import socketserver +import sys +import threading +import time +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky + +from pymongo.asynchronous.pool import AsyncConnection +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + AsyncUnitTest, + async_client_context, + unittest, +) +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils import ( + async_get_pool, +) +from test.utils_shared import ( + CMAPListener, + HeartbeatEventListener, + HeartbeatEventsListListener, + assertion_context, + async_barrier_wait, + async_create_barrier, + async_wait_until, + server_name_to_type, +) +from unittest.mock import patch + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent +from pymongo.server_description import SERVER_TYPE, ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) + + +async def create_mock_topology(uri, monitor_class=DummyMonitor): + parsed_uri = await parse_uri(uri) + replica_set_name = None + direct_connection = None + load_balanced = None + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] + + topology_settings = TopologySettings( + parsed_uri["nodelist"], + replica_set_name=replica_set_name, + monitor_class=monitor_class, + direct_connection=direct_connection, + load_balanced=load_balanced, + ) + + c = Topology(topology_settings) + await c.open() + return c + + +async def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) + await topology.on_change(server_description) + + +async def got_app_error(topology, app_error): + server_address = common.partition_node(app_error["address"]) + server = topology.get_server_by_address(server_address) + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/AsyncConnection. 
+ try: + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") + else: + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: + if when == "beforeHandshakeCompletes": + completed_handshake = False + elif when == "afterHandshakeCompletes": + completed_handshake = True + else: + raise AssertionError(f"Unknown when field {when}") + + await topology.handle_error( + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) + + +def get_type(topology, hostname): + description = topology.get_server_by_address((hostname, 27017)).description + return description.server_type + + +class TestAllScenarios(AsyncUnitTest): + pass + + +def topology_type_name(topology_type): + return TOPOLOGY_TYPE._fields[topology_type] + + +def server_type_name(server_type): + return SERVER_TYPE._fields[server_type] + + +def check_outcome(self, topology, outcome): + expected_servers = outcome["servers"] + + # Check weak equality before proceeding. + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) + + if outcome.get("compatible") is False: + with self.assertRaises(ConfigurationError): + topology.description.check_compatible() + else: + # No error. + topology.description.check_compatible() + + # Since lengths are equal, every actual server must have a corresponding + # expected server. + for expected_server_address, expected_server in expected_servers.items(): + node = common.partition_node(expected_server_address) + self.assertTrue(topology.has_server(node)) + actual_server = topology.get_server_by_address(node) + actual_server_description = actual_server.description + expected_server_type = server_name_to_type(expected_server["type"]) + + self.assertEqual( + server_type_name(expected_server_type), + server_type_name(actual_server_description.server_type), + ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) + + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) + + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) + + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) + + self.assertEqual( + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) + + expected_pool = expected_server.get("pool") + if expected_pool: + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) + + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) + + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) + + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) + + +def create_test(scenario_def): + async def run_scenario(self): + c 
= await create_mock_topology(scenario_def["uri"]) + + for i, phase in enumerate(scenario_def["phases"]): + # Including the phase description makes failures easier to debug. + description = phase.get("description", str(i)) + with assertion_context(f"phase: {description}"): + for response in phase.get("responses", []): + await got_hello(c, common.partition_node(response[0]), response[1]) + + for app_error in phase.get("applicationErrors", []): + await got_app_error(c, app_error) + + check_outcome(self, c, phase["outcome"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(SDAM_PATH): + dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestClusterTimeComparison(AsyncPyMongoTestCase): + async def test_cluster_time_comparison(self): + t = await create_mock_topology("mongodb://host") + + async def send_cluster_time(time, inc): + old = t.max_cluster_time() + new = {"clusterTime": Timestamp(time, inc)} + await got_hello( + t, + ("host", 27017), + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": new, + }, + ) + + actual = t.max_cluster_time() + # We never update $clusterTime from monitoring connections. + self.assertEqual(actual, old) + + await send_cluster_time(0, 1) + await send_cluster_time(2, 2) + await send_cluster_time(2, 1) + await send_cluster_time(1, 3) + await send_cluster_time(2, 3) + + +class TestIgnoreStaleErrors(AsyncIntegrationTest): + async def test_ignore_stale_connection_errors(self): + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = async_create_barrier(N_TASKS) + client = await self.async_rs_or_single_client(minPoolSize=N_TASKS) + + # Wait for initial discovery. + await client.admin.command("ping") + pool = await async_get_pool(client) + starting_generation = pool.gen.get_overall() + await async_wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") + + async def mock_command(*args, **kwargs): + # Synchronize all tasks to ensure they use the same generation. + await async_barrier_wait(barrier, timeout=30) + raise AutoReconnect("mock AsyncConnection.command error") + + for conn in pool.conns: + conn.command = mock_command + + async def insert_command(i): + try: + await client.test.command("insert", "test", documents=[{"i": i}]) + except AutoReconnect: + pass + + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: + await t.start() + for t in tasks: + await t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) + + # Server should be selectable. 
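+        # The ping performs server selection again, verifying the topology
+        # recovered from the simulated network errors.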
+ await client.admin.command("ping") + + +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_pool_unpause(self): + # This test implements the prose test "AsyncConnection Pool Management" + listener = CMAPHeartbeatListener() + _ = await self.async_single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) + # Assert that AsyncConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) + + listener.reset() + fail_hello = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", + }, + } + async with self.fail_point(fail_hello): + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + await listener.async_wait_for_event(monitoring.PoolClearedEvent, 1) + await listener.async_wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + + @async_client_context.require_failCommand_appName + @async_client_context.require_test_commands + @async_client_context.require_async + @flaky(reason="PYTHON-5428") + async def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = await self.async_single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = await (await client._get_topology()).select_server( + writable_server_selector, _Op.TEST + ) + await async_wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + await client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + async def run_task(): + while True: + start_time = time.monotonic() + await client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + await asyncio.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + await task.start() + original_close = AsyncConnection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + async def mock_close(self, reason): + await asyncio.sleep(close_delay) + await original_close(self, reason) + + AsyncConnection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + async with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + await listener.async_wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shutdown the task + n = len(latencies) + await async_wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + await task.join() + # No operation latency 
should not significantly exceed close_delay + self.assertLessEqual(max(latencies), close_delay * 5.0) + finally: + AsyncConnection.close_conn = original_close + + +class TestServerMonitoringMode(AsyncIntegrationTest): + @async_client_context.require_no_load_balancer + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_rtt_connection_is_enabled_stream(self): + client = await self.async_rs_or_single_client(serverMonitoringMode="stream") + await client.admin.command("ping") + + def predicate(): + for _, server in client._topology._servers.items(): + monitor = server._monitor + if not monitor._stream: + return False + if async_client_context.version >= (4, 4): + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._task is None: + return False + else: + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is not None: + return False + else: + if monitor._rtt_monitor._executor._task is not None: + return False + return True + + await async_wait_until(predicate, "find all RTT monitors") + + async def test_rtt_connection_is_disabled_poll(self): + client = await self.async_rs_or_single_client(serverMonitoringMode="poll") + + await self.assert_rtt_connection_is_disabled(client) + + async def test_rtt_connection_is_disabled_auto(self): + envs = [ + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"}, + {"FUNCTIONS_WORKER_RUNTIME": "python"}, + {"K_SERVICE": "gcpservicename"}, + {"FUNCTION_NAME": "gcpfunctionname"}, + {"VERCEL": "1"}, + ] + for env in envs: + with patch.dict("os.environ", env): + client = await self.async_rs_or_single_client(serverMonitoringMode="auto") + await self.assert_rtt_connection_is_disabled(client) + + async def assert_rtt_connection_is_disabled(self, client): + await client.admin.command("ping") + for _, server in client._topology._servers.items(): + monitor = server._monitor + self.assertFalse(monitor._stream) + if _IS_SYNC: + self.assertIsNone(monitor._rtt_monitor._executor._thread) + else: + self.assertIsNone(monitor._rtt_monitor._executor._task) + + +class MockTCPHandler(socketserver.BaseRequestHandler): + def handle(self): + self.server.events.append("client connected") + if self.request.recv(1024).strip(): + self.server.events.append("client hello received") + self.request.close() + + +class TCPServer(socketserver.TCPServer): + allow_reuse_address = True + + def handle_request_and_shutdown(self): + self.handle_request() + self.server_close() + + +class TestHeartbeatStartOrdering(AsyncPyMongoTestCase): + async def test_heartbeat_start_ordering(self): + events = [] + listener = HeartbeatEventsListListener(events) + + if _IS_SYNC: + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown) + await server_thread.start() + _c = await self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + await server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + else: + + async def handle_client(reader: StreamReader, writer: StreamWriter): + events.append("client connected") + if (await reader.read(1024)).strip(): + events.append("client hello received") + writer.close() + await writer.wait_closed() + + server = await asyncio.start_server(handle_client, "localhost", 9999) + server.events = events + await server.start_serving() + _c = 
self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + await _c.aconnect() + + await listener.async_wait_for_event(ServerHeartbeatStartedEvent, 1) + await listener.async_wait_for_event(ServerHeartbeatFailedEvent, 1) + + server.close() + await server.wait_closed() + await _c.close() + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) + + +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py new file mode 100644 index 0000000000..5666612218 --- /dev/null +++ b/test/asynchronous/test_dns.py @@ -0,0 +1,308 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import glob +import json +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + unittest, +) +from test.utils_shared import async_wait_until +from unittest.mock import MagicMock, patch + +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.common import validate_read_preference_tags +from pymongo.errors import ConfigurationError +from pymongo.uri_parser_shared import split_hosts + +_IS_SYNC = False + + +class TestDNSRepl(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) + load_balanced = False + + @async_client_context.require_replica_set + def asyncSetUp(self): + pass + + +class TestDNSLoadBalanced(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) + load_balanced = True + + @async_client_context.require_load_balancer + def asyncSetUp(self): + pass + + +class TestDNSSharded(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) + load_balanced = False + + @async_client_context.require_mongos + def asyncSetUp(self): + pass + + +def create_test(test_case): + async def run_test(self): + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") + num_hosts = test_case.get("numHosts", len(hosts or [])) + + options = test_case.get("options", {}) + if "ssl" in 
options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. + needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) + if needs_tls and not async_client_context.tls: + self.skipTest("this test requires a TLS cluster") + if not needs_tls and async_client_context.tls: + self.skipTest("this test requires a non-TLS cluster") + + if seeds: + seeds = split_hosts(",".join(seeds)) + if hosts: + hosts = frozenset(split_hosts(",".join(hosts))) + + if seeds or num_seeds: + result = await parse_uri(uri, validate=True) + if seeds is not None: + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result["nodelist"]), num_seeds) + if options: + opts = result["options"] + if "readpreferencetags" in opts: + rpts = validate_read_preference_tags( + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) + + hostname = next(iter(async_client_context.client.nodes))[0] + # The replica set members must be configured as 'localhost'. + if hostname == "localhost": + copts = async_client_context.default_client_options.copy() + # Remove tls since SRV parsing should add it automatically. + copts.pop("tls", None) + if async_client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts["tlsAllowInvalidHostnames"] = True + + client = self.simple_client(uri, **copts) + if client._options.connect: + await client.aconnect() + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), num_seeds) + if hosts is not None: + await async_wait_until( + lambda: hosts == client.nodes, "match test hosts to client nodes" + ) + if num_hosts is not None: + await async_wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) + if test_case.get("ping", True): + await client.admin.command("ping") + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. 
+ else: + try: + await parse_uri(uri) + except (ConfigurationError, ValueError): + pass + else: + self.fail("failed to raise an exception") + + return run_test + + +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as dns_test_file: + test_method = create_test(json.load(dns_test_file)) + setattr(cls, "test_" + test_suffix, test_method) + + +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) + + +class TestParsingErrors(AsyncPyMongoTestCase): + async def test_invalid_host(self): + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + await client.aconnect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + await client.aconnect() + + +class IsolatedAsyncioTestCaseInsensitive(AsyncIntegrationTest): + async def test_connect_case_insensitive(self): + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + await client.aconnect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + + +class TestInitialDnsSeedlistDiscovery(AsyncPyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + async def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.asyncresolver.resolve") as mock_resolver: + + async def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + if "expected_error" not in case: + await parse_uri(connection_string) + else: + try: + await parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + async def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.asyncresolver.resolve"): + await parse_uri("mongodb+srv://localhost/") + await parse_uri("mongodb+srv://mongo.local/") + + async def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def 
test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py new file mode 100644 index 0000000000..74c0136ad0 --- /dev/null +++ b/test/asynchronous/test_encryption.py @@ -0,0 +1,3753 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test client side encryption spec.""" +from __future__ import annotations + +import base64 +import copy +import http.client +import json +import os +import pathlib +import re +import socket +import socketserver +import ssl +import sys +import textwrap +import traceback +import uuid +import warnings +from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context +from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.utils import flaky +from test.asynchronous.utils_spec_runner import AsyncSpecRunner, AsyncSpecTestCreator +from threading import Thread +from typing import Any, Dict, Mapping, Optional + +import pytest + +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.daemon import _spawn_daemon +from pymongo.uri_parser_shared import _parse_kms_tls_options + +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False + +sys.path[0:0] = [""] + +from test import ( + unittest, +) +from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils_spec_runner import AsyncSpecRunner +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, + AWS_CREDS, + AWS_TEMP_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + DEFAULT_KMS_TLS, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.utils_shared import ( + AllowListEventListener, + OvertCommandListener, + TopologyEventListener, + async_wait_until, + camel_to_snake_args, + is_greenthread_patched, +) + +from bson import BSON, DatetimeMS, Decimal128, encode, json_util +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.json_util import JSONOptions +from bson.son import SON +from pymongo import ReadPreference +from pymongo.asynchronous import encryption +from pymongo.asynchronous.encryption import Algorithm, AsyncClientEncryption, QueryType +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.cursor_shared import CursorType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +pytestmark = pytest.mark.encryption + +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + + +def get_client_opts(client): + return client.options + + +class TestAutoEncryptionOpts(AsyncPyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically + self.simple_client( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True + ), + connect=False, + ) + + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") + def test_init_requires_pymongocrypt(self): + with self.assertRaises(ConfigurationError): + AutoEncryptionOpts({}, "keyvault.datakeys") + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not 
installed") + def test_init(self): + opts = AutoEncryptionOpts({}, "keyvault.datakeys") + self.assertEqual(opts._kms_providers, {}) + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") + self.assertEqual(opts._key_vault_client, None) + self.assertEqual(opts._schema_map, None) + self.assertEqual(opts._bypass_auto_encryption, False) + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") + self.assertEqual(opts._mongocryptd_bypass_spawn, False) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + self.assertEqual(opts._kms_tls_options, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_spawn_args(self): + # User can override idleShutdownTimeoutSecs + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) + + # idleShutdownTimeoutSecs is added by default + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + + # Also added when other options are given + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) + self.assertEqual( + opts._mongocryptd_spawn_args, + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_init_kms_tls_options(self): + # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AsyncMongoClient(auto_encryption_opts=opts) + + tls_opts: Any + for tls_opts in [ + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + AsyncMongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) + with self.assertRaises(FileNotFoundError): + AsyncMongoClient(auto_encryption_opts=opts) + # Success cases: + tls_opts: Any + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = _kms_ssl_contexts["aws"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, + "k.d", + kms_tls_options=DEFAULT_KMS_TLS, + ) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = 
self.simple_client(connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + client = self.simple_client(auto_encryption_opts=None, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_kwargs(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.simple_client(auto_encryption_opts=opts, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) + + +class AsyncEncryptionIntegrationTest(AsyncIntegrationTest): + """Base class for encryption integration tests.""" + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + def assertEncrypted(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, 6) + + def assertBinaryUUID(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, UUID_SUBTYPE) + + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addAsyncCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + + +# Location of JSON test files. +if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + +SPEC_PATH = os.path.join(BASE, "spec") + +OPTS = CodecOptions() + +# Use SON to preserve the order of fields while parsing json. Use tz_aware +# =False to match how CodecOptions decodes dates. +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) + + +def read(*paths): + with open(os.path.join(BASE, *paths)) as fp: + return fp.read() + + +def json_data(*paths): + return json_util.loads(read(*paths), json_options=JSON_OPTS) + + +def bson_data(*paths): + return encode(json_data(*paths), codec_options=OPTS) + + +class TestClientSimple(AsyncEncryptionIntegrationTest): + async def _test_auto_encrypt(self, opts): + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + # Create the encrypted field's data key. + key_vault = await create_key_vault( + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) + self.addAsyncCleanup(key_vault.drop) + + # Collection.insert_one/insert_many auto encrypts. 
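+ # Both acknowledged and unacknowledged (w=0) writes are exercised below;
+ # the w=0 inserts are verified afterwards with a polling wait.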
+ docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] + encrypted_coll = client.pymongo_test.test + await encrypted_coll.insert_one(docs[0]) + await encrypted_coll.insert_many(docs[1:3]) + unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) + await unack.insert_one(docs[3]) + await unack.insert_many(docs[4:], ordered=False) + + async def count_documents(): + return await self.db.test.count_documents({}) == len(docs) + + await async_wait_until(count_documents, "insert documents with w=0") + + # Database.command auto decrypts. + res = await client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) + + # Collection.find auto decrypts. + decrypted_docs = await encrypted_coll.find().to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.find auto decrypts getMores. + decrypted_docs = await encrypted_coll.find(batch_size=1).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts. + decrypted_docs = await (await encrypted_coll.aggregate([])).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts getMores. + decrypted_docs = await (await encrypted_coll.aggregate([], batchSize=1)).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.distinct auto decrypts. + decrypted_ssns = await encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) + + # Make sure the field is actually encrypted. + async for encrypted_doc in self.db.test.find(): + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) + + # Attempt to encrypt an unencodable object. + with self.assertRaises(BSONError): + await encrypted_coll.insert_one({"unencodeable": object()}) + + async def test_auto_encrypt(self): + # Configure the encrypted field via jsonSchema. + json_schema = json_data("custom", "schema.json") + await create_with_schema(self.db.test, json_schema) + self.addAsyncCleanup(self.db.test.drop) + + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + await self._test_auto_encrypt(opts) + + async def test_auto_encrypt_local_schema_map(self): + # Configure the encrypted field via the local schema_map option. 
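+ # Unlike test_auto_encrypt above, no server-side validator is created here;
+ # the driver takes the JSON schema from the client-side schema_map instead.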
+ schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) + + await self._test_auto_encrypt(opts) + + async def test_use_after_close(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + await client.admin.command("ping") + await client.aclose() + with self.assertRaisesRegex(InvalidOperation, "Cannot use AsyncMongoClient after close"): + await client.admin.command("ping") + + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", + ) + @async_client_context.require_sync + async def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + async def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + await client.admin.command("ping") + + with self.fork(target): + await target() + + +class TestEncryptedBulkWrite(AsyncBulkTestBase, AsyncEncryptionIntegrationTest): + async def test_upsert_uuid_standard_encrypt(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + +class TestClientMaxWireVersion(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_raise_unsupported_error(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + msg = "find_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.find_raw_batches({}) + + msg = "aggregate_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.aggregate_raw_batches([]) + + if async_client_context.is_mongos: + msg = "Exhaust cursors are not supported by mongos" + else: + msg = "exhaust cursors do not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await anext(client.test.test.find(cursor_type=CursorType.EXHAUST)) + + +class TestExplicitSimple(AsyncEncryptionIntegrationTest): + async def test_encrypt_decrypt(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + # Use standard UUID representation. + key_vault = async_client_context.client.keyvault.get_collection( + "datakeys", codec_options=OPTS + ) + self.addAsyncCleanup(key_vault.drop) + + # Create the encrypted field's data key. + key_id = await client_encryption.create_data_key("local", key_alt_names=["name"]) + self.assertBinaryUUID(key_id) + self.assertTrue(await key_vault.find_one({"_id": key_id})) + + # Create an unused data key to make sure filtering works. + unused_key_id = await client_encryption.create_data_key("local", key_alt_names=["unused"]) + self.assertBinaryUUID(unused_key_id) + self.assertTrue(await key_vault.find_one({"_id": unused_key_id})) + + doc = {"_id": 0, "ssn": "000"} + encrypted_ssn = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + + # Ensure encryption via key_alt_name for the same key produces the + # same output. + encrypted_ssn2 = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) + self.assertEqual(encrypted_ssn, encrypted_ssn2) + + # Test encryption via UUID + encrypted_ssn3 = await client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + + # Test decryption. 
+ decrypted_ssn = await client_encryption.decrypt(encrypted_ssn) + self.assertEqual(decrypted_ssn, doc["ssn"]) + + async def test_validation(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt("str") # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt(Binary(b"123")) + + msg = "key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + + async def test_bson_errors(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + + # Attempt to encrypt an unencodable object. + unencodable_value = object() + with self.assertRaises(BSONError): + await client_encryption.encrypt( + unencodable_value, + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=Binary.from_uuid(uuid.uuid4()), + ) + + async def test_codec_options(self): + with self.assertRaisesRegex(TypeError, "codec_options must be"): + self.create_client_encryption( + KMS_PROVIDERS, + "keyvault.datakeys", + async_client_context.client, + None, # type: ignore[arg-type] + ) + + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + client_encryption_legacy = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + + # Create the encrypted field's data key. + key_id = await client_encryption_legacy.create_data_key("local") + + # Encrypt a UUID with JAVA_LEGACY codec options. + value = uuid.uuid4() + encrypted_legacy = await client_encryption_legacy.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = await client_encryption_legacy.decrypt(encrypted_legacy) + self.assertEqual(decrypted_value_legacy, value) + + # Encrypt the same UUID with STANDARD codec options. + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + encrypted_standard = await client_encryption.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_standard = await client_encryption.decrypt(encrypted_standard) + self.assertEqual(decrypted_standard, value) + + # Test that codec_options is applied during encryption. + self.assertNotEqual(encrypted_standard, encrypted_legacy) + # Test that codec_options is applied during decryption. + self.assertEqual( + await client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value) + ) + self.assertNotEqual(await client_encryption.decrypt(encrypted_legacy), value) + + async def test_close(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + await client_encryption.close() + # Close can be called multiple times. 
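+ # close() is idempotent; after it, every operation on the
+ # AsyncClientEncryption should raise InvalidOperation.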
+ await client_encryption.close()
+ algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+ msg = "Cannot use closed AsyncClientEncryption"
+ with self.assertRaisesRegex(InvalidOperation, msg):
+ await client_encryption.create_data_key("local")
+ with self.assertRaisesRegex(InvalidOperation, msg):
+ await client_encryption.encrypt("val", algo, key_alt_name="name")
+ with self.assertRaisesRegex(InvalidOperation, msg):
+ await client_encryption.decrypt(Binary(b"", 6))
+
+ async def test_with_statement(self):
+ async with self.create_client_encryption(
+ KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS
+ ) as client_encryption:
+ pass
+ with self.assertRaisesRegex(InvalidOperation, "Cannot use closed AsyncClientEncryption"):
+ await client_encryption.create_data_key("local")
+
+
+# Spec tests
+AWS_TEMP_NO_SESSION_CREDS = {
+ "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""),
+ "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""),
+}
+
+
+class AsyncTestSpec(AsyncSpecRunner):
+ @classmethod
+ @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
+ async def _setup_class(cls):
+ await super()._setup_class()
+
+ def parse_auto_encrypt_opts(self, opts):
+ """Parse clientOptions.autoEncryptOpts."""
+ opts = camel_to_snake_args(opts)
+ kms_providers = opts["kms_providers"]
+ if "aws" in kms_providers:
+ kms_providers["aws"] = AWS_CREDS
+ if not any(AWS_CREDS.values()):
+ self.skipTest("AWS environment credentials are not set")
+ if "awsTemporary" in kms_providers:
+ kms_providers["aws"] = AWS_TEMP_CREDS
+ del kms_providers["awsTemporary"]
+ if not any(AWS_TEMP_CREDS.values()):
+ self.skipTest("AWS Temp environment credentials are not set")
+ if "awsTemporaryNoSessionToken" in kms_providers:
+ kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS
+ del kms_providers["awsTemporaryNoSessionToken"]
+ if not any(AWS_TEMP_NO_SESSION_CREDS.values()):
+ self.skipTest("AWS Temp environment credentials are not set")
+ if "azure" in kms_providers:
+ kms_providers["azure"] = AZURE_CREDS
+ if not any(AZURE_CREDS.values()):
+ self.skipTest("Azure environment credentials are not set")
+ if "gcp" in kms_providers:
+ kms_providers["gcp"] = GCP_CREDS
+ if not any(GCP_CREDS.values()):
+ self.skipTest("GCP environment credentials are not set")
+ if "kmip" in kms_providers:
+ kms_providers["kmip"] = KMIP_CREDS
+ opts["kms_tls_options"] = DEFAULT_KMS_TLS
+ if "key_vault_namespace" not in opts:
+ opts["key_vault_namespace"] = "keyvault.datakeys"
+ if "extra_options" in opts:
+ opts.update(camel_to_snake_args(opts.pop("extra_options")))
+
+ opts = dict(opts)
+ return AutoEncryptionOpts(**opts)
+
+ def parse_client_options(self, opts):
+ """Override clientOptions parsing to support autoEncryptOpts."""
+ encrypt_opts = opts.pop("autoEncryptOpts", None)
+ if encrypt_opts:
+ opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts)
+
+ return super().parse_client_options(opts)
+
+ def get_object_name(self, op):
+ """Default object is collection."""
+ return op.get("object", "collection")
+
+ def maybe_skip_scenario(self, test):
+ super().maybe_skip_scenario(test)
+ desc = test["description"].lower()
+ if (
+ "timeoutms applied to listcollections to get collection schema" in desc
+ and sys.platform in ("win32", "darwin")
+ ):
+ self.skipTest("PYTHON-3706 flaky test on Windows/macOS")
+ if "type=symbol" in desc:
+ self.skipTest("PyMongo does not support the symbol type")
+ if "timeoutms applied to listcollections to get collection 
schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") + + async def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = async_client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + await coll.delete_many({}) + if key_vault_data: + await coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = async_client_context.client.get_database(db_name, codec_options=OPTS) + await db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + await db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + await coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + +def create_test(scenario_def, test, name): + @async_client_context.require_test_commands + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = AsyncSpecTestCreator(create_test, AsyncTestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator.create_tests() + +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] + ) + ) + +# Prose Tests +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) + + +async def create_with_schema(coll, json_schema): + """Create and return a Collection with a jsonSchema.""" + await coll.with_options(write_concern=WriteConcern(w="majority")).drop() + return await coll.database.create_collection( + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) + + +async def create_key_vault(vault, *data_keys): + """Create the key vault collection with optional data keys.""" + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) + await vault.drop() + if data_keys: + await vault.insert_many(data_keys) + await vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + return vault + + +class TestDataKeyDoubleEncryption(AsyncEncryptionIntegrationTest): + client_encrypted: AsyncMongoClient + client_encryption: AsyncClientEncryption + listener: OvertCommandListener + vault: Any + + KMS_PROVIDERS = ALL_KMS_PROVIDERS + + MASTER_KEYS = { + "aws": { + "region": "us-east-1", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, + } + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + await self.client.db.coll.drop() + self.vault = await create_key_vault(self.client.keyvault.datakeys) + + # Configure the encrypted field via the local schema_map option. + schemas = { + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + } + } + }, + } + } + opts = AutoEncryptionOpts( + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.listener.reset() + + async def asyncTearDown(self) -> None: + await self.vault.drop() + + async def run_test(self, provider_name): + # Create data key. + master_key: Any = self.MASTER_KEYS[provider_name] + datakey_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] + ) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.started_events[-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = await self.vault.find({"_id": datakey_id}).to_list() + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) + + # Encrypt by key_id. + encrypted = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=datakey_id, + ) + self.assertEncrypted(encrypted) + await self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = await self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore + + # Encrypt by key_alt_name. + encrypted_altname = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name=f"{provider_name}_altname", + ) + self.assertEqual(encrypted_altname, encrypted) + + # Explicitly encrypting an auto encrypted field. 
+ with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): + await self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) + + async def test_data_key_local(self): + await self.run_test("local") + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_data_key_aws(self): + await self.run_test("aws") + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_data_key_azure(self): + await self.run_test("azure") + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_data_key_gcp(self): + await self.run_test("gcp") + + async def test_data_key_kmip(self): + await self.run_test("kmip") + + +class TestExternalKeyVault(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def _test_external_key_vault(self, with_external_key_vault): + await self.client.db.coll.drop() + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) + self.addAsyncCleanup(vault.drop) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + if with_external_key_vault: + key_vault_client = await self.async_rs_or_single_client( + username="fake-user", password="fake-pwd" + ) + else: + key_vault_client = async_client_context.client + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) + + client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + client_encryption = self.create_client_encryption( + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encryption.encrypt( + "test", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=LOCAL_KEY_ID, + ) + # AuthenticationFailed error. 
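+ # (server error code 18 is AuthenticationFailed)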
+ self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) + + async def test_external_key_vault_1(self): + await self._test_external_key_vault(True) + + async def test_external_key_vault_2(self): + await self._test_external_key_vault(False) + + +class TestViews(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def test_views_are_prohibited(self): + await self.client.db.view.drop() + await self.client.db.create_collection("view", viewOn="coll") + self.addAsyncCleanup(self.client.db.view.drop) + + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") + client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): + await client_encrypted.db.view.insert_one({}) + + +class TestCorpus(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + + @staticmethod + def kms_providers(): + return ALL_KMS_PROVIDERS + + @staticmethod + def fix_up_schema(json_schema): + """Remove deprecated symbol/dbPointer types from json schema.""" + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] + return json_schema + + @staticmethod + def fix_up_curpus(corpus): + """Disallow deprecated symbol/dbPointer types from corpus test.""" + for key in corpus: + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False + return corpus + + @staticmethod + def fix_up_curpus_encrypted(corpus_encrypted, corpus): + """Fix the expected values for deprecated symbol/dbPointer types.""" + for key in corpus_encrypted: + if "_symbol_" in key or "_dbPointer_" in key: + corpus_encrypted[key] = copy.deepcopy(corpus[key]) + return corpus_encrypted + + async def _test_corpus(self, opts): + # Drop and create the collection 'db.coll' with jsonSchema. 
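+ # The corpus test encrypts every allowed corpus field, either automatically
+ # (via the jsonSchema) or explicitly (via AsyncClientEncryption), then
+ # compares both the stored ciphertext and the decrypted round trip against
+ # the spec fixtures.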
+ coll = await create_with_schema( + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) + self.addAsyncCleanup(coll.drop) + + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) + self.addAsyncCleanup(vault.drop) + + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + client_encryption = self.create_client_encryption( + self.kms_providers(), + "keyvault.datakeys", + async_client_context.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) + corpus_copied: SON = SON() + for key, value in corpus.items(): + corpus_copied[key] = copy.deepcopy(value) + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + if value["method"] == "auto": + continue + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": + kwargs = {"key_id": LOCAL_KEY_ID} + elif kms == "aws": + kwargs = {"key_id": AWS_KEY_ID} + elif kms == "azure": + kwargs = {"key_id": AZURE_KEY_ID} + elif kms == "gcp": + kwargs = {"key_id": GCP_KEY_ID} + else: + kwargs = {"key_id": KMIP_KEY_ID} + else: + kwargs = {"key_alt_name": kms} + + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + else: + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random + + try: + encrypted_val = await client_encryption.encrypt( + value["value"], + algo, + **kwargs, # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail(f"encrypt should have failed: {key!r}: {value!r}") + corpus_copied[key]["value"] = encrypted_val + except Exception: + if value["allowed"]: + tb = traceback.format_exc() + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") + + await client_encrypted.db.coll.insert_one(corpus_copied) + corpus_decrypted = await client_encrypted.db.coll.find_one() + self.assertEqual(corpus_decrypted, corpus) + + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) + corpus_encrypted_actual = await coll.find_one() + for key, value in corpus_encrypted_actual.items(): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = await client_encryption.decrypt(value["value"]) + decrypt_expected = await client_encryption.decrypt( + corpus_encrypted_expected[key]["value"] + ) + self.assertEqual(decrypt_actual, decrypt_expected, key) + else: + self.assertEqual(value["value"], corpus[key]["value"], key) + + async def test_corpus(self): + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS + ) + await self._test_corpus(opts) + + async def 
test_corpus_local_schema(self): + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + await self._test_corpus(opts) + + +_2_MiB = 2097152 +_16_MiB = 16777216 + + +class TestBsonSizeBatches(AsyncEncryptionIntegrationTest): + """Prose tests for BSON size limits and batch splitting.""" + + coll: AsyncCollection + coll_encrypted: AsyncCollection + client_encrypted: AsyncMongoClient + listener: OvertCommandListener + + async def asyncSetUp(self): + await super().asyncSetUp() + db = async_client_context.client.db + self.coll = db.coll + await self.coll.drop() + # Configure the encrypted 'db.coll' collection via jsonSchema. + json_schema = json_data("limits", "limits-schema.json") + await db.create_collection( + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) + + # Create the key vault. + coll = async_client_context.client.get_database( + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] + await coll.drop() + await coll.insert_one(json_data("limits", "limits-key.json")) + + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + self.listener = OvertCommandListener() + self.client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] + ) + self.coll_encrypted = self.client_encrypted.db.coll + + async def asyncTearDown(self) -> None: + await self.coll_encrypted.drop() + + async def test_01_insert_succeeds_under_2MiB(self): + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "over_2mib_under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_02_insert_succeeds_over_2MiB_post_encryption(self): + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_2mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_03_bulk_batch_split(self): + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + async def test_04_bulk_batch_split(self): + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} + doc1.update(limits_doc) + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} + doc2.update(limits_doc) + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + async def test_05_insert_succeeds_just_under_16MiB(self): + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. 
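+ # (bulk_write is subject to the same 16 MiB post-encryption limit;
+ # test_06 below covers the failure case)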
+ doc["_id"] = "under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_06_insert_fails_over_16MiB(self): + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + doc.update(limits_doc) + + with self.assertRaisesRegex(WriteError, "object to insert too large"): + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_16mib_bulk" + with self.assertRaises(BulkWriteError) as ctx: + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + err = ctx.exception.details["writeErrors"][0] + self.assertIn(err["code"], [2, 10334]) + self.assertIn("object to insert too large", err["errmsg"]) + + +class TestCustomEndpoint(AsyncEncryptionIntegrationTest): + """Prose tests for creating data keys with a custom endpoint.""" + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def asyncSetUp(self): + await super().asyncSetUp() + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } + self.client_encryption = self.create_client_encryption( + kms_providers=kms_providers, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" + self.client_encryption_invalid = self.create_client_encryption( + kms_providers=kms_providers_invalid, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self._kmip_host_error = None + self._invalid_host_error = None + + async def run_test_expected_success(self, provider_name, master_key): + data_key_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key + ) + encrypted = await self.client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption.decrypt(encrypted)) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_01_aws_region_key(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_02_aws_region_key_endpoint(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_03_aws_region_key_endpoint_port(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + async def 
test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with self.assertRaisesRegex(EncryptionError, "localhost:12345"): + await self.client_encryption.create_data_key("kmip", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_05_aws_endpoint_wrong_region(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com", + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for EncryptionError to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_06_aws_endpoint_invalid_host(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "doesnotexist.invalid", + } + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_07_azure(self): + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + await self.run_test_expected_success("azure", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("azure", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_08_gcp_valid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443", + } + await self.run_test_expected_success("gcp", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_09_gcp_invalid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "doesnotexist.invalid:443", + } + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. 
HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + await self.client_encryption.create_data_key("gcp", master_key=master_key) + + def dns_error(self, host, port): + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) + return self._kmip_host_error + + async def test_10_kmip_invalid_endpoint(self): + key = {"keyId": "1"} + await self.run_test_expected_success("kmip", key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption_invalid.create_data_key("kmip", key) + + async def test_11_kmip_master_key_endpoint(self): + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} + await self.run_test_expected_success("kmip", key) + # Override invalid endpoint: + data_key_id = await self.client_encryption_invalid.create_data_key("kmip", master_key=key) + encrypted = await self.client_encryption_invalid.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption_invalid.decrypt(encrypted)) + + async def test_12_kmip_master_key_invalid_endpoint(self): + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption.create_data_key("kmip", key) + + +class AzureGCPEncryptionTestMixin(AsyncEncryptionIntegrationTest): + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" + client: AsyncMongoClient + + async def _setup(self): + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) + await create_key_vault(keyvault, self.DEK) + + async def _test_explicit(self, expectation): + await self._setup() + client_encryption = self.create_client_encryption( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + async_client_context.client, + OPTS, + ) + + ciphertext = await client_encryption.encrypt( + "string0", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=self.DEK["_id"], + ) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(await client_encryption.decrypt(ciphertext), "string0") + + async def _test_automatic(self, expectation_extjson, payload): + await self._setup() + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + keyvault_namespace, + schema_map=self.SCHEMA_MAP, + ) + + insert_listener = AllowListEventListener("insert") + client = await self.async_rs_or_single_client( + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) + await coll.drop() + + expected_document = 
json_util.loads(expectation_extjson, json_options=JSON_OPTS) + + await coll.insert_one(payload) + event = insert_listener.started_events[0] + inserted_doc = event.command["documents"][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = await coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() + + async def test_explicit(self): + return await self._test_explicit( + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() + + async def test_explicit(self): + return await self._test_explicit( + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests +class TestDeadlockProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client_test = await self.async_rs_or_single_client( + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = await self.async_rs_or_single_client( + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) + + await self.client_test.keyvault.datakeys.drop() + await self.client_test.db.coll.drop() + await self.client_test.keyvault.datakeys.insert_one( + json_data("external", "external-key.json") + ) + _ = await self.client_test.db.create_collection( + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) + + client_encryption = self.create_client_encryption( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + 
key_vault_client=self.client_test, + codec_options=OPTS, + ) + self.ciphertext = await client_encryption.encrypt( + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + + async def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = await self.async_rs_or_single_client( + readConcernLevel="majority", + w="majority", + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener], + ) + + if auto_encryption_opts._bypass_auto_encryption is True: + await self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption is False: + await client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = await client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + async def test_case_1(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_2(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_3(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_4(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = 
self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_5(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_6(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_7(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_8(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events +class TestDecryptProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + 
await super().asyncSetUp() + self.client = async_client_context.client + await self.client.db.drop_collection("decryption_events") + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = await self.client_encryption.create_data_key("local") + self.cipher_text = await self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + + async def test_01_command_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: + self.assertEqual(event.failure["code"], 123) + + async def test_02_network_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") + + async def test_03_decrypt_error(self): + await self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + async def test_04_decrypt_success(self): + await self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(AsyncEncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", + ) + async def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. 
+ self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri="mongodb://localhost:27027/", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + async def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = self.simple_client( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) + with self.assertRaises(ServerSelectionTimeoutError): + await mongocryptd_client.admin.command("ping") + + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_via_loading_shared_library(self): + await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted((await async_client_context.client.db.coll.find_one({}))["encrypted"]) + no_mongocryptd_client = self.simple_client( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ServerSelectionTimeoutError): + await no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + 
await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + + +# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests +class TestKmsTLSProse(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + self.patch_system_certs(CA_PEM) + self.client_encrypted = self.create_client_encryption( + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) + + async def test_invalid_kms_certificate_expired(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9000", + } + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encrypted.create_data_key("aws", master_key=key) + + async def test_invalid_hostname_in_kms_certificate(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9001", + } + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encrypted.create_data_key("aws", master_key=key) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests +class TestKmsTLSOptions(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002" + providers["gcp"]["endpoint"] = "127.0.0.1:9002" + kms_tls_opts_ca_only = { + "aws": {"tlsCAFile": CA_PEM}, + "azure": {"tlsCAFile": CA_PEM}, + "gcp": {"tlsCAFile": CA_PEM}, + "kmip": {"tlsCAFile": CA_PEM}, + } + self.client_encryption_no_client_cert = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + # 2, same providers as above but with tlsCertificateKeyFile. 
+ kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) + for p in kms_tls_opts: + kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM + self.client_encryption_with_tls = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + # 3, update endpoints to expired host. + providers: dict = copy.deepcopy(providers) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" + providers["gcp"]["endpoint"] = "127.0.0.1:9000" + providers["kmip"]["endpoint"] = "127.0.0.1:9000" + self.client_encryption_expired = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + # 3, update endpoints to invalid host. + providers: dict = copy.deepcopy(providers) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" + providers["gcp"]["endpoint"] = "127.0.0.1:9001" + providers["kmip"]["endpoint"] = "127.0.0.1:9001" + self.client_encryption_invalid_hostname = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + # Errors when client has no cert, some examples: + # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) + self.cert_error = ( + "certificate required|SSL handshake failed|" + "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE" + ) + # On Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += "|EOF" + # On Windows this error might be: + # [WinError 10054] An existing connection was forcibly closed by the remote host + if sys.platform == "win32": + self.cert_error += "|forcibly closed" + # 4, Test named KMS providers. + providers = { + "aws:no_client_cert": AWS_CREDS, + "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:no_client_cert": KMIP_CREDS, + "aws:with_tls": AWS_CREDS, + "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:with_tls": KMIP_CREDS, + } + no_cert = {"tlsCAFile": CA_PEM} + with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} + kms_tls_opts_4 = { + "aws:no_client_cert": no_cert, + "azure:no_client_cert": no_cert, + "gcp:no_client_cert": no_cert, + "kmip:no_client_cert": no_cert, + "aws:with_tls": with_cert, + "azure:with_tls": with_cert, + "gcp:with_tls": with_cert, + "kmip:with_tls": with_cert, + } + self.client_encryption_with_names = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 + ) + + async def test_01_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("aws", key) + # "parse error" here means that the TLS handshake succeeded. 
+ with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_tls.create_data_key("aws", key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key["endpoint"] = "127.0.0.1:9000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("aws", key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + key["endpoint"] = "127.0.0.1:9001" + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("aws", key) + + async def test_02_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("azure", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("azure", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("azure", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("azure", key) + + async def test_03_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("gcp", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("gcp", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("gcp", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("gcp", key) + + async def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("kmip") + await self.client_encryption_with_tls.create_data_key("kmip") + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("kmip") + # Invalid cert hostname error. 
+ with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("kmip") + + async def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + async def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. 
+ with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + await self.client_encryption_with_names.create_data_key("kmip:with_tls") + + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = await self.client_encryption.create_data_key( + "local", key_alt_names=["def"] + ) + + async def test_01_create_key(self): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + async def test_02_add_key_alt_name(self): + key_id = await self.client_encryption.create_data_key("local") + await self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = await self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + await self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = await self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption +class TestExplicitQueryableEncryption(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + await self.client.drop_database(self.db) + await self.db.command( + "create", "explicit_encryption", encryptedFields=self.encrypted_fields + ) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + async def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + 
find_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedIndexed"], val) + + async def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = await self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + async def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.UNINDEXED, self.key1_id + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = ( + await self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list() + ) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + async def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + decrypted = await self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + async def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = await self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = await self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + +# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup +class TestLookupProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + await encrypted_client.drop_database("db") + + key_doc = json_data("etc", "data", "lookup", "key-doc.json") + await create_key_vault(encrypted_client.db.keyvault, key_doc) + 
self.addAsyncCleanup(async_client_context.client.drop_database, "db") + + await encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + await encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + await encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", "data", "lookup", "schema-qe.json") + ) + await encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + await encrypted_client.db.create_collection("no_schema") + await encrypted_client.db.create_collection("no_schema2") + + unencrypted_client = await self.async_rs_or_single_client() + + await encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = await unencrypted_client.db.csfle.find_one() + self.assertIsInstance(doc["csfle"], Binary) + await encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = await unencrypted_client.db.csfle2.find_one() + self.assertIsInstance(doc["csfle2"], Binary) + await encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = await unencrypted_client.db.qe.find_one() + self.assertIsInstance(doc["qe"], Binary) + await encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = await unencrypted_client.db.qe2.find_one() + self.assertIsInstance(doc["qe2"], Binary) + await encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + await encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) + + await encrypted_client.close() + await unencrypted_client.close() + + @async_client_context.require_version_min(8, 1, -1) + async def test_1_csfle_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_2_qe_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_3_no_schema_joins_csfle(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + 
"as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_4_no_schema_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_5_csfle_joins_csfle2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_6_qe_joins_qe2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_7_no_schema_joins_no_schema2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_8_csfle_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, 
{"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("not supported", str(exc)) + + @async_client_context.require_version_max(8, 1, -1) + async def test_9_error(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("Upgrade", str(exc)) + + +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap +class TestRewrapWithSeparateClientEncryption(AsyncEncryptionIntegrationTest): + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + async def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + await self.run_test(src_provider, dst_provider) + + async def run_test(self, src_provider, dst_provider): + # Step 1. Drop the collection ``keyvault.datakeys``. + await self.client.keyvault.drop_collection("datakeys") + + # Step 2. Create a ``AsyncClientEncryption`` object named ``client_encryption1`` + client_encryption1 = self.create_client_encryption( + key_vault_client=self.client, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. + key_id = await client_encryption1.create_data_key( + master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider + ) + + # Step 4. Call ``client_encryption1.encrypt`` with the value "test" + cipher_text = await client_encryption1.encrypt( + "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + + # Step 5. Create a ``AsyncClientEncryption`` object named ``client_encryption2`` + client2 = await self.async_rs_or_single_client() + client_encryption2 = self.create_client_encryption( + key_vault_client=client2, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. + rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( + {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider] + ) + + self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1) + + # 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result1 = await client_encryption1.decrypt(cipher_text) + self.assertEqual(decrypt_result1, "test") + + # 8. 
Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result2 = await client_encryption2.decrypt(cipher_text) + self.assertEqual(decrypt_result2, "test") + + # 8. Case 2. Provider is not optional when master_key is given. + with self.assertRaises(ConfigurationError): + rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( + {}, master_key=self.MASTER_KEYS[dst_provider] + ) + + +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials +class TestOnDemandAWSCredentials(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + + @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") + async def test_01_failure(self): + self.client_encryption = self.create_client_encryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("aws", self.master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_02_success(self): + self.client_encryption = self.create_client_encryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + ) + await self.client_encryption.create_data_key("aws", self.master_key) + + +class TestQueryableEncryptionDocsExample(AsyncEncryptionIntegrationTest): + # Queryable Encryption is not supported on Standalone topology. + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_queryable_encryption(self): + # AsyncMongoClient to use in testing that handles auth/tls/etc, + # and cleanup. + async def AsyncMongoClient(**kwargs): + c = await self.async_rs_or_single_client(**kwargs) + return c + + # Drop data from prior test runs. + await self.client.keyvault.datakeys.drop() + await self.client.drop_database("docs_examples") + + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + # Create two data keys. + key_vault_client = await AsyncMongoClient() + client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() + ) + key1_id = await client_encryption.create_data_key("local") + key2_id = await client_encryption.create_data_key("local") + + # Create an encryptedFieldsMap. + encrypted_fields_map = { + "docs_examples.encrypted": { + "fields": [ + { + "path": "encrypted_indexed", + "bsonType": "string", + "keyId": key1_id, + "queries": [ + { + "queryType": "equality", + }, + ], + }, + { + "path": "encrypted_unindexed", + "bsonType": "string", + "keyId": key2_id, + }, + ], + }, + } + + # Create an Queryable Encryption collection. + opts = AutoEncryptionOpts( + kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + ) + encrypted_client = await AsyncMongoClient(auto_encryption_opts=opts) + + # Create a Queryable Encryption collection "docs_examples.encrypted". + # Because docs_examples.encrypted is in encrypted_fields_map, it is + # created with Queryable Encryption support. 
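+ # create_collection() reads the encryptedFieldsMap from the client options, so no explicit encryptedFields argument is passed here.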
+ db = encrypted_client.docs_examples + encrypted_coll = await db.create_collection("encrypted") + + # Auto encrypt an insert and find. + + # Encrypt an insert. + await encrypted_coll.insert_one( + { + "_id": 1, + "encrypted_indexed": "indexed_value", + "encrypted_unindexed": "unindexed_value", + } + ) + + # Encrypt a find. + res = await encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = await AsyncMongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = await unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption +class TestRangeQueryProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + + async def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): + find_payload = await self.client_encryption.encrypt_expression( + expression=expression, + key_id=key_id or self.key1_id, + algorithm=Algorithm.RANGE, + query_type=QueryType.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + await self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + async def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + await self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + await self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + async def encrypt_and_cast(i): + return await self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": await encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + await self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. 
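+ # An explicitly encrypted payload must decrypt back to the original value.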
+ insert_payload = await self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(await self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + await self.run_expression_find( + name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts + ) + # Case 2, with UUID key_id + await self.run_expression_find( + name, + expression, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + key_id=self.key1_id.as_uuid(), + ) + + # Case 3. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + await self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + await self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + await self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. 
+ if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + await self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), + max=cast_func(200), + sparsity=1, + trim_factor=1, + precision=2, + ), + ) + + async def test_double_no_precision(self): + await self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1, trim_factor=1), float) + + async def test_double_precision(self): + await self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, trim_factor=1, precision=2), + float, + ) + + async def test_decimal_no_precision(self): + await self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1, trim_factor=1), lambda x: Decimal128(str(x)) + ) + + async def test_decimal_precision(self): + await self.run_test_cases( + "DecimalPrecision", + RangeOpts( + min=Decimal128("0.0"), + max=Decimal128("200.0"), + sparsity=1, + trim_factor=1, + precision=2, + ), + lambda x: Decimal128(str(x)), + ) + + async def test_datetime(self): + await self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1, trim_factor=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + async def test_int(self): + await self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults +class TestRangeQueryDefaultsProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + await self.client.drop_database(self.db) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS + ) + self.key_id = await self.client_encryption.create_data_key("local") + opts = RangeOpts(min=0, max=1000) + self.payload_defaults = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + + async def test_uses_libmongocrypt_defaults(self): + opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6) + payload = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) == len(self.payload_defaults) + + async def test_accepts_trim_factor_0(self): + opts = RangeOpts(min=0, max=1000, trim_factor=0) + payload = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) > len(self.payload_defaults) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests +class TestKmsRetryProse(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + # 1, create client with only tlsCAFile. 
+ providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003" + providers["gcp"]["endpoint"] = "127.0.0.1:9003" + kms_tls_opts = { + p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers + } + self.client_encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + + async def http_post(self, path, data=None): + # Note, the connection to the mock server needs to be closed after + # each request because the server is single threaded. + ctx = ssl.create_default_context(cafile=CA_PEM) + ctx.load_cert_chain(CLIENT_PEM) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) + try: + if data is not None: + headers = {"Content-type": "application/json"} + body = json.dumps(data) + else: + headers = {} + body = None + conn.request("POST", path, body, headers) + res = conn.getresponse() + res.read() + finally: + conn.close() + + async def _test(self, provider, master_key): + await self.http_post("/reset") + # Case 1: createDataKey and encrypt with TCP retry + await self.http_post("/set_failpoint/network", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/network", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 2: createDataKey and encrypt with HTTP retry + await self.http_post("/set_failpoint/http", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/http", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 3: createDataKey fails after too many retries + await self.http_post("/set_failpoint/network", {"count": 4}) + with self.assertRaisesRegex(EncryptionError, "KMS request failed after"): + await self.client_encryption.create_data_key(provider, master_key=master_key) + + async def test_kms_retry(self): + if IS_PYOPENSSL: + self.skipTest( + "PyOpenSSL does not support a required method for this test, Connection.makefile" + ) + await self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) + await self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) + await self._test( + "gcp", + { + "projectId": "foo", + "location": "bar", + "keyRing": "baz", + "keyName": "qux", + "endpoint": "127.0.0.1:9003", + }, + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + + async def 
test_01_simple_create(self): + coll, _ = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + await coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + async def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + await self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + async def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. + with self.assertRaisesRegex( + EncryptedCollectionError, + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + async def test_04_insert_encrypted(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = await self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + await coll.insert_one({"ssn": encrypted_value}) + + async def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + async def test_options_forward(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + async def test_mixed_null_keyids(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + await self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + await coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + async def 
test_create_datakey_fails(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + async def test_create_failure(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + async def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + await self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + await self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 2, -1) + @async_client_context.require_libmongocrypt_min(1, 15, 1) + @async_client_context.require_pymongocrypt_min(1, 16, 0) + async def asyncSetUp(self): + await super().asyncSetUp() + # Load the file key1-document.json as key1Document. + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. 
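+ # The create_key_vault test helper resets keyvault.datakeys, inserts key1Document, and leaves the unique keyAltNames index in place.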
+ self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + await db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + await self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + await db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + await self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. + coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + async def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. 
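+ # Note (assumption based on the QE text search spec): strMinQueryLength and
+ # strMaxQueryLength below bound the query-string lengths the encrypted
+ # prefix index accepts, so "foo" (length 3) falls inside the [2, 10] range.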
+ text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. 
+ self.assertIsNone(value) + + async def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". 
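+ # pymongo is expected to surface the libmongocrypt failure as an
+ # EncryptionError whose .cause is the underlying MongoCryptError: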
+ self.assertIsInstance(ctx.exception.cause, MongoCryptError) + self.assertEqual( + str(ctx.exception), "contention factor is required for textPreview algorithm" + ) + + +def start_mongocryptd(port) -> None: + args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] + _spawn_daemon(args) + + +@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") +class TestNoSessionsSupport(AsyncEncryptionIntegrationTest): + mongocryptd_client: AsyncMongoClient + MONGOCRYPTD_PORT = 27020 + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + start_mongocryptd(self.MONGOCRYPTD_PORT) + + self.listener = OvertCommandListener() + self.mongocryptd_client = self.simple_client( + f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] + ) + + hello = await self.mongocryptd_client.db.command("hello") + self.assertNotIn("logicalSessionTimeoutMinutes", hello) + + async def test_implicit_session_ignored_when_unsupported(self): + self.listener.reset() + with self.assertRaises(OperationFailure): + await self.mongocryptd_client.db.test.find_one() + + self.assertNotIn("lsid", self.listener.started_events[0].command) + + with self.assertRaises(OperationFailure): + await self.mongocryptd_client.db.test.insert_one({"x": 1}) + + self.assertNotIn("lsid", self.listener.started_events[1].command) + + await self.mongocryptd_client.close() + + async def test_explicit_session_errors_when_unsupported(self): + self.listener.reset() + async with self.mongocryptd_client.start_session() as s: + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + await self.mongocryptd_client.db.test.find_one(session=s) + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + await self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + + await self.mongocryptd_client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py new file mode 100644 index 0000000000..21770f490c --- /dev/null +++ b/test/asynchronous/test_examples.py @@ -0,0 +1,1446 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MongoDB documentation examples in Python.""" +from __future__ import annotations + +import asyncio +import datetime +import functools +import sys +import threading +import time +from test.asynchronous.helpers import ConcurrentRunner + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import async_wait_until + +import pymongo +from pymongo.errors import ConnectionFailure, OperationFailure +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSampleShellCommands(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.inventory.drop() + + async def asyncTearDown(self): + # Run after every test. + await self.db.inventory.drop() + await self.client.drop_database("pymongo_test") + + async def test_first_three_examples(self): + db = self.db + + # Start Example 1 + await db.inventory.insert_one( + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) + # End Example 1 + + self.assertEqual(await db.inventory.count_documents({}), 1) + + # Start Example 2 + cursor = db.inventory.find({"item": "canvas"}) + # End Example 2 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 3 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) + # End Example 3 + + self.assertEqual(await db.inventory.count_documents({}), 4) + + async def test_query_top_level_fields(self): + db = self.db + + # Start Example 6 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 6 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 7 + cursor = db.inventory.find({}) + # End Example 7 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 9 + cursor = db.inventory.find({"status": "D"}) + # End Example 9 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 10 + cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) + # End Example 10 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 11 + cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) + # End Example 11 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 12 + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + # End Example 12 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 13 + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) + 
# End Example 13 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_embedded_documents(self): + db = self.db + + # Start Example 14 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 14 + + # Start Example 15 + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) + # End Example 15 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 16 + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) + # End Example 16 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 17 + cursor = db.inventory.find({"size.uom": "in"}) + # End Example 17 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 18 + cursor = db.inventory.find({"size.h": {"$lt": 15}}) + # End Example 18 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 19 + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + # End Example 19 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_arrays(self): + db = self.db + + # Start Example 20 + await db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) + # End Example 20 + + # Start Example 21 + cursor = db.inventory.find({"tags": ["red", "blank"]}) + # End Example 21 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 22 + cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) + # End Example 22 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 23 + cursor = db.inventory.find({"tags": "red"}) + # End Example 23 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 24 + cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) + # End Example 24 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 25 + cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) + # End Example 25 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 26 + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + # End Example 26 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 27 + cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) + # End Example 27 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 28 + cursor = db.inventory.find({"tags": {"$size": 3}}) + # End Example 28 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_array_of_documents(self): + db = self.db + + # Start Example 29 + await db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + {"warehouse": "A", "qty": 
5}, + {"warehouse": "C", "qty": 15}, + ], + }, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, + { + "item": "paper", + "instock": [ + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, + ], + }, + { + "item": "planner", + "instock": [ + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, + ], + }, + { + "item": "postcard", + "instock": [ + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, + ], + }, + ] + ) + # End Example 29 + + # Start Example 30 + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) + # End Example 30 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 31 + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) + # End Example 31 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 32 + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) + # End Example 32 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 33 + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) + # End Example 33 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 34 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + # End Example 34 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 35 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + # End Example 35 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 36 + cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) + # End Example 36 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 37 + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) + # End Example 37 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_null(self): + db = self.db + + # Start Example 38 + await db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) + # End Example 38 + + # Start Example 39 + cursor = db.inventory.find({"item": None}) + # End Example 39 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 40 + cursor = db.inventory.find({"item": {"$type": 10}}) + # End Example 40 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 41 + cursor = db.inventory.find({"item": {"$exists": False}}) + # End Example 41 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_projection(self): + db = self.db + + # Start Example 42 + await db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) + # End Example 42 + + # Start Example 43 + cursor = db.inventory.find({"status": "A"}) + # End Example 43 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 44 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) + # End 
Example 44 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 45 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + # End Example 45 + + async for doc in cursor: + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 46 + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) + # End Example 46 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 47 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + # End Example 47 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + size = doc["size"] + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) + + # Start Example 48 + cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) + # End Example 48 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) + size = doc["size"] + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) + + # Start Example 49 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + # End Example 49 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + for subdoc in doc["instock"]: + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) + + # Start Example 50 + cursor = db.inventory.find( + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) + # End Example 50 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 1) + + async def test_update_and_replace(self): + db = self.db + + # Start Example 51 + await db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": 
"cm"}, + "status": "A", + }, + ] + ) + # End Example 51 + + # Start Example 52 + await db.inventory.update_one( + {"item": "paper"}, + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 52 + + async for doc in db.inventory.find({"item": "paper"}): + self.assertEqual(doc["size"]["uom"], "cm") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 53 + await db.inventory.update_many( + {"qty": {"$lt": 50}}, + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 53 + + async for doc in db.inventory.find({"qty": {"$lt": 50}}): + self.assertEqual(doc["size"]["uom"], "in") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 54 + await db.inventory.replace_one( + {"item": "paper"}, + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) + # End Example 54 + + async for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): + self.assertEqual(len(doc.keys()), 2) + self.assertIn("item", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 2) + + async def test_delete(self): + db = self.db + + # Start Example 55 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 55 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 57 + await db.inventory.delete_many({"status": "A"}) + # End Example 57 + + self.assertEqual(await db.inventory.count_documents({}), 3) + + # Start Example 58 + await db.inventory.delete_one({"status": "D"}) + # End Example 58 + + self.assertEqual(await db.inventory.count_documents({}), 2) + + # Start Example 56 + await db.inventory.delete_many({}) + # End Example 56 + + self.assertEqual(await db.inventory.count_documents({}), 0) + + @async_client_context.require_change_streams + async def test_change_streams(self): + db = self.db + done = False + + async def insert_docs(): + nonlocal done + while not done: + await db.inventory.insert_one({"username": "alice"}) + await db.inventory.delete_one({"username": "alice"}) + await asyncio.sleep(0.005) + + t = ConcurrentRunner(target=insert_docs) + await t.start() + + try: + # 1. 
The database for reactive, real-time applications + # Start Changestream Example 1 + cursor = await db.inventory.watch() + await anext(cursor) + # End Changestream Example 1 + await cursor.close() + + # Start Changestream Example 2 + cursor = await db.inventory.watch(full_document="updateLookup") + await anext(cursor) + # End Changestream Example 2 + await cursor.close() + + # Start Changestream Example 3 + resume_token = cursor.resume_token + cursor = await db.inventory.watch(resume_after=resume_token) + await anext(cursor) + # End Changestream Example 3 + await cursor.close() + + # Start Changestream Example 4 + pipeline = [ + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, + ] + cursor = await db.inventory.watch(pipeline=pipeline) + await anext(cursor) + # End Changestream Example 4 + await cursor.close() + finally: + done = True + await t.join() + + async def test_aggregate_examples(self): + db = self.db + + # Start Aggregation Example 1 + await db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) + # End Aggregation Example 1 + + # Start Aggregation Example 2 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) + # End Aggregation Example 2 + + # Start Aggregation Example 3 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}}, + } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, + } + }, + ] + ) + # End Aggregation Example 3 + + # Start Aggregation Example 4 + await db.air_alliances.aggregate( + [ + { + "$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}], + "as": "airlines", + } + }, + { + "$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]}, + } + }, + } + }, + ] + ) + # End Aggregation Example 4 + + @async_client_context.require_version_min(4, 4) + async def test_aggregate_projection_example(self): + db = self.db + + # Start Aggregation Projection Example 1 + db.inventory.find( + {}, + { + "_id": 0, + "item": 1, + "status": { + "$switch": { + "branches": [ + {"case": {"$eq": ["$status", "A"]}, "then": "Available"}, + {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"}, + ], + "default": "No status found", + } + }, + "area": { + "$concat": [ + {"$toString": {"$multiply": ["$size.h", "$size.w"]}}, + " ", + "$size.uom", + ] + }, + "reportNumber": {"$literal": 1}, + }, + ) + + # End Aggregation Projection Example 1 + + async def test_commands(self): + db = self.db + await db.restaurants.insert_one({}) + + # Start runCommand Example 1 + await db.command("buildInfo") + # End runCommand Example 1 + + # Start runCommand Example 2 + await db.command("count", "restaurants") + # End runCommand Example 2 + + async def test_index_management(self): + db = self.db + + # Start Index Example 1 + await 
db.records.create_index("score") + # End Index Example 1 + + # Start Index Example 2 + await db.restaurants.create_index( + [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)], + partialFilterExpression={"rating": {"$gt": 5}}, + ) + # End Index Example 2 + + @async_client_context.require_replica_set + async def test_misc(self): + # Marketing examples + client = self.client + self.addAsyncCleanup(client.drop_database, "test") + self.addAsyncCleanup(client.drop_database, "my_database") + + # 2. Tunable consistency controls + collection = client.my_database.my_collection + async with client.start_session() as session: + await collection.insert_one({"_id": 1}, session=session) + await collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) + async for _doc in collection.find({}, session=session): + pass + + # 3. Exploiting the power of arrays + collection = client.test.array_updates_test + await collection.update_one( + {"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}] + ) + + +class TestTransactionExamples(AsyncIntegrationTest): + @async_client_context.require_transactions + async def test_transactions(self): + # Transaction examples + client = self.client + self.addAsyncCleanup(client.drop_database, "hr") + self.addAsyncCleanup(client.drop_database, "reporting") + + employees = client.hr.employees + events = client.reporting.events + await employees.insert_one({"employee": 3, "status": "Active"}) + await events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}}) + + # Start Transactions Intro Example 1 + + async def update_employee_info(session): + employees_coll = session.client.hr.employees + events_coll = session.client.reporting.events + + async with await session.start_transaction( + read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority") + ): + await employees_coll.update_one( + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) + await events_coll.insert_one( + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) + + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # End Transactions Intro Example 1 + + async with client.start_session() as session: + await update_employee_info(session) + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + # Start Transactions Retry Example 1 + async def run_transaction_with_retry(txn_func, session): + while True: + try: + await txn_func(session) # performs transaction + break + except (ConnectionFailure, OperationFailure) as exc: + print("Transaction aborted. 
Caught exception during transaction.") + + # If transient error, retry the whole transaction + if exc.has_error_label("TransientTransactionError"): + print("TransientTransactionError, retrying transaction ...") + continue + else: + raise + + # End Transactions Retry Example 1 + + async with client.start_session() as session: + await run_transaction_with_retry(update_employee_info, session) + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + # Start Transactions Retry Example 2 + async def commit_with_retry(session): + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # End Transactions Retry Example 2 + + # Test commit_with_retry from the previous examples + async def _insert_employee_retry_commit(session): + async with await session.start_transaction(): + await employees.insert_one({"employee": 4, "status": "Active"}, session=session) + await events.insert_one( + {"employee": 4, "status": {"new": "Active", "old": None}}, session=session + ) + + await commit_with_retry(session) + + async with client.start_session() as session: + await run_transaction_with_retry(_insert_employee_retry_commit, session) + + employee = await employees.find_one({"employee": 4}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Active") + + # Start Transactions Retry Example 3 + + async def run_transaction_with_retry(txn_func, session): + while True: + try: + await txn_func(session) # performs transaction + break + except (ConnectionFailure, OperationFailure) as exc: + # If transient error, retry the whole transaction + if exc.has_error_label("TransientTransactionError"): + print("TransientTransactionError, retrying transaction ...") + continue + else: + raise + + async def commit_with_retry(session): + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # Updates two collections in a transaction. + + async def update_employee_info(session): + employees_coll = session.client.hr.employees + events_coll = session.client.reporting.events + + async with await session.start_transaction( + read_concern=ReadConcern("snapshot"), + write_concern=WriteConcern(w="majority"), + read_preference=ReadPreference.PRIMARY, + ): + await employees_coll.update_one( + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) + await events_coll.insert_one( + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) + + await commit_with_retry(session) + + # Start a session. + async with client.start_session() as session: + try: + await run_transaction_with_retry(update_employee_info, session) + except Exception: + # Do something with error. 
+ raise + + # End Transactions Retry Example 3 + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + async def MongoClient(_): + return await self.async_rs_client() + + uriString = None + + # Start Transactions withTxn API Example 1 + + # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g. + # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl' + # For a sharded cluster, connect to the mongos instances; e.g. + # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/' + + client = await MongoClient(uriString) + wc_majority = WriteConcern("majority", wtimeout=1000) + + # Prereq: Create collections. + await client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0}) + await client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0}) + + # Step 1: Define the callback that specifies the sequence of operations to perform inside the transaction. + async def callback(session): + collection_one = session.client.mydb1.foo + collection_two = session.client.mydb2.bar + + # Important: You must pass the session to the operations. + await collection_one.insert_one({"abc": 1}, session=session) + await collection_two.insert_one({"xyz": 999}, session=session) + + # Step 2: Start a client session. + async with client.start_session() as session: + # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). + await session.with_transaction(callback) + + # End Transactions withTxn API Example 1 + + +class TestCausalConsistencyExamples(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def test_causal_consistency(self): + # Causal consistency examples + client = self.client + self.addAsyncCleanup(client.drop_database, "test") + await client.test.drop_collection("items") + await client.test.items.insert_one( + {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()} + ) + + # Start Causal Consistency Example 1 + async with client.start_session(causal_consistency=True) as s1: + current_date = datetime.datetime.today() + items = client.get_database( + "test", + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + await items.update_one( + {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1 + ) + await items.insert_one( + {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1 + ) + # End Causal Consistency Example 1 + + assert s1.cluster_time is not None + assert s1.operation_time is not None + + # Start Causal Consistency Example 2 + async with client.start_session(causal_consistency=True) as s2: + s2.advance_cluster_time(s1.cluster_time) + s2.advance_operation_time(s1.operation_time) + + items = client.get_database( + "test", + read_preference=ReadPreference.SECONDARY, + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + async for item in items.find({"end": None}, session=s2): + print(item) + # End Causal Consistency Example 2 + + +class TestVersionedApiExamples(AsyncIntegrationTest): + @async_client_context.require_version_min(4, 7) + async def test_versioned_api(self): + # Versioned API examples + async def MongoClient(_, server_api): + return await self.async_rs_client(server_api=server_api, 
connect=False) + + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + + await MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + await MongoClient(uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + await MongoClient(uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + await MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + @unittest.skip("PYTHON-3167 count has been added to API version 1") + @async_client_context.require_version_min(4, 7) + async def test_versioned_api_migration(self): + # SERVER-58785 + if await async_client_context.is_topology_type( + ["sharded"] + ) and not async_client_context.version.at_least(5, 0, 2): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = await self.async_rs_client(server_api=ServerApi("1", strict=True)) + await client.db.sales.drop() + + # Start Versioned API Example 5 + def strptime(s): + return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") + + await client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) + # End Versioned API Example 5 + + with self.assertRaisesRegex( + OperationFailure, + "Provided apiStrict:true, but the command count is not in API Version 1", + ): + await client.db.command("count", "sales", query={}) + # Start Versioned API Example 6 + # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} + # End Versioned API Example 6 + + # Start Versioned API Example 7 + await client.db.sales.count_documents({}) + # End Versioned API Example 7 + + # Start Versioned API Example 8 + # 8 + # End Versioned API Example 8 + + +class TestSnapshotQueryExamples(AsyncIntegrationTest): + @async_client_context.require_version_min(5, 0) + async def test_snapshot_query(self): + client = self.client + + if not await async_client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addAsyncCleanup(client.drop_database, "pets") + db = client.pets + await db.drop_collection("cats") + await db.drop_collection("dogs") + await db.cats.insert_one( + {"name": "Whiskers", "color": "white", "age": 10, "adoptable": True} + ) + await db.dogs.insert_one( + {"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True} + ) + 
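+ # Snapshot reads can transiently fail with SnapshotUnavailable (code 246)
+ # until the snapshot window catches up with the inserts above, so poll
+ # each collection first (see check_for_snapshot below).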
+ async def predicate_one(): + return await self.check_for_snapshot(db.cats) + + async def predicate_two(): + return await self.check_for_snapshot(db.dogs) + + await async_wait_until(predicate_two, "success") + await async_wait_until(predicate_one, "success") + + # Start Snapshot Query Example 1 + + db = client.pets + async with client.start_session(snapshot=True) as s: + adoptablePetsCount = ( + await ( + await db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], + session=s, + ) + ).next() + )["adoptableCatsCount"] + + adoptablePetsCount += ( + await ( + await db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], + session=s, + ) + ).next() + )["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addAsyncCleanup(client.drop_database, "retail") + await db.drop_collection("sales") + + saleDate = datetime.datetime.now() + await db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + + async def predicate_three(): + return await self.check_for_snapshot(db.sales) + + await async_wait_until(predicate_three, "success") + + # Start Snapshot Query Example 2 + db = client.retail + async with client.start_session(snapshot=True) as s: + _ = ( + await ( + await db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] + + # End Snapshot Query Example 2 + + async def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + async with self.client.start_session(snapshot=True) as s: + try: + if await collection.find_one(session=s): + return True + return False + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py new file mode 100644 index 0000000000..2a7e9e1f9d --- /dev/null +++ b/test/asynchronous/test_grid_file.py @@ -0,0 +1,873 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the grid_file module.""" +from __future__ import annotations + +import datetime +import io +import sys +import zipfile +from io import BytesIO +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncUnitTest, + async_client_context, + qcheck, + unittest, +) + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test.utils_shared import OvertCommandListener + +from bson.objectid import ObjectId +from gridfs.asynchronous.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + AsyncGridFS, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from pymongo import AsyncMongoClient +from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +class AsyncTestGridFileNoConnect(AsyncUnitTest): + """Test GridFile features on a client that does not connect.""" + + db: AsyncDatabase + + @classmethod + def setUpClass(cls): + cls.db = AsyncMongoClient(connect=False).pymongo_test + + def test_grid_in_custom_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + + self.assertEqual(5, a._id) + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + self.assertEqual("text/html", a.content_type) + self.assertEqual(1000, a.chunk_size) + self.assertEqual(["foo"], a.aliases) + self.assertEqual({"foo": 1, "bar": 2}, a.metadata) + self.assertEqual(3, a.bar) + self.assertEqual("hello", a.baz) + self.assertRaises(AttributeError, getattr, a, "mike") + + b = AsyncGridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) + self.assertEqual("text/html", b.content_type) + self.assertEqual(1000, b.chunk_size) + self.assertEqual(100, b.baz) + + +class AsyncTestGridFile(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) + + async def test_basic(self): + f = AsyncGridIn(self.db.fs, filename="test") + await f.write(b"hello world") + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + # make sure it's still there... 
+ g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + f = AsyncGridIn(self.db.fs, filename="test") + await f.close() + self.assertEqual(2, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"", await g.read()) + + # test that reading 0 returns proper type + self.assertEqual(b"", await g.read(0)) + + async def test_md5(self): + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\n") + await f.close() + self.assertEqual(None, f.md5) + + async def test_alternate_collection(self): + await self.db.alt.files.delete_many({}) + await self.db.alt.chunks.delete_many({}) + + f = AsyncGridIn(self.db.alt) + await f.write(b"hello world") + await f.close() + + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + g = AsyncGridOut(self.db.alt, f._id) + self.assertEqual(b"hello world", await g.read()) + + async def test_grid_in_default_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn(self.db.fs) + + self.assertIsInstance(a._id, ObjectId) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual(None, a.filename) + self.assertEqual(None, a.name) + a.filename = "my_file" + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual(None, a.content_type) + a.content_type = "text/html" + + self.assertEqual("text/html", a.content_type) + + self.assertRaises(AttributeError, getattr, a, "length") + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertRaises(AttributeError, getattr, a, "upload_date") + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertRaises(AttributeError, getattr, a, "aliases") + a.aliases = ["foo"] + + self.assertEqual(["foo"], a.aliases) + + self.assertRaises(AttributeError, getattr, a, "metadata") + a.metadata = {"foo": 1} + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + await a.close() + + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + await a.set("forty_two", 42) + + self.assertEqual(42, a.forty_two) + + self.assertIsInstance(a._id, ObjectId) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual("text/html", a.content_type) + + self.assertEqual(0, a.length) + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertIsInstance(a.upload_date, datetime.datetime) + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertEqual(["foo"], a.aliases) + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertEqual(None, a.md5) + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + # Make sure custom attributes that were set both before and after + # a.close() are reflected in b. PYTHON-411. 
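+ # get_last_version returns the most recently uploaded file with the
+ # given filename, i.e. the AsyncGridIn just closed above.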
+ b = await AsyncGridFS(self.db).get_last_version(filename=a.filename) + self.assertEqual(a.metadata, b.metadata) + self.assertEqual(a.aliases, b.aliases) + self.assertEqual(a.forty_two, b.forty_two) + + async def test_grid_out_default_opts(self): + self.assertRaises(TypeError, AsyncGridOut, "foo") + + gout = AsyncGridOut(self.db.fs, 5) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await gout.open() + gout.name + + a = AsyncGridIn(self.db.fs) + await a.close() + + b = AsyncGridOut(self.db.fs, a._id) + if not _IS_SYNC: + await b.open() + + self.assertEqual(a._id, b._id) + self.assertEqual(0, b.length) + self.assertEqual(None, b.content_type) + self.assertEqual(None, b.name) + self.assertEqual(None, b.filename) + self.assertEqual(255 * 1024, b.chunk_size) + self.assertIsInstance(b.upload_date, datetime.datetime) + self.assertEqual(None, b.aliases) + self.assertEqual(None, b.metadata) + self.assertEqual(None, b.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, b, attr, 5) + + async def test_grid_out_cursor_options(self): + self.assertRaises( + TypeError, AsyncGridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) + + cursor = AsyncGridOutCursor(self.db.fs, {}) + cursor_clone = cursor.clone() + + cursor_dict = cursor.__dict__.copy() + cursor_dict.pop("_session") + cursor_clone_dict = cursor_clone.__dict__.copy() + cursor_clone_dict.pop("_session") + self.assertDictEqual(cursor_dict, cursor_clone_dict) + + self.assertRaises(NotImplementedError, cursor.add_option, 0) + self.assertRaises(NotImplementedError, cursor.remove_option, 0) + + async def test_grid_out_custom_opts(self): + one = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, 5) + + if not _IS_SYNC: + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual("text/html", two.content_type) + self.assertEqual(1000, two.chunk_size) + self.assertIsInstance(two.upload_date, datetime.datetime) + self.assertEqual(["foo"], two.aliases) + self.assertEqual({"foo": 1, "bar": 2}, two.metadata) + self.assertEqual(3, two.bar) + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + async def test_grid_out_file_document(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"foo bar") + await one.close() + + two = AsyncGridOut(self.db.fs, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await two.read()) + + three = AsyncGridOut(self.db.fs, 5, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await three.read()) + + four = AsyncGridOut(self.db.fs, file_document={}) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await four.open() + four.name + + async def test_write_file_like(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, one._id) + + three = AsyncGridIn(self.db.fs) + await three.write(two) + await three.close() + + 
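+ # Reading back a fresh AsyncGridOut confirms that write() accepted the
+ # file-like AsyncGridOut `two` and copied its contents verbatim.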
four = AsyncGridOut(self.db.fs, three._id) + self.assertEqual(b"hello world", await four.read()) + + five = AsyncGridIn(self.db.fs, chunk_size=2) + await five.write(b"hello") + buffer = BytesIO(b" world") + await five.write(buffer) + await five.write(b" and mongodb") + await five.close() + self.assertEqual( + b"hello world and mongodb", await AsyncGridOut(self.db.fs, five._id).read() + ) + + async def test_write_lines(self): + a = AsyncGridIn(self.db.fs) + await a.writelines([b"hello ", b"world"]) + await a.close() + + self.assertEqual(b"hello world", await AsyncGridOut(self.db.fs, a._id).read()) + + async def test_close(self): + f = AsyncGridIn(self.db.fs) + await f.close() + with self.assertRaises(ValueError): + await f.write("test") + await f.close() + + async def test_closed(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write(b"Hello world.\nHow are you?") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertFalse(g.closed) + await g.read(1) + self.assertFalse(g.closed) + await g.read(100) + self.assertFalse(g.closed) + await g.close() + self.assertTrue(g.closed) + + async def test_multi_chunk_file(self): + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) + + f = AsyncGridIn(self.db.fs) + await f.write(random_string) + await f.close() + + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(2, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(random_string, await g.read()) + + async def test_small_chunks(self): + self.files = 0 + self.chunks = 0 + + async def helper(data): + f = AsyncGridIn(self.db.fs, chunkSize=1) + await f.write(data) + await f.close() + + self.files += 1 + self.chunks += len(data) + + self.assertEqual(self.files, await self.db.fs.files.count_documents({})) + self.assertEqual(self.chunks, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read(10) + await g.read(10)) + return True + + await qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) + + async def test_seek(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + await g.seek(0) + self.assertEqual(b"hello world", await g.read()) + await g.seek(1) + self.assertEqual(b"ello world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-1) + + await g.seek(-3, _SEEK_END) + self.assertEqual(b"rld", await g.read()) + await g.seek(0, _SEEK_END) + self.assertEqual(b"", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_END) + + await g.seek(3) + await g.seek(3, _SEEK_CUR) + self.assertEqual(b"world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_CUR) + + async def test_tell(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(0, g.tell()) + await g.read(0) + self.assertEqual(0, g.tell()) + await g.read(1) + self.assertEqual(1, g.tell()) + await g.read(2) + self.assertEqual(3, g.tell()) + await g.read() + self.assertEqual(g.length, g.tell()) + + async def test_multiple_reads(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = 
AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"he", await g.read(2)) + self.assertEqual(b"ll", await g.read(2)) + self.assertEqual(b"o ", await g.read(2)) + self.assertEqual(b"wo", await g.read(2)) + self.assertEqual(b"rl", await g.read(2)) + self.assertEqual(b"d", await g.read(2)) + self.assertEqual(b"", await g.read(2)) + + async def test_readline(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual(b"ello world,\n", await g.readline()) + self.assertEqual(b"How a", await g.readline(5)) + self.assertEqual(b"", await g.readline(0)) + self.assertEqual(b"re you?\n", await g.readline()) + self.assertEqual(b"Hope all is well.\n", await g.readline(1000)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual(b"", await g.readline()) + + # Try readline() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.readline(2)) + self.assertEqual(b"l", await g.read(1)) + self.assertEqual(b"lo", await g.readline(2)) + self.assertEqual(b" world,\n", await g.readline()) + + # Only readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.readline(1)) + self.assertEqual(b"e", await g.readline(1)) + self.assertEqual(b"llo world,\n", await g.readline()) + + async def test_readlines(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], await g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines()) + self.assertEqual([], await g.readlines()) + + # Try readline(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", await g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], await g.readlines(13)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual([], await g.readlines()) + + # Only readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(0), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual([b"How are you?\n"], await g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines(18)) + + # Try readlines() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], await g.readlines(29)) + self.assertEqual([b"Bye"], await g.readlines(1)) + + # Try readlines() first, then readline(). 
+ g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"How are you?\n", await g.readline()) + self.assertEqual([b"Hope all is well.\n"], await g.readlines(17)) + self.assertEqual(b"Bye", await g.readline()) + + async def test_iterator(self): + f = AsyncGridIn(self.db.fs) + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\nhere are\nsome lines.") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], await g.to_list()) + + self.assertEqual(b"", await g.read(5)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", await anext(aiter(g))) + self.assertEqual(b"here", await g.read(4)) + self.assertEqual(b" are\n", await anext(aiter(g))) + self.assertEqual(b"some lines", await g.read(10)) + self.assertEqual(b".", await anext(aiter(g))) + with self.assertRaises(StopAsyncIteration): + await aiter(g).__anext__() + + f = AsyncGridIn(self.db.fs, chunk_size=2) + await f.write(b"hello world") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], await g.to_list()) + + async def test_read_unaligned_buffer_size(self): + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." + f = AsyncGridIn(self.db.fs, chunkSize=16) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + out_data = b"" + while 1: + s = await g.read(13) + if not s: + break + out_data += s + + self.assertEqual(in_data, out_data) + + async def test_readchunk(self): + in_data = b"a" * 10 + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(3, len(await g.readchunk())) + + self.assertEqual(2, len(await g.read(2))) + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(3, len(await g.read(3))) + + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(0, len(await g.readchunk())) + + async def test_write_unicode(self): + f = AsyncGridIn(self.db.fs) + with self.assertRaises(TypeError): + await f.write("foo") + + f = AsyncGridIn(self.db.fs, encoding="utf-8") + await f.write("foo") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"foo", await g.read()) + + f = AsyncGridIn(self.db.fs, encoding="iso-8859-1") + await f.write("aé") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual("aé".encode("iso-8859-1"), await g.read()) + + async def test_set_after_close(self): + f = AsyncGridIn(self.db.fs, _id="foo", bar="baz") + + self.assertEqual("foo", f._id) + self.assertEqual("baz", f.bar) + self.assertRaises(AttributeError, getattr, f, "baz") + self.assertRaises(AttributeError, getattr, f, "uploadDate") + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + await f.set("bar", "foo") + await f.set("baz", 5) + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertRaises(AttributeError, getattr, f, 
"uploadDate") + + await f.close() + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertTrue(f.uploadDate) + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + await f.set("bar", "a") + await f.set("baz", "b") + self.assertRaises(AttributeError, setattr, f, "upload_date", 5) + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertEqual("a", g.bar) + self.assertEqual("b", g.baz) + # Versions 2.0.1 and older saved a _closed field for some reason. + self.assertRaises(AttributeError, getattr, g, "_closed") + + async def test_context_manager(self): + contents = b"Imagine this is some important data..." + + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + + async with AsyncGridOut(self.db.fs, infile._id) as outfile: + self.assertEqual(contents, await outfile.read()) + + async def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + await self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(await self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + + async def test_prechunked_string(self): + async def write_me(s, chunk_size): + buf = BytesIO(s) + infile = AsyncGridIn(self.db.fs) + while True: + to_write = buf.read(chunk_size) + if to_write == b"": + break + await infile.write(to_write) + await infile.close() + buf.close() + + outfile = AsyncGridOut(self.db.fs, infile._id) + data = await outfile.read() + self.assertEqual(s, data) + + s = b"x" * DEFAULT_CHUNK_SIZE * 4 + # Test with default chunk size + await write_me(s, DEFAULT_CHUNK_SIZE) + # Multiple + await write_me(s, DEFAULT_CHUNK_SIZE * 3) + # Custom + await write_me(s, 262300) + + async def test_grid_out_lazy_connect(self): + fs = self.db.fs + outfile = AsyncGridOut(fs, file_id=-1) + with self.assertRaises(NoFile): + await outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + await outfile.open() + outfile.filename + + infile = AsyncGridIn(fs, filename=1) + await infile.close() + + outfile = AsyncGridOut(fs, infile._id) + await outfile.read() + outfile.filename + + outfile = AsyncGridOut(fs, infile._id) + await outfile.readchunk() + + async def test_grid_in_lazy_connect(self): + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) + fs = client.db.fs + infile = AsyncGridIn(fs, file_id=-1, chunk_size=1) + with self.assertRaises(ServerSelectionTimeoutError): + await infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + await infile.close() + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + AsyncGridIn((await self.async_rs_or_single_client(w=0)).pymongo_test.fs) + + async def test_survive_cursor_not_found(self): + # By default the find command returns 101 documents in the first batch. + # Use 102 batches to cause a single getMore. 
+ chunk_size = 1024 + data = b"d" * (102 * chunk_size) + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client.pymongo_test + async with AsyncGridIn(db.fs, chunk_size=chunk_size) as infile: + await infile.write(data) + + async with AsyncGridOut(db.fs, infile._id) as outfile: + self.assertEqual(len(await outfile.readchunk()), chunk_size) + + # Kill the cursor to simulate the cursor timing out on the server + # when an application spends a long time between two calls to + # readchunk(). + assert await client.address is not None + await client._close_cursor_now( + outfile._chunk_iter._cursor.cursor_id, + _CursorAddress(await client.address, db.fs.chunks.full_name), # type: ignore[arg-type] + ) + + # Read the rest of the file without error. + self.assertEqual(len(await outfile.read()), len(data) - chunk_size) + + # Paranoid, ensure that a getMore was actually sent. + self.assertIn("getMore", listener.started_command_names()) + + @async_client_context.require_sync + async def test_zip(self): + zf = BytesIO() + z = zipfile.ZipFile(zf, "w") + z.writestr("test.txt", b"hello world") + z.close() + zf.seek(0) + + f = AsyncGridIn(self.db.fs, filename="test.zip") + await f.write(zf) + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + z = zipfile.ZipFile(g) + self.assertSequenceEqual(z.namelist(), ["test.txt"]) + self.assertEqual(z.read("test.txt"), b"hello world") + + async def test_grid_out_unsupported_operations(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs.py b/test/asynchronous/test_gridfs.py new file mode 100644 index 0000000000..f60352f3cb --- /dev/null +++ b/test/asynchronous/test_gridfs.py @@ -0,0 +1,603 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_joinall +from test.utils_shared import one + +import gridfs +from bson.binary import Binary +from gridfs.asynchronous.grid_file import DEFAULT_CHUNK_SIZE, AsyncGridOutCursor +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, fs, n): + super().__init__() + self.fs = fs + self.n = n + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = self.fs.new_file(filename="test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, fs, n, results): + super().__init__() + self.fs = fs + self.n = n + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = await self.fs.get("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfsNoConnect(unittest.IsolatedAsyncioTestCase): + db: AsyncDatabase + + async def asyncSetUp(self): + await super().asyncSetUp() + self.db = AsyncMongoClient(connect=False).pymongo_test + + async def test_gridfs(self): + self.assertRaises(TypeError, gridfs.AsyncGridFS, "foo") + self.assertRaises(TypeError, gridfs.AsyncGridFS, self.db, 5) + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFS + alt: gridfs.AsyncGridFS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFS(self.db) + self.alt = gridfs.AsyncGridFS(self.db, "alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.put(b"hello world") + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.get(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.fs.get("foo") + oid = await self.fs.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.fs.get("foo")).read()) + + async def test_multi_chunk_delete(self): + await self.db.fs.drop() + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFS(self.db) + oid = await gfs.put(b"hello", chunkSize=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await 
self.db.fs.chunks.count_documents({})) + + async def test_list(self): + self.assertEqual([], await self.fs.list()) + await self.fs.put(b"hello world") + self.assertEqual([], await self.fs.list()) + + # PYTHON-598: in server versions before 2.5.x, creating an index on + # filename, uploadDate causes list() to include None. + await self.fs.get_last_version() + self.assertEqual([], await self.fs.list()) + + await self.fs.put(b"", filename="mike") + await self.fs.put(b"foo", filename="test") + await self.fs.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.fs.list())) + + async def test_empty_file(self): + oid = await self.fs.put(b"") + self.assertEqual(b"", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.put(b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_put_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. + await chunks.drop() + await files.drop() + await self.fs.put(b"junk") + + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in (await chunks.index_information()).values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (await files.index_information()).values() + ) + ) + + async def test_alt_collection(self): + oid = await self.alt.put(b"hello world") + self.assertEqual(b"hello world", await (await self.alt.get(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.get(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.get("foo") + oid = await self.alt.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.alt.get("foo")).read()) + + await self.alt.put(b"", filename="mike") + await self.alt.put(b"foo", filename="test") + await self.alt.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.alt.list())) + + async def test_threaded_reads(self): + await self.fs.put(b"hello", _id="test") + + tasks = [] + results: list = [] + for i in range(10): + tasks.append(JustRead(self.fs, 10, results)) + await tasks[i].start() + + await async_joinall(tasks) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + tasks = [] + for i in range(10): + tasks.append(JustWrite(self.fs, 10)) + await tasks[i].start() + + await async_joinall(tasks) + + f 
= await self.fs.get_last_version("test") + self.assertEqual(await f.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + two = self.fs.new_file(filename="test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.put(b"baz", filename="test") + + self.assertEqual(b"baz", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.get_last_version("test") + + async def test_get_last_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author") + + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(one) + + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author2") + + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(filename="test")).read()) + + with self.assertRaises(NoFile): + await self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + await self.fs.get_last_version(filename="nottest", author="author1") + + await self.fs.delete(one) + await self.fs.delete(two) + + async def test_get_version(self): + await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"bar", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"baz", filename="test") + await asyncio.sleep(0.01) + + self.assertEqual(b"foo", await (await self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", await (await self.fs.get_version("test", 2)).read()) + + self.assertEqual(b"baz", await (await self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", await (await self.fs.get_version("test", -3)).read()) + + with self.assertRaises(NoFile): + await self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + await self.fs.get_version("test", -4) + + async def test_get_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author1") + await asyncio.sleep(0.01) + three = await self.fs.put(b"baz", filename="test", author="author2") + + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=-2)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", 
version=-1)).read(), + ) + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=0)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", version=1)).read(), + ) + self.assertEqual( + b"baz", + await (await self.fs.get_version(filename="test", author="author2", version=0)).read(), + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=-1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=2)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author1", version=2) + + await self.fs.delete(one) + await self.fs.delete(two) + await self.fs.delete(three) + + async def test_put_filelike(self): + oid = await self.fs.put(BytesIO(b"hello world"), chunk_size=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + + async def test_file_exists(self): + oid = await self.fs.put(b"hello") + with self.assertRaises(FileExists): + await self.fs.put(b"world", _id=oid) + + one = self.fs.new_file(_id=123) + await one.write(b"some content") + await one.close() + + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + with self.assertRaises(FileExists): + await two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). + self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + two = self.fs.new_file(_id=123) + await two.write(b"some content") + with self.assertRaises(FileExists): + await two.close() + # Original file is still readable. 
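+        # The failed duplicate close() must not have corrupted the stored file.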
+ self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + async def test_exists(self): + oid = await self.fs.put(b"hello") + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + + self.assertFalse(await self.fs.exists(filename="mike")) + self.assertFalse(await self.fs.exists("mike")) + + oid = await self.fs.put(b"hello", filename="mike", foo=12) + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + self.assertTrue(await self.fs.exists(filename="mike")) + self.assertTrue(await self.fs.exists({"filename": "mike"})) + self.assertTrue(await self.fs.exists(foo=12)) + self.assertTrue(await self.fs.exists({"foo": 12})) + self.assertTrue(await self.fs.exists(foo={"$gt": 11})) + self.assertTrue(await self.fs.exists({"foo": {"$gt": 11}})) + + self.assertFalse(await self.fs.exists(foo=13)) + self.assertFalse(await self.fs.exists({"foo": 13})) + self.assertFalse(await self.fs.exists(foo={"$gt": 12})) + self.assertFalse(await self.fs.exists({"foo": {"$gt": 12}})) + + async def test_put_unicode(self): + with self.assertRaises(TypeError): + await self.fs.put("hello") + + oid = await self.fs.put("hello", encoding="utf-8") + self.assertEqual(b"hello", await (await self.fs.get(oid)).read()) + self.assertEqual("utf-8", (await self.fs.get(oid)).encoding) + + oid = await self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), await (await self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (await self.fs.get(oid)).encoding) + + async def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + await self.fs.put(b"", filename="empty") + doc = await self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + f = await self.fs.get_last_version(filename="empty") + + async def iterate_file(grid_file): + async for _chunk in grid_file: + pass + return True + + self.assertTrue(await iterate_file(f)) + + async def test_gridfs_lazy_connect(self): + client = await self.async_single_client( + "badhost", connect=False, serverSelectionTimeoutMS=10 + ) + db = client.db + gfs = gridfs.AsyncGridFS(db) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.list() + + fs = gridfs.AsyncGridFS(db) + f = fs.new_file() + with self.assertRaises(ServerSelectionTimeoutError): + await f.close() + + async def test_gridfs_find(self): + await self.fs.put(b"test2", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test2+", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test1", filename="one") + await asyncio.sleep(0.01) + await self.fs.put(b"test2++", filename="two") + files = self.db.fs.files + self.assertEqual(3, await files.count_documents({"filename": "two"})) + self.assertEqual(4, await files.count_documents({})) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + await cursor.rewind() + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + gout = await cursor.next() + self.assertEqual(b"test2+", await gout.read()) + with self.assertRaises(StopAsyncIteration): + await cursor.__anext__() + await cursor.rewind() + items = await cursor.to_list() + self.assertEqual(len(items), 2) + await 
cursor.rewind() + items = await cursor.to_list(1) + self.assertEqual(len(items), 1) + await cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + async def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. + cursor = AsyncGridOutCursor.__new__(AsyncGridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + cursor.__del__() # no error + + async def test_gridfs_find_one(self): + self.assertEqual(None, await self.fs.find_one()) + + id1 = await self.fs.put(b"test1", filename="file1") + res = await self.fs.find_one() + assert res is not None + self.assertEqual(b"test1", await res.read()) + + id2 = await self.fs.put(b"test2", filename="file2", meta="data") + res1 = await self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b"test1", await res1.read()) + res2 = await self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b"test2", await res2.read()) + + res3 = await self.fs.find_one({"filename": "file1"}) + assert res3 is not None + self.assertEqual(b"test1", await res3.read()) + + res4 = await self.fs.find_one(id2) + assert res4 is not None + self.assertEqual("data", res4.meta) + + async def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy AsyncGridFS clients, store size as a float. + data = b"data" + await self.fs.put(data, filename="f") + await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, await (await self.fs.get_version("f")).read()) + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFS((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_md5(self): + gin = self.fs.new_file() + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.get(gin._id) + self.assertIsNone(gout.md5) + + _id = await self.fs.put(b"still no md5 sum") + gout = await self.fs.get(_id) + self.assertIsNone(gout.md5) + + +class TestGridfsReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + fs = gridfs.AsyncGridFS(rsc.gfsreplica, "gfsreplicatest") + + gin = fs.new_file() + self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) + + oid = await fs.put(b"foo") + content = await (await fs.get(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + fs = gridfs.AsyncGridFS(secondary_connection.gfsreplica, "gfssecondarytest") + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await fs.put(b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not 
attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + fs = gridfs.AsyncGridFS(client.gfsreplica, "gfssecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await fs.get_last_version() + with self.assertRaises(NotPrimaryError): + await fs.put("data", encoding="utf-8") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py new file mode 100644 index 0000000000..fd9b9883bf --- /dev/null +++ b/test/asynchronous/test_gridfs_bucket.py @@ -0,0 +1,597 @@ +# +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import itertools +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_joinall +from test.utils_shared import one + +import gridfs +from bson.binary import Binary +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, gfs, num): + super().__init__() + self.gfs = gfs + self.num = num + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = self.gfs.open_upload_stream("test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, gfs, num, results): + super().__init__() + self.gfs = gfs + self.num = num + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = await self.gfs.open_download_stream_by_name("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFSBucket + alt: gridfs.AsyncGridFSBucket + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFSBucket(self.db) + self.alt = gridfs.AsyncGridFSBucket(self.db, bucket_name="alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await 
self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.open_download_stream(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_multi_chunk_delete(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + oid = await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_delete_by_name(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete_by_name("test_filename") + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_empty_file(self): + oid = await self.fs.upload_from_stream("test_filename", b"") + self.assertEqual(b"", await (await self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.upload_from_stream("test_filename", b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_upload_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. 
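+        # The next upload must then recreate the two mandatory GridFS indexes,
+        # which the assertions below verify.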
+ await chunks.drop() + await files.drop() + await self.fs.upload_from_stream("filename", b"junk") + + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (await chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", + ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + + async def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [("filename", i), ("uploadDate", j)] + await self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) + + # No error. + await self.fs.upload_from_stream("filename", b"data") + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + await files.drop() + + async def test_alt_collection(self): + oid = await self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await self.alt.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.open_download_stream(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.open_download_stream("foo") + await self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual( + b"hello world", await (await self.alt.open_download_stream_by_name("foo")).read() + ) + + await self.alt.upload_from_stream("mike", b"") + await self.alt.upload_from_stream("test", b"foo") + await self.alt.upload_from_stream("hello world", b"") + + self.assertEqual( + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in await self.db.alt.files.find().to_list()}, + ) + + async def test_threaded_reads(self): + await self.fs.upload_from_stream("test", b"hello") + + threads = [] + results: list = [] + for i in range(10): + threads.append(JustRead(self.fs, 10, results)) + await threads[i].start() + + await async_joinall(threads) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + threads = [] + for i in range(10): + threads.append(JustWrite(self.fs, 10)) + await threads[i].start() + + await async_joinall(threads) + + fstr = await self.fs.open_download_stream_by_name("test") + self.assertEqual(await fstr.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + two = self.fs.open_upload_stream("test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.upload_from_stream("test", b"baz") + + self.assertEqual(b"baz", await (await 
self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test") + + async def test_get_version(self): + await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"bar") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"baz") + await asyncio.sleep(0.01) + + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=0)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=2)).read() + ) + + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=-1)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=-2)).read() + ) + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=-3)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=-4) + + async def test_upload_from_stream(self): + oid = await self.fs.upload_from_stream( + "test_file", BytesIO(b"hello world"), chunk_size_bytes=1 + ) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.open_download_stream(oid)).read()) + + async def test_upload_from_stream_with_id(self): + oid = ObjectId() + await self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", await (await self.fs.open_download_stream(oid)).read()) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @async_client_context.require_failCommand_fail_point + async def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + async with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + await gin.write(b"hello world") + # 3 chunks were uploaded. + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + async def test_upload_batching(self): + async with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + await gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. 
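+            # (With chunk_size_bytes=1 every byte is its own chunk, so the 10th
+            # byte fills the buffer and flushes all 10 chunks in one batch.)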
+            self.assertEqual(10, await self.db.fs.chunks.count_documents({"files_id": gin._id}))
+
+    async def test_open_upload_stream(self):
+        gin = self.fs.open_upload_stream("from_stream")
+        await gin.write(b"from stream")
+        await gin.close()
+        self.assertEqual(b"from stream", await (await self.fs.open_download_stream(gin._id)).read())
+
+    async def test_open_upload_stream_with_id(self):
+        oid = ObjectId()
+        gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id")
+        await gin.write(b"from stream with custom id")
+        await gin.close()
+        self.assertEqual(
+            b"from stream with custom id", await (await self.fs.open_download_stream(oid)).read()
+        )
+
+    async def test_missing_length_iter(self):
+        # Test fix that guards against PHP-237.
+        await self.fs.upload_from_stream("empty", b"")
+        doc = await self.db.fs.files.find_one({"filename": "empty"})
+        assert doc is not None
+        doc.pop("length")
+        await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc)
+        fstr = await self.fs.open_download_stream_by_name("empty")
+
+        async def iterate_file(grid_file):
+            async for _ in grid_file:
+                pass
+            return True
+
+        self.assertTrue(await iterate_file(fstr))
+
+    async def test_gridfs_lazy_connect(self):
+        client = await self.async_single_client(
+            "badhost", connect=False, serverSelectionTimeoutMS=0
+        )
+        cdb = client.db
+        gfs = gridfs.AsyncGridFSBucket(cdb)
+        with self.assertRaises(ServerSelectionTimeoutError):
+            await gfs.delete(0)
+
+        gfs = gridfs.AsyncGridFSBucket(cdb)
+        with self.assertRaises(ServerSelectionTimeoutError):
+            await gfs.upload_from_stream("test", b"")  # Still no connection.
+
+    async def test_gridfs_find(self):
+        await self.fs.upload_from_stream("two", b"test2")
+        await asyncio.sleep(0.01)
+        await self.fs.upload_from_stream("two", b"test2+")
+        await asyncio.sleep(0.01)
+        await self.fs.upload_from_stream("one", b"test1")
+        await asyncio.sleep(0.01)
+        await self.fs.upload_from_stream("two", b"test2++")
+        files = self.db.fs.files
+        self.assertEqual(3, await files.count_documents({"filename": "two"}))
+        self.assertEqual(4, await files.count_documents({}))
+        cursor = self.fs.find(
+            {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2
+        )
+        gout = await cursor.next()
+        self.assertEqual(b"test1", await gout.read())
+        await cursor.rewind()
+        gout = await cursor.next()
+        self.assertEqual(b"test1", await gout.read())
+        gout = await cursor.next()
+        self.assertEqual(b"test2+", await gout.read())
+        with self.assertRaises(StopAsyncIteration):
+            await cursor.next()
+        await cursor.close()
+        self.assertRaises(TypeError, self.fs.find, {}, {"_id": True})
+
+    async def test_grid_in_non_int_chunksize(self):
+        # Lua, and perhaps other buggy GridFS clients, store chunkSize as a float.
+        data = b"data"
+        await self.fs.upload_from_stream("f", data)
+        await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}})
+
+        self.assertEqual(data, await (await self.fs.open_download_stream_by_name("f")).read())
+
+    async def test_unacknowledged(self):
+        # w=0 is prohibited.
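+        # GridFS requires acknowledged writes, so the bucket constructor fails
+        # fast with ConfigurationError instead of erroring later on upload.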
+ with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFSBucket((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_rename(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename(_id, "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + + async def test_rename_by_name(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) + async def test_abort(self): + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) + await gin.write(b"test1") + await gin.write(b"test2") + await gin.write(b"test3") + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + self.assertTrue(gin.closed) + with self.assertRaises(ValueError): + await gin.write(b"test4") + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + + async def test_download_to_stream(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + oid = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + oid = await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_download_to_stream_by_name(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + _ = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream_by_name("one_chunk", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. 
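+        # chunk_size_bytes=1 splits the 11-byte payload into 11 chunk documents.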
+ await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + + file2 = BytesIO() + await self.fs.download_to_stream_by_name("many_chunks", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_md5(self): + gin = self.fs.open_upload_stream("no md5") + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") + await gin.write(b"also no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + +class TestGridfsBucketReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsbucketreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + gfs = gridfs.AsyncGridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = await gfs.upload_from_stream("test_filename", b"foo") + content = await (await gfs.open_download_stream(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + gfs = gridfs.AsyncGridFSBucket( + secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest" + ) + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + gfs = gridfs.AsyncGridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"data") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs_spec.py b/test/asynchronous/test_gridfs_spec.py new file mode 100644 index 0000000000..f3dc14fbdc --- /dev/null +++ b/test/asynchronous/test_gridfs_spec.py @@ -0,0 +1,39 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the AsyncGridFS unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_heartbeat_monitoring.py b/test/asynchronous/test_heartbeat_monitoring.py new file mode 100644 index 0000000000..aa8a205021 --- /dev/null +++ b/test/asynchronous/test_heartbeat_monitoring.py @@ -0,0 +1,98 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitoring of the server heartbeats.""" +from __future__ import annotations + +import sys +from test.asynchronous.utils import AsyncMockPool + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest +from test.utils_shared import HeartbeatEventListener, async_wait_until + +from pymongo.asynchronous.monitor import Monitor +from pymongo.errors import ConnectionFailure +from pymongo.hello import Hello, HelloCompat + +_IS_SYNC = False + + +class TestHeartbeatMonitoring(AsyncIntegrationTest): + async def create_mock_monitor(self, responses, uri, expected_results): + listener = HeartbeatEventListener() + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + + class MockMonitor(Monitor): + async def _check_with_socket(self, *args, **kwargs): + if isinstance(responses[1], Exception): + raise responses[1] + return Hello(responses[1]), 99 + + _ = await self.async_single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=AsyncMockPool, + connect=True, + ) + + expected_len = len(expected_results) + # Wait for *at least* expected_len number of results. The + # monitor thread may run multiple times during the execution + # of this test. + await async_wait_until( + lambda: len(listener.events) >= expected_len, "publish all events" + ) + + # zip gives us len(expected_results) pairs. 
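+            # The monitor may publish more events than expected_results names;
+            # zip() silently ignores the extras.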
+            for expected, actual in zip(expected_results, listener.events):
+                self.assertEqual(expected, actual.__class__.__name__)
+                self.assertEqual(actual.connection_id, responses[0])
+                if expected != "ServerHeartbeatStartedEvent":
+                    if isinstance(actual.reply, Hello):
+                        self.assertEqual(actual.duration, 99)
+                        self.assertEqual(actual.reply._doc, responses[1])
+                    else:
+                        self.assertEqual(actual.reply, responses[1])
+
+    async def test_standalone(self):
+        responses = (
+            ("a", 27017),
+            {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1},
+        )
+        uri = "mongodb://a:27017"
+        expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"]
+
+        await self.create_mock_monitor(responses, uri, expected_results)
+
+    async def test_standalone_error(self):
+        responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE"))
+        uri = "mongodb://a:27017"
+        # _check_with_socket failing results in a second attempt.
+        expected_results = [
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+        ]
+
+        await self.create_mock_monitor(responses, uri, expected_results)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py
new file mode 100644
index 0000000000..890788fc56
--- /dev/null
+++ b/test/asynchronous/test_index_management.py
@@ -0,0 +1,379 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the index management spec tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import pathlib
+import sys
+import time
+import uuid
+from typing import Any, Mapping
+
+import pytest
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, unittest
+from test.asynchronous.unified_format import generate_test_classes
+from test.utils_shared import AllowListEventListener, OvertCommandListener
+
+from pymongo.errors import OperationFailure
+from pymongo.operations import SearchIndexModel
+from pymongo.read_concern import ReadConcern
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = False
+
+pytestmark = pytest.mark.search_index
+
+# Location of JSON test specifications.
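+# The asynchronous tests live one directory below the shared JSON specs, so
+# the non-sync branch resolves the path from the parent package.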
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management") + +_NAME = "test-search-index" + + +class TestCreateSearchIndex(AsyncIntegrationTest): + async def test_inputs(self): + listener = AllowListEventListener("createSearchIndexes") + client = self.simple_client(event_listeners=[listener]) + coll = client.test.test + await coll.drop() + definition = dict(mappings=dict(dynamic=True)) + model_kwarg_list: list[Mapping[str, Any]] = [ + dict(definition=definition, name=None), + dict(definition=definition, name="test"), + ] + for model_kwargs in model_kwarg_list: + model = SearchIndexModel(**model_kwargs) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model_kwargs) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "arbitraryOption": 1}) + self.assertEqual( + {"definition": definition, "arbitraryOption": 1}, + listener.events[0].command["indexes"][0], + ) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "type": "search"}) + self.assertEqual( + {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0] + ) + + +class SearchIndexIntegrationBase(AsyncPyMongoTestCase): + db_name = "test_search_index_base" + + @classmethod + def setUpClass(cls) -> None: + cls.url = os.environ.get("MONGODB_URI") + cls.username = os.environ["DB_USER"] + cls.password = os.environ["DB_PASSWORD"] + cls.listener = OvertCommandListener() + + async def asyncSetUp(self) -> None: + self.client = self.simple_client( + self.url, + username=self.username, + password=self.password, + event_listeners=[self.listener], + ) + await self.client.drop_database(_NAME) + self.db = self.client[self.db_name] + + async def asyncTearDown(self): + await self.client.drop_database(_NAME) + + async def wait_for_ready(self, coll, name=_NAME, predicate=None): + """Wait for a search index to be ready.""" + indices: list[Mapping[str, Any]] = [] + if predicate is None: + predicate = lambda index: index.get("queryable") is True + + while True: + indices = await (await coll.list_search_indexes(name)).to_list() + if len(indices) and predicate(indices[0]): + return indices[0] + await asyncio.sleep(5) + + +class TestSearchIndexIntegration(SearchIndexIntegrationBase): + db_name = "test_search_index" + + async def test_comment_field(self): + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create a new search index on ``coll0`` that implicitly passes its type. + search_definition = {"mappings": {"dynamic": False}} + self.listener.reset() + implicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo" + ) + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + # Get the index definition. 
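+        # list_search_indexes should forward the same comment to the server.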
+        self.listener.reset()
+        await (await coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next()
+        event = self.listener.events[0]
+        self.assertEqual(event.command["comment"], "foo")
+
+
+class TestSearchIndexProse(SearchIndexIntegrationBase):
+    db_name = "test_search_index_prose"
+
+    async def test_case_1(self):
+        """Driver can successfully create and list search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition:
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        await coll0.insert_one({})
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = await self.wait_for_ready(coll0)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    async def test_case_2(self):
+        """Driver can successfully create multiple indexes in batch."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper.
+        name1 = "test-search-index-1"
+        name2 = "test-search-index-2"
+        definition = {"mappings": {"dynamic": False}}
+        index_definitions: list[dict[str, Any]] = [
+            {"name": name1, "definition": definition},
+            {"name": name2, "definition": definition},
+        ]
+        await coll0.create_search_indexes(
+            [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions]
+        )
+
+        # Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``.
+        indices = await (await coll0.list_search_indexes()).to_list()
+        names = [i["name"] for i in indices]
+        self.assertIn(name1, names)
+        self.assertIn(name2, names)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following conditions are satisfied:
+        # An index with the ``name`` of ``test-search-index-1`` is present and the index has a field ``queryable`` with the value of ``true``. Store the result in ``index1``.
+        # An index with the ``name`` of ``test-search-index-2`` is present and the index has a field ``queryable`` with the value of ``true``. Store the result in ``index2``.
+        index1 = await self.wait_for_ready(coll0, name1)
+        index2 = await self.wait_for_ready(coll0, name2)
+
+        # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }``.
+        for index in [index1, index2]:
+            self.assertIn("latestDefinition", index)
+            self.assertEqual(index["latestDefinition"], definition)
+
+    async def test_case_3(self):
+        """Driver can successfully drop search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, "test-search-index")
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with the value of ``true``.
+        await self.wait_for_ready(coll0)
+
+        # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name.
+        await coll0.drop_search_index(_NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array.
+        t0 = time.time()
+        while True:
+            indices = await (await coll0.list_search_indexes()).to_list()
+            if not indices:
+                break
+            if (time.time() - t0) / 60 > 5:
+                raise TimeoutError("Timed out waiting for index deletion")
+            await asyncio.sleep(5)
+
+    async def test_case_4(self):
+        """Driver can update a search index."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with the value of ``true``.
+        await self.wait_for_ready(coll0)
+
+        # Run an ``updateSearchIndex`` on ``coll0``.
+        # Assert that the command does not error and the server responds with a success.
+        model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}}
+        await coll0.update_search_index(_NAME, model2["definition"])
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``.
+        # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``.
+        predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY"
+        await self.wait_for_ready(coll0, predicate=predicate)
+
+        # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``.
+        index = (await (await coll0.list_search_indexes(_NAME)).to_list())[0]
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model2["definition"])
+
+    async def test_case_5(self):
+        """``dropSearchIndex`` suppresses namespace not found errors."""
+        # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server.
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Run a ``dropSearchIndex`` command and assert that no error is thrown.
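+        # The driver helper suppresses the server's "namespace not found"
+        # error, so this call is expected to return cleanly even though the
+        # collection was never created.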
+        await coll0.drop_search_index("foo")
+
+    async def test_case_6(self):
+        """Driver can successfully create and list search indexes with non-default readConcern and writeConcern."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Apply a write concern ``WriteConcern(w=1)`` and a read concern ``ReadConcern(level="majority")`` to ``coll0``.
+        coll0 = coll0.with_options(
+            write_concern=WriteConcern(w=1), read_concern=ReadConcern(level="majority")
+        )
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper.
+        name = "test-search-index-case6"
+        model = {"name": name, "definition": {"mappings": {"dynamic": False}}}
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index-case6"``.
+        self.assertEqual(resp, name)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = await self.wait_for_ready(coll0, name)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    async def test_case_7(self):
+        """Driver handles index types."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Use these search and vector search definitions for indexes.
+        search_definition = {"mappings": {"dynamic": False}}
+        vector_search_definition = {
+            "fields": [
+                {
+                    "type": "vector",
+                    "path": "plot_embedding",
+                    "numDimensions": 1536,
+                    "similarity": "euclidean",
+                },
+            ]
+        }
+
+        # Create a new search index on ``coll0`` that implicitly passes its type.
+        implicit_search_resp = await coll0.create_search_index(
+            model={"name": _NAME + "-implicit", "definition": search_definition}
+        )
+
+        # Get the index definition.
+        resp = await (await coll0.list_search_indexes(name=implicit_search_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"search"``.
+        self.assertEqual(resp["type"], "search")
+
+        # Create a new search index on ``coll0`` that explicitly passes its type.
+        explicit_search_resp = await coll0.create_search_index(
+            model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition}
+        )
+
+        # Get the index definition.
+        resp = await (await coll0.list_search_indexes(name=explicit_search_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"search"``.
+        self.assertEqual(resp["type"], "search")
+
+        # Create a new vector search index on ``coll0`` that explicitly passes its type.
+        explicit_vector_resp = await coll0.create_search_index(
+            model={
+                "name": _NAME + "-vector",
+                "type": "vectorSearch",
+                "definition": vector_search_definition,
+            }
+        )
+
+        # Get the index definition.
+        resp = await (await coll0.list_search_indexes(name=explicit_vector_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"vectorSearch"``.
+ self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + await coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_json_util_integration.py b/test/asynchronous/test_json_util_integration.py new file mode 100644 index 0000000000..32312cb9d3 --- /dev/null +++ b/test/asynchronous/test_json_util_integration.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from test.asynchronous import AsyncIntegrationTest +from typing import Any, List, MutableMapping + +from bson import Binary, Code, DBRef, ObjectId, json_util +from bson.binary import USER_DEFINED_SUBTYPE + +_IS_SYNC = False + + +class TestJsonUtilRoundtrip(AsyncIntegrationTest): + async def test_cursor(self): + db = self.db + + await db.drop_collection("test") + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + ] + + await db.test.insert_many(docs) + reloaded_docs = json_util.loads(json_util.dumps(await (db.test.find()).to_list())) + for doc in docs: + self.assertIn(doc, reloaded_docs) diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py new file mode 100644 index 0000000000..17d85841f9 --- /dev/null +++ b/test/asynchronous/test_load_balancer.py @@ -0,0 +1,194 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" +from __future__ import annotations + +import asyncio +import gc +import os +import pathlib +import sys +import threading +from asyncio import Event +from test.asynchronous.helpers import ConcurrentRunner, ExceptionCatchingTask +from test.asynchronous.utils import async_get_pool + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import ( + async_wait_until, + create_async_event, +) + +_IS_SYNC = False + +pytestmark = pytest.mark.load_balancer + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer") + +# Generate unified tests. 
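+# generate_test_classes() creates a TestCase subclass for each JSON spec file;
+# injecting them into the module globals lets unittest discover them.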
+globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +class TestLB(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + async def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") + pool = await async_get_pool(self.client) + n_conns = len(pool.conns) + await self.db.test.find_one({}) + self.assertEqual(len(pool.conns), n_conns) + await (await self.db.test.aggregate([{"$limit": 1}])).to_list() + self.assertEqual(len(pool.conns), n_conns) + + @async_client_context.require_load_balancer + async def test_unpin_committed_transaction(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + async with client.start_session() as session: + async with await session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + await coll.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + @async_client_context.require_failCommand_fail_point + async def test_cursor_gc(self): + async def create_resource(coll): + cursor = coll.find({}, batch_size=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + @async_client_context.require_failCommand_fail_point + async def test_command_cursor_gc(self): + async def create_resource(coll): + cursor = await coll.aggregate([], batchSize=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + async def _test_no_gc_deadlock(self, create_resource): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + await coll.insert_many([{} for _ in range(10)]) + self.assertEqual(pool.active_sockets, 0) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, + } + async with self.fail_point(args): + resource = await create_resource(coll) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await coll.delete_many({}) + + @async_client_context.require_transactions + async def test_session_gc(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + session = client.start_session() + await session.start_transaction() + await client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server + self.addAsyncCleanup(self.client.admin.command, "killSessions", [session.session_id]) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. 
+ + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. + del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await client[self.db.name].test.delete_many({}) + + +class PoolLocker(ExceptionCatchingTask): + def __init__(self, pool): + super().__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = create_async_event() + self.unlock = create_async_event() + + async def lock_pool(self): + async with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = await self.wait(self.unlock, 10) + if not unlock_pool: + raise Exception("timed out waiting for unlock signal: deadlock?") + + async def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + await asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_locks.py b/test/asynchronous/test_locks.py new file mode 100644 index 0000000000..e5a0adfee6 --- /dev/null +++ b/test/asynchronous/test_locks.py @@ -0,0 +1,462 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for lock.py""" +from __future__ import annotations + +import asyncio +import sys +import unittest + +from pymongo.lock import _async_create_condition, _async_create_lock + +sys.path[0:0] = [""] + +if sys.version_info < (3, 13): + # Tests adapted from: https://github.com/python/cpython/blob/v3.13.0rc2/Lib/test/test_asyncio/test_locks.py + # Includes tests for: + # - https://github.com/python/cpython/issues/111693 + # - https://github.com/python/cpython/issues/112202 + class TestConditionStdlib(unittest.IsolatedAsyncioTestCase): + async def test_wait(self): + cond = _async_create_condition(_async_create_lock()) + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + await cond.acquire() + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + await cond.acquire() + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertFalse(cond.locked()) + + self.assertTrue(await cond.acquire()) + cond.notify() + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.notify(2) + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2, 3], result) + self.assertTrue(cond.locked()) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_wait_cancel(self): + cond = _async_create_condition(_async_create_lock()) + await cond.acquire() + + wait = asyncio.create_task(cond.wait()) + asyncio.get_running_loop().call_soon(wait.cancel) + with self.assertRaises(asyncio.CancelledError): + await wait + self.assertFalse(cond._waiters) + self.assertTrue(cond.locked()) + + async def test_wait_cancel_contested(self): + cond = _async_create_condition(_async_create_lock()) + + await cond.acquire() + self.assertTrue(cond.locked()) + + wait_task = asyncio.create_task(cond.wait()) + await asyncio.sleep(0) + self.assertFalse(cond.locked()) + + # Notify, but contest the lock before cancelling + await cond.acquire() + self.assertTrue(cond.locked()) + cond.notify() + asyncio.get_running_loop().call_soon(wait_task.cancel) + asyncio.get_running_loop().call_soon(cond.release) + + try: + await wait_task + except asyncio.CancelledError: + # Should not happen, since no cancellation points + pass + + self.assertTrue(cond.locked()) + + async def test_wait_cancel_after_notify(self): + # See bpo-32841 + waited = False + + cond = _async_create_condition(_async_create_lock()) + + async def wait_on_cond(): + nonlocal waited + async with cond: + waited = True # Make sure this area was reached + await cond.wait() + + waiter = asyncio.create_task(wait_on_cond()) + await asyncio.sleep(0) # Start waiting + + await cond.acquire() + cond.notify() + await asyncio.sleep(0) # Get to acquire() + waiter.cancel() + await asyncio.sleep(0) # Activate cancellation + cond.release() + await asyncio.sleep(0) # 
Cancellation should occur + + self.assertTrue(waiter.cancelled()) + self.assertTrue(waited) + + async def test_wait_unacquired(self): + cond = _async_create_condition(_async_create_lock()) + with self.assertRaises(RuntimeError): + await cond.wait() + + async def test_wait_for(self): + cond = _async_create_condition(_async_create_lock()) + presult = False + + def predicate(): + return presult + + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait_for(predicate): + result.append(1) + cond.release() + return True + + t = asyncio.create_task(c1(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([], result) + + presult = True + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + + self.assertTrue(t.done()) + self.assertTrue(t.result()) + + async def test_wait_for_unacquired(self): + cond = _async_create_condition(_async_create_lock()) + + # predicate can return true immediately + res = await cond.wait_for(lambda: [1, 2, 3]) + self.assertEqual([1, 2, 3], res) + + with self.assertRaises(RuntimeError): + await cond.wait_for(lambda: False) + + async def test_notify(self): + cond = _async_create_condition(_async_create_lock()) + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + async with cond: + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify(1) + await asyncio.sleep(1) + self.assertEqual([1], result) + + async with cond: + cond.notify(1) + cond.notify(2048) + await asyncio.sleep(1) + self.assertEqual([1, 2, 3], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_notify_all(self): + cond = _async_create_condition(_async_create_lock()) + + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify_all() + await asyncio.sleep(1) + self.assertEqual([1, 2], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + + async def test_context_manager(self): + cond = _async_create_condition(_async_create_lock()) + self.assertFalse(cond.locked()) + async with cond: + self.assertTrue(cond.locked()) + self.assertFalse(cond.locked()) + + async def test_timeout_in_block(self): + condition = _async_create_condition(_async_create_lock()) + async with condition: + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(condition.wait(), timeout=0.5) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_wakeup(self): + # Test that a cancelled error, 
received when awaiting wakeup,
+            # will be re-raised unmodified.
+            wake = False
+            raised = None
+            cond = _async_create_condition(_async_create_lock())
+
+            async def func():
+                nonlocal raised
+                async with cond:
+                    with self.assertRaises(asyncio.CancelledError) as err:
+                        await cond.wait_for(lambda: wake)
+                    raised = err.exception
+                    raise raised
+
+            task = asyncio.create_task(func())
+            await asyncio.sleep(0)
+            # Task is waiting on the condition, cancel it there.
+            task.cancel(msg="foo")  # type: ignore[call-arg]
+            with self.assertRaises(asyncio.CancelledError) as err:
+                await task
+            self.assertEqual(err.exception.args, ("foo",))
+            # We should have got the _same_ exception instance as the one
+            # originally raised.
+            self.assertIs(err.exception, raised)

+        @unittest.skipIf(
+            sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11"
+        )
+        async def test_cancelled_error_re_acquire(self):
+            # Test that a cancelled error, received when re-acquiring the lock,
+            # will be re-raised unmodified.
+            wake = False
+            raised = None
+            cond = _async_create_condition(_async_create_lock())
+
+            async def func():
+                nonlocal raised
+                async with cond:
+                    with self.assertRaises(asyncio.CancelledError) as err:
+                        await cond.wait_for(lambda: wake)
+                    raised = err.exception
+                    raise raised
+
+            task = asyncio.create_task(func())
+            await asyncio.sleep(0)
+            # Task is waiting on the condition
+            await cond.acquire()
+            wake = True
+            cond.notify()
+            await asyncio.sleep(0)
+            # Task is now trying to re-acquire the lock, cancel it there.
+            task.cancel(msg="foo")  # type: ignore[call-arg]
+            cond.release()
+            with self.assertRaises(asyncio.CancelledError) as err:
+                await task
+            self.assertEqual(err.exception.args, ("foo",))
+            # We should have got the _same_ exception instance as the one
+            # originally raised.
+            self.assertIs(err.exception, raised)
+
+        @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11")
+        async def test_cancelled_wakeup(self):
+            # Test that a task cancelled at the "same" time as it is woken
+            # up as part of a Condition.notify() does not result in a lost wakeup.
+            # This test simulates a cancel while the target task is awaiting initial
+            # wakeup on the wakeup queue.
+            condition = _async_create_condition(_async_create_lock())
+            state = 0
+
+            async def consumer():
+                nonlocal state
+                async with condition:
+                    while True:
+                        await condition.wait_for(lambda: state != 0)
+                        if state < 0:
+                            return
+                        state -= 1
+
+            # create two consumers
+            c = [asyncio.create_task(consumer()) for _ in range(2)]
+            # wait for them to settle
+            await asyncio.sleep(0.1)
+            async with condition:
+                # produce one item and wake up one
+                state += 1
+                condition.notify(1)
+
+                # Cancel it while it is awaiting to be run.
+                # This cancellation could come from the outside.
+                c[0].cancel()
+
+                # now wait for the item to be consumed
+                # if it isn't, it means that our "notify" didn't take hold
+                # because it raced with a cancel()
+                try:
+                    async with asyncio.timeout(1):
+                        await condition.wait_for(lambda: state == 0)
+                except TimeoutError:
+                    pass
+                self.assertEqual(state, 0)
+
+                # clean up
+                state = -1
+                condition.notify_all()
+            await c[1]
+
+        @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11")
+        async def test_cancelled_wakeup_relock(self):
+            # Test that a task cancelled at the "same" time as it is woken
+            # up as part of a Condition.notify() does not result in a lost wakeup.
+            # This test simulates a cancel while the target task is acquiring the lock
+            # again.
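+            # (Roughly: notify() resolves the waiter's future, after which the
+            # waiter must re-acquire the lock inside wait(); the cancel below
+            # is timed to land during that second phase.)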
+            condition = _async_create_condition(_async_create_lock())
+            state = 0
+
+            async def consumer():
+                nonlocal state
+                async with condition:
+                    while True:
+                        await condition.wait_for(lambda: state != 0)
+                        if state < 0:
+                            return
+                        state -= 1
+
+            # create two consumers
+            c = [asyncio.create_task(consumer()) for _ in range(2)]
+            # wait for them to settle
+            await asyncio.sleep(0.1)
+            async with condition:
+                # produce one item and wake up one
+                state += 1
+                condition.notify(1)
+
+                # now we sleep for a bit. This allows the target task to wake up and
+                # settle on re-acquiring the lock
+                await asyncio.sleep(0)
+
+                # Cancel it while it is awaiting the lock.
+                # This cancel could come from the outside.
+                c[0].cancel()
+
+                # now wait for the item to be consumed
+                # if it isn't, it means that our "notify" didn't take hold
+                # because it raced with a cancel()
+                try:
+                    async with asyncio.timeout(1):
+                        await condition.wait_for(lambda: state == 0)
+                except TimeoutError:
+                    pass
+                self.assertEqual(state, 0)
+
+                # clean up
+                state = -1
+                condition.notify_all()
+            await c[1]
+
+    if __name__ == "__main__":
+        unittest.main()
diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py
new file mode 100644
index 0000000000..d024735fd8
--- /dev/null
+++ b/test/asynchronous/test_logger.py
@@ -0,0 +1,145 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations + +import os +from test import unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context +from unittest.mock import patch + +from bson import json_util +from pymongo.errors import OperationFailure +from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH + +_IS_SYNC = False + + +# https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests +class TestLogger(AsyncIntegrationTest): + async def test_default_truncation_limit(self): + docs = [{"x": "y"} for _ in range(100)] + db = self.db + + with patch.dict("os.environ"): + os.environ.pop("MONGOB_LOG_MAX_DOCUMENT_LENGTH", None) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.test.insert_many(docs) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.test.find({}).to_list() + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + async def test_configured_truncation_limit(self): + cmd = {"hello": True} + db = self.db + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": "5"}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.command(cmd) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), 5 + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) + with self.assertRaises(OperationFailure): + await db.command({"notARealCommand": True}) + cmd_failed_log = json_util.loads(cm.records[-1].getMessage()) + self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) + + async def test_truncation_multi_byte_codepoints(self): + document_lengths = ["20000", "20001", "20002"] + multi_byte_char_str_len = 50_000 + str_to_repeat = "界" + + multi_byte_char_str = "" + for i in range(multi_byte_char_str_len): + multi_byte_char_str += str_to_repeat + + for length in document_lengths: + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.insert_one({"x": multi_byte_char_str}) + cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"] + + cmd_started_log = cmd_started_log[:-3] + last_3_bytes = cmd_started_log.encode()[-3:].decode() + + self.assertEqual(last_3_bytes, str_to_repeat) + + async def test_logging_without_listeners(self): + c = await self.async_single_client() + self.assertEqual(len(c._event_listeners.event_listeners()), 0) + with self.assertLogs("pymongo.connection", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.serverSelection", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + + @async_client_context.require_failCommand_fail_point + async def test_logging_retry_read_attempts(self): + await 
self.db.test.insert_one({"x": "1"}) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + @async_client_context.require_failCommand_fail_point + @async_client_context.require_retryable_writes + async def test_logging_retry_write_attempts(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_max_staleness.py b/test/asynchronous/test_max_staleness.py new file mode 100644 index 0000000000..b6e15f9158 --- /dev/null +++ b/test/asynchronous/test_max_staleness.py @@ -0,0 +1,149 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test maxStalenessSeconds support.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +import warnings +from pathlib import Path + +from pymongo import AsyncMongoClient +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest +from test.asynchronous.utils_selection_tests import create_selection_tests + +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestMaxStaleness(AsyncPyMongoTestCase): + async def test_max_staleness(self): + client = self.simple_client() + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary") + self.assertEqual(-1, client.read_preference.max_staleness) + + # These tests are specified in max-staleness-tests.rst. + with self.assertRaises(ConfigurationError): + # Default read pref "primary" can't be used with max staleness. + self.simple_client("mongodb://a/?maxStalenessSeconds=120") + + with self.assertRaises(ConfigurationError): + # Read pref "primary" can't be used with max staleness. 
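+            # Same restriction when "primary" is spelled out explicitly in the URI.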
+            self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120")
+
+        client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client(
+            "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120"
+        )
+        self.assertEqual(120, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1")
+        self.assertEqual(1, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        with self.assertRaises(TypeError):
+            # Prohibit None.
+            self.simple_client(maxStalenessSeconds=None, readPreference="nearest")
+
+    async def test_max_staleness_float(self):
+        with self.assertRaises(TypeError) as ctx:
+            await self.async_rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest")
+
+        self.assertIn("must be an integer", str(ctx.exception))
+
+        with warnings.catch_warnings(record=True) as ctx:
+            warnings.simplefilter("always")
+            client = self.simple_client(
+                "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest"
+            )
+
+            # Option was ignored.
+            self.assertEqual(-1, client.read_preference.max_staleness)
+            self.assertIn("must be an integer", str(ctx[0]))
+
+    async def test_max_staleness_zero(self):
+        # Zero is too small.
+        with self.assertRaises(ValueError) as ctx:
+            await self.async_rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest")
+
+        self.assertIn("must be a positive integer", str(ctx.exception))
+
+        with warnings.catch_warnings(record=True) as ctx:
+            warnings.simplefilter("always")
+            client = self.simple_client(
+                "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest"
+            )
+
+            # Option was ignored.
+            self.assertEqual(-1, client.read_preference.max_staleness)
+            self.assertIn("must be a positive integer", str(ctx[0]))
+
+    @async_client_context.require_replica_set
+    async def test_last_write_date(self):
+        # From max-staleness-tests.rst, "Parse lastWriteDate".
+        client = await self.async_rs_or_single_client(heartbeatFrequencyMS=500)
+        await client.pymongo_test.test.insert_one({})
+        # Wait for the server description to be updated.
+        await asyncio.sleep(1)
+        server = await client._topology.select_server(writable_server_selector, _Op.TEST)
+        first = server.description.last_write_date
+        self.assertTrue(first)
+        # The first last_write_date may correspond to an internal server write;
+        # sleep so that the next write does not occur within the same second.
+        await asyncio.sleep(1)
+        await client.pymongo_test.test.insert_one({})
+        # Wait for the server description to be updated.
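+        # With heartbeatFrequencyMS=500, a one-second sleep spans at least one
+        # more heartbeat, refreshing the server description.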
+ await asyncio.sleep(1) + server = await client._topology.select_server(writable_server_selector, _Op.TEST) + second = server.description.last_write_date + assert first is not None + + assert second is not None + self.assertGreater(second, first) + self.assertLess(second, first + 10) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_mongos_load_balancing.py b/test/asynchronous/test_mongos_load_balancing.py new file mode 100644 index 0000000000..97170aa9e0 --- /dev/null +++ b/test/asynchronous/test_mongos_load_balancing.py @@ -0,0 +1,199 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test AsyncMongoClient's mongos load balancing using a mock.""" +from __future__ import annotations + +import asyncio +import sys +import threading +from test.asynchronous.helpers import ConcurrentRunner + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncMockClientTest, async_client_context, connected, unittest +from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.utils_shared import async_wait_until + +from pymongo.errors import AutoReconnect, InvalidOperation +from pymongo.server_selectors import writable_server_selector +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + + +class SimpleOp(ConcurrentRunner): + def __init__(self, client): + super().__init__() + self.client = client + self.passed = False + + async def run(self): + await self.client.db.command("ping") + self.passed = True # No exception raised. + + +async def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed + + +async def writable_addresses(topology): + return { + server.description.address + for server in await topology.select_servers(writable_server_selector, _Op.TEST) + } + + +class TestMongosLoadBalancing(AsyncMockClientTest): + @async_client_context.require_connection + @async_client_context.require_no_load_balancer + async def asyncSetUp(self): + await super().asyncSetUp() + + def mock_client(self, **kwargs): + mock_client = AsyncMockClient( + standalones=[], + members=[], + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", + connect=False, + **kwargs, + ) + self.addAsyncCleanup(mock_client.aclose) + + # Latencies in seconds. + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 + return mock_client + + async def test_lazy_connect(self): + # While connected() ensures we can trigger connection from the main + # thread and wait for the monitors, this test triggers connection from + # several threads at once to check for data races. + nthreads = 10 + client = self.mock_client() + self.assertEqual(0, len(client.nodes)) + + # Trigger initial connection. 
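+        # Ten concurrent pings race the initial handshake; a data race in
+        # topology discovery would tend to surface here.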
+ await do_simple_op(client, nthreads) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + async def test_failover(self): + ntasks = 10 + client = await connected(self.mock_client(localThresholdMS=0.001)) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Our chosen mongos goes down. + client.kill_host("a:1") + + # Trigger failover to higher-latency nodes. AutoReconnect should be + # raised at most once in each thread. + passed = [] + + async def f(): + try: + await client.db.command("ping") + except AutoReconnect: + # Second attempt succeeds. + await client.db.command("ping") + + passed.append(True) + + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + self.assertEqual(ntasks, len(passed)) + + # Down host removed from list. + self.assertEqual(2, len(client.nodes)) + + async def test_local_threshold(self): + client = await connected(self.mock_client(localThresholdMS=30)) + self.assertEqual(30, client.options.local_threshold_ms) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + topology = client._topology + + # All are within a 30-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, await writable_addresses(topology)) + + # No error + await client.admin.command("ping") + + client = await connected(self.mock_client(localThresholdMS=0)) + self.assertEqual(0, client.options.local_threshold_ms) + # No error + await client.db.command("ping") + # Our chosen mongos goes down. + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) + try: + await client.db.command("ping") + except: + pass + + # We eventually connect to a new mongos. + async def connect_to_new_mongos(): + try: + return await client.db.command("ping") + except AutoReconnect: + pass + + await async_wait_until(connect_to_new_mongos, "connect to a new mongos") + + async def test_load_balancing(self): + # Although the server selection JSON tests already prove that + # select_servers works for sharded topologies, here we do an end-to-end + # test of discovering servers' round trip times and configuring + # localThresholdMS. + client = await connected(self.mock_client()) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Prohibited for topology type Sharded. + with self.assertRaises(InvalidOperation): + await client.address + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) + + # a and b are within the 15-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2)}, await writable_addresses(topology)) + + client.mock_rtts["a:1"] = 0.045 + + # Discover only b is within latency window. + async def predicate(): + return {("b", 2)} == await writable_addresses(topology) + + await async_wait_until( + predicate, + 'discover server "a" is too far', + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py new file mode 100644 index 0000000000..dde8976c06 --- /dev/null +++ b/test/asynchronous/test_monitor.py @@ -0,0 +1,125 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitor module.""" +from __future__ import annotations + +import asyncio +import gc +import subprocess +import sys +import warnings +from functools import partial + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest +from test.asynchronous.utils import ( + async_wait_until, +) +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched + +from pymongo.periodic_executor import _EXECUTORS + +_IS_SYNC = False + + +def unregistered(ref): + gc.collect() + return ref not in _EXECUTORS + + +def get_executors(client): + executors = [] + for server in client._topology._servers.values(): + executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) + executors.append(client._kill_cursors_executor) + executors.append(client._topology._Topology__events_executor) + return [e for e in executors if e is not None] + + +class TestMonitor(AsyncIntegrationTest): + async def create_client(self): + listener = ServerAndTopologyEventListener() + client = await self.unmanaged_async_single_client(event_listeners=[listener]) + await connected(client) + return client + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) + async def test_cleanup_executors_on_client_del(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + + del executors + del client + + for ref, name in executor_refs: + await async_wait_until( + partial(unregistered, ref), f"unregister executor: {name}", timeout=5 + ) + + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call AsyncMongoClient.close() to safely shut down your client and free up resources." 
+ in str(warning.message) + ): + return True + return False + + await async_wait_until(resource_warning_caught, "catch resource warning") + + async def test_cleanup_executors_on_client_close(self): + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + await client.close() + + for executor in executors: + await async_wait_until( + lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5 + ) + + @async_client_context.require_sync + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the AsyncMongoClient spawns a new thread + on process shutdown.""" + command = [ + sys.executable, + "-c", + "from pymongo import AsyncMongoClient; c = AsyncMongoClient()", + ] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py new file mode 100644 index 0000000000..6a9a5b8da7 --- /dev/null +++ b/test/asynchronous/test_monitoring.py @@ -0,0 +1,1270 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
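+
+# The tests below assert on the CommandStartedEvent / CommandSucceededEvent /
+# CommandFailedEvent stream captured by test listeners. As an illustrative
+# sketch (not part of this test suite; PingLogger is a hypothetical name), an
+# application would subscribe to the same events by registering a listener:
+#
+#     class PingLogger(monitoring.CommandListener):
+#         def started(self, event):
+#             print(f"{event.command_name} started on {event.connection_id}")
+#
+#         def succeeded(self, event):
+#             pass
+#
+#         def failed(self, event):
+#             pass
+#
+#     client = AsyncMongoClient(event_listeners=[PingLogger()])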
+from __future__ import annotations + +import asyncio +import copy +import datetime +import sys +import time +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + async_wait_until, +) + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncTestCommandMonitoring(AsyncIntegrationTest): + listener: EventListener + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.listener.reset() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=False + ) + + async def test_started_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + async def test_succeeded_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual("ping", succeeded.command_name) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(1, succeeded.reply.get("ok")) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) + + async def test_failed_simple(self): + try: + await self.client.pymongo_test.command("oops!") + except OperationFailure: + pass + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("oops!", failed.command_name) + self.assertEqual(await self.client.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + + async def test_find_one(self): + await self.client.pymongo_test.test.find_one() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + 
self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + async def test_find_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) + + self.listener.reset() + # Next batch. Exhausting the cursor could cause a getMore + # that returns id of 0 and no results. + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_with_explain(self): + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. 
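+ # (With a non-primary read preference on a mongos, the driver attaches
+ # $readPreference to what it sends over the wire; the listener should
+ # still see the command exactly as the application issued it.)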
+ if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + res = await coll.find().explain() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(cmd, started.command) + self.assertEqual("explain", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("explain", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(res, succeeded.reply) + + async def _test_find_options(self, query, expected_cmd): + coll = self.client.pymongo_test.test + await coll.drop() + await coll.create_index("x") + await coll.insert_many([{"x": i} for i in range(5)]) + + # Test that we publish the unwrapped command. + self.listener.reset() + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + + cursor = coll.find(**query) + + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(expected_cmd, started.command) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(await self.client.address, succeeded.connection_id) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_options(self): + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } + + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } + + if async_client_context.version < (4, 1, 0, -1): + query["max_scan"] = 10 + cmd["maxScan"] = 10 + + await self._test_find_options(query, cmd) + + @async_client_context.require_version_max(3, 7, 2) + async def test_find_snapshot(self): + # Test "snapshot" parameter separately, can't combine with "sort". 
+ query = {"filter": {}, "snapshot": True} + + cmd = {"find": "test", "filter": {}, "snapshot": True} + + await self._test_find_options(query, cmd) + + async def test_command_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = await coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("aggregate", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) + + self.listener.reset() + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } + self.assertEqualReply(expected_result, succeeded.reply) + finally: + # Exhaust the cursor to avoid kill cursors. 
+ tuple(await cursor.to_list()) + + async def test_get_more_failure(self): + address = await self.client.address + coll = self.client.pymongo_test.test + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} + cursor = AsyncCommandCursor(coll, cursor_doc, address) + try: + await anext(cursor) + except Exception: + pass + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual("getMore", failed.command_name) + self.assertIsInstance(failed.request_id, int) + self.assertEqual(cursor.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + + @async_client_context.require_replica_set + @async_client_context.require_secondaries_count(1) + async def test_not_primary_error(self): + address = next(iter(await async_client_context.client.secondaries)) + client = await self.async_single_client(*address, event_listeners=[self.listener]) + # Clear authentication command results from the listener. + await client.admin.command("ping") + self.listener.reset() + error = None + try: + await client.pymongo_test.test.find_one_and_delete({}) + except NotPrimaryError as exc: + error = exc.errors + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("findAndModify", failed.command_name) + self.assertEqual(address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual(error, failed.failure) + + @async_client_context.require_no_mongos + async def test_exhaust(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find( + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + 
self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } + self.assertEqualReply(expected_result, succeeded.reply) + + self.listener.reset() + tuple(await cursor.to_list()) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertIsInstance(event, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual("pymongo_test", event.database_name) + self.assertIsInstance(event.request_id, int) + for event in self.listener.succeeded_events: + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) + self.assertEqual("getMore", event.command_name) + self.assertIsInstance(event.request_id, int) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) + + async def test_kill_cursors(self): + with client_knobs(kill_cursor_frequency=0.01): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + cursor = self.client.pymongo_test.test.find().batch_size(5) + await anext(cursor) + cursor_id = cursor.cursor_id + self.listener.reset() + await cursor.close() + await asyncio.sleep(2) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) + self.assertIs(type(started.connection_id), tuple) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("killCursors", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertIs(type(succeeded.connection_id), tuple) + self.assertEqual(cursor.address, succeeded.connection_id) + # There could be more than one cursor_id here depending on + # when the thread last ran. 
+ self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] + ) + + async def test_non_bulk_writes(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + # Implied write concern insert_one + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # Unacknowledged insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=0)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqualReply(succeeded.reply, {"ok": 1}) + + # Explicit write concern insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=1)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + 
self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_many + self.listener.reset() + res = await coll.delete_many({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) + + # replace_one + self.listener.reset() + oid = ObjectId() + res = await coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) + + # update_one + self.listener.reset() + res = await coll.update_one({"x": 1}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + 
self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # update_many + self.listener.reset() + res = await coll.update_many({"x": 2}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_one + self.listener.reset() + _ = await coll.delete_one({"x": 3}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + self.assertEqual(0, await coll.count_documents({})) + + # write errors + await coll.insert_one({"_id": 1}) + try: + self.listener.reset() + await coll.insert_one({"_id": 1}) + except OperationFailure: + pass + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", 
started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") + self.assertIsInstance(errors, list) + error = errors[0] + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) + + async def test_insert_many(self): + # This always uses the bulk API. + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] + await coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + count = 0 + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + reply = succeed.reply + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) + self.assertEqual(documents, docs) + self.assertEqual(6, count) + + async def test_insert_many_unacknowledged(self): + coll = self.client.pymongo_test.test + await coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.reset() + + # Force two batches on legacy servers. 
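+ # (Six ~12 MB documents total roughly 72 MB, which exceeds the 48 MB
+ # maximum message size, so the insert must be split into at least two
+ # commands.)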
+ big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] + await unack_coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get("ok")) + self.assertEqual(documents, docs) + + async def check(): + return await coll.count_documents({}) == 6 + + await async_wait_until(check, "insert documents with w=0") + + async def test_bulk_write(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + await coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + self.assertEqual(3, len(pairs)) + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) + self.assertEqualCommand(expected, started[0].command) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) + self.assertEqualCommand(expected, started[1].command) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) + self.assertEqualCommand(expected, started[2].command) + + @async_client_context.require_failCommand_fail_point + async def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + 
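+ # The failCommand fail point makes the server fail the next matching
+ # command; "closeConnection": True drops the connection, which the
+ # driver surfaces as AutoReconnect.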
+ insert_network_error = {
+ "configureFailPoint": "failCommand",
+ "mode": {"times": 1},
+ "data": {
+ "failCommands": ["insert"],
+ "closeConnection": True,
+ },
+ }
+ async with self.fail_point(insert_network_error):
+ with self.assertRaises(AutoReconnect):
+ await coll.bulk_write([InsertOne({"_id": 1})])
+ failed = self.listener.failed_events
+ self.assertEqual(1, len(failed))
+ event = failed[0]
+ self.assertEqual(event.command_name, "insert")
+ self.assertIsInstance(event.failure, dict)
+ self.assertEqual(event.failure["errtype"], "AutoReconnect")
+ self.assertTrue(event.failure["errmsg"])
+
+ @async_client_context.require_failCommand_fail_point
+ async def test_bulk_write_command_error(self):
+ coll = self.client.pymongo_test.test
+ self.listener.reset()
+
+ insert_command_error = {
+ "configureFailPoint": "failCommand",
+ "mode": {"times": 1},
+ "data": {
+ "failCommands": ["insert"],
+ "closeConnection": False,
+ "errorCode": 10107, # Not primary
+ },
+ }
+ async with self.fail_point(insert_command_error):
+ with self.assertRaises(NotPrimaryError):
+ await coll.bulk_write([InsertOne({"_id": 1})])
+ failed = self.listener.failed_events
+ self.assertEqual(1, len(failed))
+ event = failed[0]
+ self.assertEqual(event.command_name, "insert")
+ self.assertIsInstance(event.failure, dict)
+ self.assertEqual(event.failure["code"], 10107)
+ self.assertTrue(event.failure["errmsg"])
+
+ async def test_write_errors(self):
+ coll = self.client.pymongo_test.test
+ await coll.drop()
+ self.listener.reset()
+
+ try:
+ await coll.bulk_write(
+ [
+ InsertOne({"_id": 1}),
+ InsertOne({"_id": 1}),
+ InsertOne({"_id": 1}),
+ DeleteOne({"_id": 1}),
+ ],
+ ordered=False,
+ )
+ except OperationFailure:
+ pass
+ started = self.listener.started_events
+ succeeded = self.listener.succeeded_events
+ self.assertEqual(0, len(self.listener.failed_events))
+ operation_id = started[0].operation_id
+ pairs = list(zip(started, succeeded))
+ errors = []
+ for start, succeed in pairs:
+ self.assertIsInstance(start, monitoring.CommandStartedEvent)
+ self.assertEqual("pymongo_test", start.database_name)
+ self.assertIsInstance(start.request_id, int)
+ self.assertEqual(await self.client.address, start.connection_id)
+ self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
+ self.assertIsInstance(succeed.duration_micros, int)
+ self.assertEqual(start.command_name, succeed.command_name)
+ self.assertEqual(start.request_id, succeed.request_id)
+ self.assertEqual(start.connection_id, succeed.connection_id)
+ self.assertEqual(start.operation_id, operation_id)
+ self.assertEqual(succeed.operation_id, operation_id)
+ if "writeErrors" in succeed.reply:
+ errors.extend(succeed.reply["writeErrors"])
+
+ self.assertEqual(2, len(errors))
+ fields = {"index", "code", "errmsg"}
+ for error in errors:
+ self.assertLessEqual(fields, set(error))
+
+ async def test_first_batch_helper(self):
+ # Regardless of server version and use of helpers._first_batch
+ # this test should still pass.
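+ # (listIndexes replies with a cursor document, so whichever code path
+ # decodes the first batch, the listener should see one started/succeeded
+ # pair whose reply contains "cursor" and "ok".)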
+ self.listener.reset() + tuple(await (await self.client.pymongo_test.test.list_indexes()).to_list()) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON([("listIndexes", "test"), ("cursor", {})]) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) + + self.listener.reset() + + @async_client_context.require_version_max(6, 1, 99) + async def test_sensitive_commands(self): + listener = EventListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners + + listener.reset() + cmd = SON([("getnonce", 1)]) + listeners.publish_command_start(cmd, "pymongo_test", 12345, await client.address, None) # type: ignore[arg-type] + delta = datetime.timedelta(milliseconds=100) + listeners.publish_command_success( + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + await self.client.address, # type: ignore[arg-type] + None, + database_name="pymongo_test", + ) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqual({}, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual(succeeded.duration_micros, 100000) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqual({}, succeeded.reply) + + +class AsyncTestGlobalListener(AsyncIntegrationTest): + listener: EventListener + saved_listeners: Any + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + # We plan to call register(), which internally modifies _LISTENERS. + cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) + monitoring.register(cls.listener) + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener.reset() + self.client = await self.async_single_client() + # Get one (authenticated) socket in the pool. 
+ await self.client.pymongo_test.command("ping")
+
+ @classmethod
+ def tearDownClass(cls):
+ monitoring._LISTENERS = cls.saved_listeners
+
+ async def test_simple(self):
+ await self.client.pymongo_test.command("ping")
+ started = self.listener.started_events[0]
+ succeeded = self.listener.succeeded_events[0]
+ self.assertEqual(0, len(self.listener.failed_events))
+ self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
+ self.assertIsInstance(started, monitoring.CommandStartedEvent)
+ self.assertEqualCommand(SON([("ping", 1)]), started.command)
+ self.assertEqual("ping", started.command_name)
+ self.assertEqual(await self.client.address, started.connection_id)
+ self.assertEqual("pymongo_test", started.database_name)
+ self.assertIsInstance(started.request_id, int)
+
+
+class AsyncTestEventClasses(unittest.IsolatedAsyncioTestCase):
+ def test_command_event_repr(self):
+ request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin"
+ event = monitoring.CommandStartedEvent(
+ {"ping": 1}, db_name, request_id, connection_id, operation_id
+ )
+ self.assertEqual(
+ repr(event),
+ "<CommandStartedEvent ('localhost', 27017) db: 'admin', "
+ "command: 'ping', operation_id: 2, service_id: None, server_connection_id: None>",
+ )
+ delta = datetime.timedelta(milliseconds=100)
+ event = monitoring.CommandSucceededEvent(
+ delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name
+ )
+ self.assertEqual(
+ repr(event),
+ "<CommandSucceededEvent ('localhost', 27017) db: 'admin', "
+ "command: 'ping', operation_id: 2, duration_micros: 100000, "
+ "service_id: None, server_connection_id: None>",
+ )
+ event = monitoring.CommandFailedEvent(
+ delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name
+ )
+ self.assertEqual(
+ repr(event),
+ "<CommandFailedEvent ('localhost', 27017) db: 'admin', "
+ "command: 'ping', operation_id: 2, duration_micros: 100000, "
+ "failure: {'ok': 0}, service_id: None, server_connection_id: None>",
+ )
+
+ def test_server_heartbeat_event_repr(self):
+ connection_id = ("localhost", 27017)
+ event = monitoring.ServerHeartbeatStartedEvent(connection_id)
+ self.assertEqual(
+ repr(event), "<ServerHeartbeatStartedEvent ('localhost', 27017) awaited: False>"
+ )
+ delta = 0.1
+ event = monitoring.ServerHeartbeatSucceededEvent(
+ delta,
+ {"ok": 1}, # type: ignore[arg-type]
+ connection_id,
+ )
+ self.assertEqual(
+ repr(event),
+ "<ServerHeartbeatSucceededEvent ('localhost', 27017) "
+ "duration: 0.1, awaited: False, reply: {'ok': 1}>",
+ )
+ event = monitoring.ServerHeartbeatFailedEvent(
+ delta,
+ "ERROR", # type: ignore[arg-type]
+ connection_id,
+ )
+ self.assertEqual(
+ repr(event),
+ "<ServerHeartbeatFailedEvent ('localhost', 27017) "
+ "duration: 0.1, awaited: False, reply: ERROR>",
+ )
+
+ def test_server_event_repr(self):
+ server_address = ("localhost", 27017)
+ topology_id = ObjectId("000000000000000000000001")
+ event = monitoring.ServerOpeningEvent(server_address, topology_id)
+ self.assertEqual(
+ repr(event),
+ "<ServerOpeningEvent ('localhost', 27017) topology_id: 000000000000000000000001>",
+ )
+ event = monitoring.ServerDescriptionChangedEvent(
+ "PREV", # type: ignore[arg-type]
+ "NEW", # type: ignore[arg-type]
+ server_address,
+ topology_id,
+ )
+ self.assertEqual(
+ repr(event),
+ "<ServerDescriptionChangedEvent ('localhost', 27017) changed from: PREV, to: NEW>",
+ )
+ event = monitoring.ServerClosedEvent(server_address, topology_id)
+ self.assertEqual(
+ repr(event),
+ "<ServerClosedEvent ('localhost', 27017) topology_id: 000000000000000000000001>",
+ )
+
+ def test_topology_event_repr(self):
+ topology_id = ObjectId("000000000000000000000001")
+ event = monitoring.TopologyOpenedEvent(topology_id)
+ self.assertEqual(repr(event), "<TopologyOpenedEvent topology_id: 000000000000000000000001>")
+ event = monitoring.TopologyDescriptionChangedEvent(
+ "PREV", # type: ignore[arg-type]
+ "NEW", # type: ignore[arg-type]
+ topology_id,
+ )
+ self.assertEqual(
+ repr(event),
+ "<TopologyDescriptionChangedEvent topology_id: 000000000000000000000001 "
+ "changed from: PREV, to: NEW>",
+ )
+ event = monitoring.TopologyClosedEvent(topology_id)
+ self.assertEqual(repr(event), "<TopologyClosedEvent topology_id: 000000000000000000000001>")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/asynchronous/test_on_demand_csfle.py b/test/asynchronous/test_on_demand_csfle.py
new file mode 100644
index 0000000000..55394ddeb8
--- /dev/null
+++ b/test/asynchronous/test_on_demand_csfle.py
@@ -0,0 +1,115 @@
+# Copyright 2022-present MongoDB, Inc.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context + +from bson.codec_options import CodecOptions +from pymongo.asynchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + AsyncClientEncryption, + EncryptionError, +) + +_IS_SYNC = False + +pytestmark = pytest.mark.kms + + +class TestonDemandGCPCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + await self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + 
self.client_encryption = AsyncClientEncryption(
+ kms_providers={"azure": {}},
+ key_vault_namespace="keyvault.datakeys",
+ key_vault_client=async_client_context.client,
+ codec_options=CodecOptions(),
+ )
+ await self.client_encryption.create_data_key("azure", self.master_key)
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py
new file mode 100644
index 0000000000..3193d9e3d5
--- /dev/null
+++ b/test/asynchronous/test_pooling.py
@@ -0,0 +1,614 @@
+# Copyright 2009-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test built-in connection pooling with threads."""
+from __future__ import annotations
+
+import asyncio
+import gc
+import random
+import socket
+import sys
+import time
+from test.asynchronous.utils import async_get_pool, async_joinall, flaky
+
+from bson.codec_options import DEFAULT_CODEC_OPTIONS
+from bson.son import SON
+from pymongo import AsyncMongoClient, message, timeout
+from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError
+from pymongo.hello import HelloCompat
+from pymongo.lock import _async_create_lock
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest
+from test.asynchronous.helpers import ConcurrentRunner
+from test.utils_shared import delay
+
+from pymongo.asynchronous.pool import Pool, PoolOptions
+from pymongo.socket_checker import SocketChecker
+
+_IS_SYNC = False
+
+
+N = 10
+DB = "pymongo-pooling-tests"
+
+
+async def gc_collect_until_done(tasks, timeout=60):
+ start = time.time()
+ running = list(tasks)
+ while running:
+ assert (time.time() - start) < timeout, "Tasks timed out"
+ for t in running:
+ await t.join(0.1)
+ if not t.is_alive():
+ running.remove(t)
+ gc.collect()
+
+
+class MongoTask(ConcurrentRunner):
+ """A thread/Task that uses an AsyncMongoClient."""
+
+ def __init__(self, client):
+ super().__init__()
+ self.daemon = True # Don't hang whole test if task hangs.
+ self.client = client
+ self.db = self.client[DB]
+ self.passed = False
+
+ async def run(self):
+ await self.run_mongo_thread()
+ self.passed = True
+
+ async def run_mongo_thread(self):
+ raise NotImplementedError
+
+
+class InsertOneAndFind(MongoTask):
+ async def run_mongo_thread(self):
+ for _ in range(N):
+ rand = random.randint(0, N)
+ _id = (await self.db.sf.insert_one({"x": rand})).inserted_id
+ assert rand == (await self.db.sf.find_one(_id))["x"]
+
+
+class Unique(MongoTask):
+ async def run_mongo_thread(self):
+ for _ in range(N):
+ await self.db.unique.insert_one({}) # no error
+
+
+class NonUnique(MongoTask):
+ async def run_mongo_thread(self):
+ for _ in range(N):
+ try:
+ await self.db.unique.insert_one({"_id": "jesse"})
+ except DuplicateKeyError:
+ pass
+ else:
+ raise AssertionError("Should have raised DuplicateKeyError")
+
+
+class SocketGetter(MongoTask):
+ """Utility for TestPooling.
+
+ Checks out a socket and holds it forever. Used in
+ test_no_wait_queue_timeout.
+ """ + + def __init__(self, client, pool): + super().__init__(client) + self.state = "init" + self.pool = pool + self.sock = None + + async def run_mongo_thread(self): + self.state = "get_socket" + + # Call 'pin_cursor' so we can hold the socket. + async with self.pool.checkout() as sock: + sock.pin_cursor() + self.sock = sock + + self.state = "connection" + + async def release_conn(self): + if self.sock: + await self.sock.unpin() + self.sock = None + return True + return False + + +async def run_cases(client, cases): + tasks = [] + n_runs = 5 + + for case in cases: + for _i in range(n_runs): + t = case(client) + await t.start() + tasks.append(t) + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed, "%s.run() threw an exception" % repr(t) + + +class _TestPoolingBase(AsyncIntegrationTest): + """Base class for all connection-pool tests.""" + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = await self.async_rs_or_single_client() + db = self.c[DB] + await db.unique.drop() + await db.test.drop() + await db.unique.insert_one({"_id": "jesse"}) + await db.test.insert_many([{} for _ in range(10)]) + + async def create_pool(self, pair=None, *args, **kwargs): + if pair is None: + pair = (await async_client_context.host, await async_client_context.port) + # Start the pool with the correct ssl options. + pool_options = async_client_context.client._topology_settings.pool_options + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api + pool = Pool(pair, PoolOptions(*args, **kwargs)) + await pool.ready() + return pool + + +class TestPooling(_TestPoolingBase): + async def test_max_pool_size_validation(self): + host, port = await async_client_context.host, await async_client_context.port + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize=-1) + + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize="foo") + + c = AsyncMongoClient(host=host, port=port, maxPoolSize=100, connect=False) + self.assertEqual(c.options.pool_options.max_pool_size, 100) + + async def test_no_disconnect(self): + await run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) + + async def test_pool_reuses_open_socket(self): + # Test Pool's _check_closed() method doesn't close a healthy socket. + cx_pool = await self.create_pool(max_pool_size=10) + cx_pool._check_interval_seconds = 0 # Always check. + async with cx_pool.checkout() as conn: + pass + + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_get_socket_and_exception(self): + # get_socket() returns socket after a non-network error. + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + with self.assertRaises(ZeroDivisionError): + async with cx_pool.checkout() as conn: + 1 / 0 + + # Socket was returned, not closed. + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_pool_removes_closed_socket(self): + # Test that Pool removes explicitly closed socket. + cx_pool = await self.create_pool() + + async with cx_pool.checkout() as conn: + # Use Connection's API to close the socket. 
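+            # (Passing reason=None means no particular close reason is
+            # reported for connection monitoring.)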
+ await conn.close_conn(None) + + self.assertEqual(0, len(cx_pool.conns)) + + async def test_pool_removes_dead_socket(self): + # Test that Pool removes dead socket and the socket doesn't return + # itself PYTHON-344 + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. + await conn.conn.close() + self.assertTrue(conn.conn_closed()) + + async with cx_pool.checkout() as new_connection: + self.assertEqual(0, len(cx_pool.conns)) + self.assertNotEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + # Semaphore was released. + async with cx_pool.checkout(): + pass + + async def test_socket_closed(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + self.assertFalse(socket_checker.socket_closed(s)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_socket_checker(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + # Socket has nothing to read. + self.assertFalse(socket_checker.select(s, read=True)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + # Make the socket readable + _, msg, _ = message._query( + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) + s.sendall(msg) + # Block until the socket is readable. + self.assertTrue(socket_checker.select(s, read=True, timeout=None)) + self.assertTrue(socket_checker.select(s, read=True)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is still writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_return_socket_after_reset(self): + pool = await self.create_pool() + async with pool.checkout() as sock: + self.assertEqual(pool.active_sockets, 1) + self.assertEqual(pool.operation_count, 1) + await pool.reset() + + self.assertTrue(sock.closed) + self.assertEqual(0, len(pool.conns)) + self.assertEqual(pool.active_sockets, 0) + self.assertEqual(pool.operation_count, 0) + + async def test_pool_check(self): + # Test that Pool recovers from two connection failures in a row. + # This exercises code at the end of Pool._check(). + cx_pool = await self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + self.addAsyncCleanup(cx_pool.close) + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. 
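+            # conn.conn is the underlying network socket, so closing it leaves
+            # the Connection wrapper unaware that the link is dead.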
+ await conn.conn.close() + + # Swap pool's address with a bad one. + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) + with self.assertRaises(AutoReconnect): + async with cx_pool.checkout(): + pass + + # Back to normal, semaphore was correctly released. + cx_pool.address = address + async with cx_pool.checkout(): + pass + + async def test_wait_queue_timeout(self): + wait_queue_timeout = 2 # Seconds + pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + self.addAsyncCleanup(pool.close) + + async with pool.checkout(): + start = time.time() + with self.assertRaises(ConnectionFailure): + async with pool.checkout(): + pass + + duration = time.time() - start + self.assertLess( + abs(wait_queue_timeout - duration), + 1, + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", + ) + + async def test_no_wait_queue_timeout(self): + # Verify get_socket() with no wait_queue_timeout blocks forever. + pool = await self.create_pool(max_pool_size=1) + self.addAsyncCleanup(pool.close) + + # Reach max_size. + async with pool.checkout() as s1: + t = SocketGetter(self.c, pool) + await t.start() + while t.state != "get_socket": + await asyncio.sleep(0.1) + + await asyncio.sleep(1) + self.assertEqual(t.state, "get_socket") + + while t.state != "connection": + await asyncio.sleep(0.1) + + self.assertEqual(t.state, "connection") + self.assertEqual(t.sock, s1) + # Cleanup + await t.release_conn() + await t.join() + await pool.close() + + async def test_checkout_more_than_max_pool_size(self): + pool = await self.create_pool(max_pool_size=2) + + socks = [] + for _ in range(2): + # Call 'pin_cursor' so we can hold the socket. + async with pool.checkout() as sock: + sock.pin_cursor() + socks.append(sock) + + tasks = [] + for _ in range(10): + t = SocketGetter(self.c, pool) + await t.start() + tasks.append(t) + await asyncio.sleep(1) + for t in tasks: + self.assertEqual(t.state, "get_socket") + # Cleanup + for socket_info in socks: + await socket_info.unpin() + while tasks: + to_remove = [] + for t in tasks: + if await t.release_conn(): + to_remove.append(t) + await t.join() + for t in to_remove: + tasks.remove(t) + await asyncio.sleep(0.05) + await pool.close() + + async def test_maxConnecting(self): + client = await self.async_rs_or_single_client() + await self.client.test.test.insert_one({}) + self.addAsyncCleanup(self.client.test.test.delete_many, {}) + pool = await async_get_pool(client) + docs = [] + + # Run 50 short running operations + async def find_one(): + docs.append(await client.test.test.find_one({})) + + tasks = [ConcurrentRunner(target=find_one) for _ in range(50)] + for task in tasks: + await task.start() + for task in tasks: + await task.join(10) + + self.assertEqual(len(docs), 50) + self.assertLessEqual(len(pool.conns), 50) + # TLS and auth make connection establishment more expensive than + # the query which leads to more threads hitting maxConnecting. + # The end result is fewer total connections and better latency. 
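+        # With cheap connection setup the pool may legitimately grow to one
+        # connection per task, so only the TLS+auth case asserts a tighter
+        # bound.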
+ if async_client_context.tls and async_client_context.auth_enabled: + self.assertLessEqual(len(pool.conns), 30) + else: + self.assertLessEqual(len(pool.conns), 50) + # MongoDB 4.4.1 with auth + ssl: + # maxConnecting = 2: 6 connections in ~0.231+ seconds + # maxConnecting = unbounded: 50 connections in ~0.642+ seconds + # + # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: + # maxConnecting = 2: 15-22 connections in ~0.108+ seconds + # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds + print(len(pool.conns)) + + @async_client_context.require_failCommand_appName + async def test_csot_timeout_message(self): + client = await self.async_rs_or_single_client(appName="connectionTimeoutApp") + # Mock an operation failing due to pymongo.timeout(). + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + with timeout(0.5): + await client.db.t.find_one({"$where": delay(2)}) + + self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception)) + + @async_client_context.require_failCommand_appName + async def test_socket_timeout_message(self): + client = await self.async_rs_or_single_client( + socketTimeoutMS=500, appName="connectionTimeoutApp" + ) + # Mock an operation failing due to socketTimeoutMS. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}) + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)", + str(error.exception), + ) + + @async_client_context.require_failCommand_appName + async def test_connection_timeout_message(self): + # Mock a connection creation failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "appName": "connectionTimeoutApp", + }, + } + + client = await self.async_rs_or_single_client( + connectTimeoutMS=500, + socketTimeoutMS=500, + appName="connectionTimeoutApp", + heartbeatFrequencyMS=1000000, + ) + await client.admin.command("ping") + pool = await async_get_pool(client) + await pool.reset_without_pause() + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.admin.command("ping") + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)", + str(error.exception), + ) + + +class TestPoolMaxSize(_TestPoolingBase): + async def test_max_pool_size(self): + max_pool_size = 4 + c = await self.async_rs_or_single_client(maxPoolSize=max_pool_size) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + # ntasks had better be much larger than max_pool_size to ensure that + # max_pool_size connections are actually required at some point in this + # test's execution. 
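+        # Ten tasks each run five slow find_one calls against a pool capped
+        # at four connections, so the cap is contended throughout.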
+ cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + assert len(cx_pool.conns) <= max_pool_size + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) + self.assertEqual(0, cx_pool.requests) + + async def test_max_pool_size_none(self): + c = await self.async_rs_or_single_client(maxPoolSize=None) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) + self.assertEqual(cx_pool.max_pool_size, float("inf")) + + async def test_max_pool_size_zero(self): + c = await self.async_rs_or_single_client(maxPoolSize=0) + pool = await async_get_pool(c) + self.assertEqual(pool.max_pool_size, float("inf")) + + async def test_max_pool_size_with_connection_failure(self): + # The pool acquires its semaphore before attempting to connect; ensure + # it releases the semaphore on connection failure. + test_pool = Pool( + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) + await test_pool.ready() + + # First call to get_socket fails; if pool doesn't release its semaphore + # then the second call raises "ConnectionFailure: Timed out waiting for + # socket from pool" instead of AutoReconnect. + for _i in range(2): + with self.assertRaises(AutoReconnect) as context: + async with test_pool.checkout(): + pass + + # Testing for AutoReconnect instead of ConnectionFailure, above, + # is sufficient right *now* to catch a semaphore leak. But that + # seems error-prone, so check the message too. + self.assertNotIn("waiting for socket from pool", str(context.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_raw_bson.py b/test/asynchronous/test_raw_bson.py new file mode 100644 index 0000000000..70832ea668 --- /dev/null +++ b/test/asynchronous/test_raw_bson.py @@ -0,0 +1,219 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
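+
+"""Test the RawBSONDocument class."""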
+from __future__ import annotations + +import datetime +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import Code, DBRef, decode, encode +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import InvalidBSON +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from bson.son import SON + +_IS_SYNC = False + + +class TestRawBSONDocument(AsyncIntegrationTest): + # {'_id': ObjectId('556df68b6e32ab21a95e0785'), + # 'name': 'Sherlock', + # 'addresses': [{'street': 'Baker Street'}]} + bson_string = ( + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" + ) + document = RawBSONDocument(bson_string) + + async def asyncTearDown(self): + if async_client_context.connected: + await self.client.pymongo_test.test_raw.drop() + + def test_decode(self): + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] + self.assertIsInstance(first_address, RawBSONDocument) + self.assertEqual("Baker Street", first_address["street"]) + + def test_raw(self): + self.assertEqual(self.bson_string, self.document.raw) + + def test_empty_doc(self): + doc = RawBSONDocument(encode({})) + with self.assertRaises(KeyError): + doc["does-not-exist"] + + def test_invalid_bson_sequence(self): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): + RawBSONDocument(bson_byte_sequence) + + def test_invalid_bson_eoo(self): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): + RawBSONDocument(invalid_bson_eoo) + + @async_client_context.require_connection + async def test_round_trip(self): + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) + await db.test_raw.insert_one(self.document) + result = await db.test_raw.find_one(self.document["_id"]) + assert result is not None + self.assertIsInstance(result, RawBSONDocument) + self.assertEqual(dict(self.document.items()), dict(result.items())) + + @async_client_context.require_connection + async def test_round_trip_raw_uuid(self): + coll = self.client.get_database("pymongo_test").test_raw + uid = uuid.uuid4() + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} + raw = RawBSONDocument(encode(doc)) + await coll.insert_one(raw) + self.assertEqual(await coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + await uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) + + # Test that the raw bytes haven't changed. + raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) + self.assertEqual(await raw_coll.find_one(), raw) + + def test_with_codec_options(self): + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # encoded with JAVA_LEGACY uuid representation. 
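+        # (JAVA_LEGACY stores each 8-byte half of the UUID in reversed byte
+        # order, which is why the _id bytes below look scrambled.)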
+ bson_string = ( + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" + ) + document = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) + + @async_client_context.require_connection + async def test_round_trip_codec_options(self): + doc = { + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), + } + db = self.client.pymongo_test + coll = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) + await coll.insert_one(doc) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) + self.assertEqual( + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), await coll.find_one() + ) + + @async_client_context.require_connection + async def test_raw_bson_document_embedded(self): + doc = {"embedded": self.document} + db = self.client.pymongo_test + await db.test_raw.insert_one(doc) + result = await db.test_raw.find_one() + assert result is not None + self.assertEqual(decode(self.document.raw), result["embedded"]) + + # Make sure that CodecOptions are preserved. + # {'embedded': [ + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # ]} + # encoded with JAVA_LEGACY uuid representation. + bson_string = ( + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" + ) + rbd = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + await db.test_raw.drop() + await db.test_raw.insert_one(rbd) + result = await db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() + assert result is not None + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) + + @async_client_context.require_connection + async def test_write_response_raw_bson(self): + coll = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw + + # No Exceptions raised while handling write response. 
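+        # With document_class=RawBSONDocument the driver must handle raw
+        # replies while building write results; none of these should raise.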
+ await coll.insert_one(self.document) + await coll.delete_one(self.document) + await coll.insert_many([self.document]) + await coll.delete_many(self.document) + await coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + await coll.update_many(self.document, {"$set": {"b": "c"}}) + + def test_preserve_key_ordering(self): + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] + rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) + + for rkey, elt in zip(rawdoc, keyvaluepairs): + self.assertEqual(rkey, elt[0]) + + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_concern.py b/test/asynchronous/test_read_concern.py new file mode 100644 index 0000000000..8659bf80b2 --- /dev/null +++ b/test/asynchronous/test_read_concern.py @@ -0,0 +1,122 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils_shared import OvertCommandListener + +from bson.son import SON +from pymongo.errors import OperationFailure +from pymongo.read_concern import ReadConcern + +_IS_SYNC = False + + +class TestReadConcern(AsyncIntegrationTest): + listener: OvertCommandListener + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + await async_client_context.client.pymongo_test.create_collection("coll") + + async def asyncTearDown(self): + await async_client_context.client.pymongo_test.drop_collection("coll") + + def test_read_concern(self): + rc = ReadConcern() + self.assertIsNone(rc.level) + self.assertTrue(rc.ok_for_legacy) + + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) + self.assertFalse(rc.ok_for_legacy) + + rc = ReadConcern("local") + self.assertEqual("local", rc.level) + self.assertTrue(rc.ok_for_legacy) + + self.assertRaises(TypeError, ReadConcern, 42) + + async def test_read_concern_uri(self): + uri = f"mongodb://{await async_client_context.pair}/?readConcernLevel=majority" + client = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(ReadConcern("majority"), client.read_concern) + + async def test_invalid_read_concern(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): + await coll.find_one() + + async def test_find_command(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await coll.find({"field": "value"}).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await coll.find({"field": "value"}).to_list() + self.assertEqualCommand( + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.started_events[0].command, + ) + + async def test_command_cursor(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) + + async def test_aggregate_out(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await ( + await coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}]) + ).to_list() + + # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
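+        # ((4, 1) rather than (4, 2) so that 4.1.x pre-release builds of 4.2
+        # also take the first branch.)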
+ if async_client_context.version >= (4, 1): + self.assertIn("readConcern", self.listener.started_events[0].command) + else: + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py new file mode 100644 index 0000000000..d18887da40 --- /dev/null +++ b/test/asynchronous/test_read_preferences.py @@ -0,0 +1,742 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the replica_set_connection module.""" +from __future__ import annotations + +import contextlib +import copy +import pickle +import random +import sys +from typing import Any + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + connected, + unittest, +) +from test.utils_shared import ( + OvertCommandListener, + _ignore_deprecations, + async_wait_until, + one, +) +from test.version import Version + +from bson.son import SON +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.message import _maybe_add_read_preference +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, readable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSelections(AsyncIntegrationTest): + @async_client_context.require_connection + async def test_bool(self): + client = await self.async_single_client() + + async def predicate(): + return await client.address + + await async_wait_until(predicate, "discover primary") + selection = Selection.from_topology_description(client._topology.description) + + self.assertTrue(selection) + self.assertFalse(selection.with_server_descriptions([])) + + +class TestReadPreferenceObjects(unittest.TestCase): + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] + + def test_pickle(self): + for pref in self.prefs: + self.assertEqual(pref, pickle.loads(pickle.dumps(pref))) + + def test_copy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.copy(pref)) + + def test_deepcopy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.deepcopy(pref)) + + +class TestReadPreferencesBase(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + # Insert some data so we can use cursors in read_from_which_host + await self.client.pymongo_test.test.drop() + await self.client.get_database( + "pymongo_test", write_concern=WriteConcern(w=async_client_context.w) + 
).test.insert_many([{"_id": i} for i in range(10)]) + + self.addAsyncCleanup(self.client.pymongo_test.test.drop) + + async def read_from_which_host(self, client): + """Do a find() on the client and return which host was used""" + cursor = client.pymongo_test.test.find() + await anext(cursor) + return cursor.address + + async def read_from_which_kind(self, client): + """Do a find() on the client and return 'primary' or 'secondary' + depending on which the client used. + """ + address = await self.read_from_which_host(client) + if address == await client.primary: + return "primary" + elif address in await client.secondaries: + return "secondary" + else: + self.fail( + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" + ) + + async def assertReadsFrom(self, expected, **kwargs): + c = await self.async_rs_client(**kwargs) + + async def predicate(): + return len(c.nodes - await c.arbiters) == async_client_context.w + + await async_wait_until(predicate, "discovered all nodes") + + used = await self.read_from_which_kind(c) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") + + +class TestSingleSecondaryOk(TestReadPreferencesBase): + async def test_reads_from_secondary(self): + host, port = next(iter(await self.client.secondaries)) + # Direct connection to a secondary. + client = await self.async_single_client(host, port) + self.assertFalse(await client.is_primary) + + # Regardless of read preference, we should be able to do + # "reads" with a direct connection to a secondary. + # See server-selection.rst#topology-type-single. + self.assertEqual(client.read_preference, ReadPreference.PRIMARY) + + db = client.pymongo_test + coll = db.test + + # Test find and find_one. + self.assertIsNotNone(await coll.find_one()) + self.assertEqual(10, len(await coll.find().to_list())) + + # Test some database helpers. + self.assertIsNotNone(await db.list_collection_names()) + self.assertIsNotNone(await db.validate_collection("test")) + self.assertIsNotNone(await db.command("ping")) + + # Test some collection helpers. 
+ self.assertEqual(10, await coll.count_documents({})) + self.assertEqual(10, len(await coll.distinct("_id"))) + self.assertIsNotNone(await coll.aggregate([])) + self.assertIsNotNone(await coll.index_information()) + + +class TestReadPreferences(TestReadPreferencesBase): + async def test_mode_validation(self): + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual( + mode, (await self.async_rs_client(read_preference=mode)).read_preference + ) + + with self.assertRaises(TypeError): + await self.async_rs_client(read_preference="foo") + + async def test_tag_sets_validation(self): + S = Secondary(tag_sets=[{}]) + self.assertEqual( + [{}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual( + [{"k": "v"}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual( + [{"k": "v"}, {}], + (await self.async_rs_client(read_preference=S)).read_preference.tag_sets, + ) + + self.assertRaises(ValueError, Secondary, tag_sets=[]) + + # One dict not ok, must be a list of dicts + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) + + self.assertRaises(TypeError, Secondary, tag_sets="foo") + + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) + + async def test_threshold_validation(self): + self.assertEqual( + 17, + ( + await self.async_rs_client(localThresholdMS=17, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 42, + ( + await self.async_rs_client(localThresholdMS=42, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 666, + ( + await self.async_rs_client(localThresholdMS=666, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 0, + ( + await self.async_rs_client(localThresholdMS=0, connect=False) + ).options.local_threshold_ms, + ) + + with self.assertRaises(ValueError): + await self.async_rs_client(localthresholdms=-1) + + async def test_zero_latency(self): + ping_times: set = set() + # Generate unique ping times. 
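+        # (random.random() can return duplicates, hence the loop keyed on the
+        # set's size.)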
+ while len(ping_times) < len(self.client.nodes): + ping_times.add(random.random()) + for ping_time, host in zip(ping_times, self.client.nodes): + ServerDescription._host_to_round_trip_time[host] = ping_time + try: + client = await connected( + await self.async_rs_client(readPreference="nearest", localThresholdMS=0) + ) + await async_wait_until( + lambda: client.nodes == self.client.nodes, "discovered all nodes" + ) + host = await self.read_from_which_host(client) + for _ in range(5): + self.assertEqual(host, await self.read_from_which_host(client)) + finally: + ServerDescription._host_to_round_trip_time.clear() + + async def test_primary(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) + + async def test_primary_with_tags(self): + # Tags not allowed with PRIMARY + with self.assertRaises(ConfigurationError): + await self.async_rs_client(tag_sets=[{"dc": "ny"}]) + + async def test_primary_preferred(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) + + async def test_secondary(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) + + async def test_secondary_preferred(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) + + async def test_nearest(self): + # With high localThresholdMS, expect to read from any + # member + c = await self.async_rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds + + data_members = {await self.client.primary} | await self.client.secondaries + + # This is a probabilistic test; track which members we've read from so + # far, and keep reading until we've used all the members or give up. + # Chance of using only 2 of 3 members 10k times if there's no bug = + # 3 * (2/3)**10000, very low. 
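+        # (That figure is a union bound over which one of the three members
+        # goes unused.)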
+ used: set = set() + i = 0 + while data_members.difference(used) and i < 10000: + address = await self.read_from_which_host(c) + used.add(address) + i += 1 + + not_used = data_members.difference(used) + latencies = ", ".join( + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in await (await c._get_topology()).select_servers( + readable_server_selector, _Op.TEST + ) + ) + + self.assertFalse( + not_used, + "Expected to use primary and all secondaries for mode NEAREST," + f" but didn't use {not_used}\nlatencies: {latencies}", + ) + + +class ReadPrefTester(AsyncMongoClient): + def __init__(self, *args, **kwargs): + self.has_read_from = set() + client_options = async_client_context.client_options + client_options.update(kwargs) + super().__init__(*args, **client_options) + + async def _conn_for_reads(self, read_preference, session, operation): + context = await super()._conn_for_reads(read_preference, session, operation) + return context + + @contextlib.asynccontextmanager + async def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + async with context as (conn, read_preference): + await self.record_a_read(conn.address) + yield conn, read_preference + + async def record_a_read(self, address): + server = await (await self._get_topology()).select_server_by_address(address, _Op.TEST, 0) + self.has_read_from.add(server) + + +_PREF_MAP = [ + (Primary, SERVER_TYPE.RSPrimary), + (PrimaryPreferred, SERVER_TYPE.RSPrimary), + (Secondary, SERVER_TYPE.RSSecondary), + (SecondaryPreferred, SERVER_TYPE.RSSecondary), + (Nearest, "any"), +] + + +class TestCommandAndReadPreference(AsyncIntegrationTest): + c: ReadPrefTester + client_version: Version + + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = ReadPrefTester( + # Ignore round trip times, to test ReadPreference modes only. + localThresholdMS=1000 * 1000, + ) + self.client_version = await Version.async_from_client(self.c) + # mapReduce fails if the collection does not exist. 
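+        # (w=async_client_context.w waits for replication to every member, so
+        # secondary reads in these tests see the document.)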
+ coll = self.c.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.insert_one({}) + + async def asyncTearDown(self): + await self.c.drop_database("pymongo_test") + await self.c.close() + + async def executed_on_which_server(self, client, fn, *args, **kwargs): + """Execute fn(*args, **kwargs) and return the Server instance used.""" + client.has_read_from.clear() + await fn(*args, **kwargs) + self.assertEqual(1, len(client.has_read_from)) + return one(client.has_read_from) + + async def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): + server = await self.executed_on_which_server(client, fn, *args, **kwargs) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) + + async def _test_fn(self, server_type, fn): + for _ in range(10): + if server_type == "any": + used = set() + for _ in range(1000): + server = await self.executed_on_which_server(self.c, fn) + used.add(server.description.address) + if len(used) == len(await self.c.secondaries) + 1: + # Success + break + + assert await self.c.primary is not None + unused = (await self.c.secondaries).union({await self.c.primary}).difference(used) + if unused: + self.fail("Some members not used for NEAREST: %s" % (unused)) + else: + await self.assertExecutedOn(server_type, self.c, fn) + + async def _test_primary_helper(self, func): + # Helpers that ignore read preference. + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): + for mode, server_type in _PREF_MAP: + new_coll = coll.with_options(read_preference=mode()) + + async def func(): + return await getattr(new_coll, meth)(*args, **kwargs) + + if secondary_ok: + await self._test_fn(server_type, func) + else: + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def test_command(self): + # Test that the generic command helper obeys the read preference + # passed to it. + for mode, server_type in _PREF_MAP: + + async def func(): + return await self.c.pymongo_test.command("dbStats", read_preference=mode()) + + await self._test_fn(server_type, func) + + async def test_create_collection(self): + # create_collection runs listCollections on the primary to check if + # the collection already exists. + async def func(): + return await self.c.pymongo_test.create_collection( + "some_collection%s" % random.randint(0, sys.maxsize) + ) + + await self._test_primary_helper(func) + + async def test_count_documents(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) + + async def test_estimated_document_count(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") + + async def test_distinct(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") + + async def test_aggregate(self): + await self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) + + async def test_aggregate_write(self): + # 5.0 servers support $out on secondaries. 
+ secondary_ok = async_client_context.version.at_least(5, 0) + await self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) + + +class TestMovingAverage(unittest.TestCase): + def test_moving_average(self): + avg = MovingAverage() + self.assertIsNone(avg.get()) + avg.add_sample(10) + self.assertAlmostEqual(10, avg.get()) # type: ignore + avg.add_sample(20) + self.assertAlmostEqual(12, avg.get()) # type: ignore + avg.add_sample(30) + self.assertAlmostEqual(15.6, avg.get()) # type: ignore + + +class TestMongosAndReadPreference(AsyncIntegrationTest): + def test_read_preference_document(self): + pref = Primary() + self.assertEqual(pref.document, {"mode": "primary"}) + + pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + with self.assertRaises(TypeError): + # Float is prohibited. + Nearest(max_staleness=1.5) # type: ignore + + with self.assertRaises(ValueError): + Nearest(max_staleness=0) + + with self.assertRaises(ValueError): + Nearest(max_staleness=-2) + + def test_read_preference_document_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) # type: ignore + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. 
+ self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) + + async def test_send_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + if await async_client_context.supports_secondary_read_pref: + cases["secondary"] = Secondary + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + await client.admin.command("ping") + for _mode, cls in cases.items(): + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) + listener.reset() + await coll.find_one() + started = listener.started_events + self.assertEqual(len(started), 1, started) + cmd = started[0].command + if async_client_context.is_rs or async_client_context.is_mongos: + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) + else: + self.assertNotIn("$readPreference", cmd) + + def test_maybe_add_read_preference(self): + # Primary doesn't add $readPreference + out = _maybe_add_read_preference({}, Primary()) + self.assertEqual(out, {}) + + pref = PrimaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Secondary() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + # SecondaryPreferred without tag_sets or max_staleness doesn't add + # $readPreference + pref = SecondaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, {}) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = SecondaryPreferred(max_staleness=120) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Nearest() + out = 
_maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) + pref = Nearest() + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + + @async_client_context.require_mongos + async def test_mongos(self): + res = await async_client_context.client.config.shards.find_one() + assert res is not None + shard = res["host"] + num_members = shard.count(",") + 1 + if num_members == 1: + raise SkipTest("Need a replica set shard to test.") + coll = async_client_context.client.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=num_members) + ) + await coll.drop() + res = await coll.insert_many([{} for _ in range(5)]) + first_id = res.inserted_ids[0] + last_id = res.inserted_ids[-1] + + # Note - this isn't a perfect test since there's no way to + # tell what shard member a query ran on. + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): + qcoll = coll.with_options(read_preference=pref) + results = await qcoll.find().sort([("_id", 1)]).to_list() + self.assertEqual(first_id, results[0]["_id"]) + self.assertEqual(last_id, results[-1]["_id"]) + results = await qcoll.find().sort([("_id", -1)]).to_list() + self.assertEqual(first_id, results[-1]["_id"]) + self.assertEqual(last_id, results[0]["_id"]) + + @async_client_context.require_mongos + async def test_mongos_max_staleness(self): + # Sanity check that we're sending maxStalenessSeconds + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) + # No error + await coll.find_one() + + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ) + ).pymongo_test.test + # No error + await coll.find_one() + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ) + ).pymongo_test.test + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_write_concern_spec.py b/test/asynchronous/test_read_write_concern_spec.py new file mode 100644 index 0000000000..b5cb32932f --- /dev/null +++ b/test/asynchronous/test_read_write_concern_spec.py @@ -0,0 +1,348 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the read and write concern tests.""" +from __future__ import annotations + +import json +import os +import sys +import warnings +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import OvertCommandListener + +from pymongo import DESCENDING +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") + + +class TestReadWriteConcernSpec(AsyncIntegrationTest): + async def test_omit_default_read_write_concern(self): + listener = OvertCommandListener() + # Client with default readConcern and writeConcern + client = await self.async_rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + await collection.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(collection.drop) + self.addAsyncCleanup(client.pymongo_test.collection2.drop) + # Commands MUST NOT send the default read/write concern to the server. + + async def rename_and_drop(): + # Ensure collection exists. 
+ await collection.insert_one({}) + await collection.rename("collection2") + await client.pymongo_test.collection2.drop() + + async def insert_command_default_write_concern(): + await collection.database.command( + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) + + async def aggregate_op(): + await (await collection.aggregate([])).to_list() + + ops = [ + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), + ] + + for name, f in ops: + listener.reset() + await f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): + self.assertNotIn( + "readConcern", + event.command, + f"{name} sent default readConcern with {event.command_name}", + ) + self.assertNotIn( + "writeConcern", + event.command, + f"{name} sent default writeConcern with {event.command_name}", + ) + + async def assertWriteOpsRaise(self, write_concern, expected_exception): + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = await self.async_rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) + db = client.get_database("pymongo_test") + coll = db.test + + async def insert_command(): + await coll.database.command( + "insert", + "new_collection", + documents=[{}], + writeConcern=write_concern.document, + parse_write_concern_error=True, + ) + + ops = [ + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), + # SERVER-46668 Delete all the documents in the collection to + # workaround a hang in createIndexes. + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), + ] + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if async_client_context.version[:2] != (3, 6): + ops.append(("drop_database", lambda: client.drop_database(db))) + + for name, f in ops: + # Ensure insert_many and bulk_write still raise BulkWriteError. 
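+            # (Bulk helpers wrap write concern failures in a BulkWriteError
+            # whose details carry "writeConcernErrors", checked below.)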
+ if name in ("insert_many", "bulk_write"): + expected = BulkWriteError + else: + expected = expected_exception + with self.assertRaises(expected, msg=name) as cm: + await f() + if expected == BulkWriteError: + bulk_result = cm.exception.details + assert bulk_result is not None + wc_errors = bulk_result["writeConcernErrors"] + self.assertTrue(wc_errors) + + @async_client_context.require_replica_set + async def test_raise_write_concern_error(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + assert async_client_context.w is not None + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w + 1, wtimeout=1), WriteConcernError + ) + + @async_client_context.require_secondaries_count(1) + @async_client_context.require_test_commands + async def test_raise_wtimeout(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + self.addAsyncCleanup(self.enable_replication, async_client_context.client) + # Disable replication to guarantee a wtimeout error. + await self.disable_replication(async_client_context.client) + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w, wtimeout=1), WTimeoutError + ) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) + @async_client_context.require_failCommand_fail_point + async def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + async with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + await self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. 
+ with self.assertRaises(BulkWriteError) as ctx: + await self.db.test.bulk_write([InsertOne({})]) + expected_details = { + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + self.assertEqual(ctx.exception.details, expected_details) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) + @async_client_context.require_version_min(4, 9) + async def test_write_error_details_exposes_errinfo(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client.errinfotest + self.addAsyncCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + await db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + await db.test.insert_one({"x": 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + assert ctx.exception.details is not None + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.succeeded_events: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") + + +def normalize_write_concern(concern): + result = {} + for key in concern: + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] + else: + result[key] = concern[key] + return result + + +def create_connection_string_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] + + if not valid: + if warning is False: + self.assertRaises( + (ConfigurationError, ValueError), AsyncMongoClient, uri, connect=False + ) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, AsyncMongoClient, uri, connect=False) + else: + client = AsyncMongoClient(uri, connect=False) + if "writeConcern" in test_case: + document = client.write_concern.document + self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: + document = client.read_concern.document + self.assertEqual(document, test_case["readConcern"]) + + return run_test + + +def create_document_test(test_case): + def run_test(self): + valid = test_case["valid"] + + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) + if not valid: + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) + else: + write_concern = WriteConcern(**normalized) + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: + # Any string for 'level' is equally valid + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) + + return run_test + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + if dirname == "operation": + # This directory is tested by 
TestOperations. + continue + elif dirname == "connection-string": + create_test = create_connection_string_test + else: + create_test = create_document_test + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as test_stream: + test_cases = json.load(test_stream)["tests"] + + fname = os.path.splitext(filename)[0] + for test_case in test_cases: + new_test = create_test(test_case) + test_name = "test_{}_{}_{}".format( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) + + new_test.__name__ = test_name + setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) + + +create_tests() + + +# Generate unified tests. +# PyMongo does not support MapReduce. +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "operation"), + module=__name__, + expected_failures=["MapReduce .*"], + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py new file mode 100644 index 0000000000..47ac91b0f5 --- /dev/null +++ b/test/asynchronous/test_retryable_reads.py @@ -0,0 +1,266 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable reads spec.""" +from __future__ import annotations + +import os +import pprint +import sys +import threading +from test.asynchronous.utils import async_set_fail_point + +from pymongo.errors import OperationFailure + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + client_knobs, + unittest, +) +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, +) + +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) + +_IS_SYNC = False + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = self.simple_client(connect=False) + self.assertEqual(client.options.retry_reads, True) + + async def test_kwargs(self): + client = self.simple_client(retryReads=True, connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client(retryReads=False, connect=False) + self.assertEqual(client.options.retry_reads, False) + + async def test_uri(self): + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) + self.assertEqual(client.options.retry_reads, False) + + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. 
+ RUN_ON_LOAD_BALANCER = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flaky on PyPy") + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. 
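+        # The blocked find fails once (the one failed event), its retry
+        # succeeds, and the other thread's find succeeds after its checkout
+        # is retried, giving 3 started, 2 succeeded, and 1 failed event.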
+ started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + +class TestRetryableReads(AsyncIntegrationTest): + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + event_listeners=[listener], + retryReads=True, + ) + + with self.assertRaises(OperationFailure): + await client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + # Assert that both events occurred on different mongos. + assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available( + self + ): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = async_client_context.mongos_seeds().split(",")[0] + mongos_client = await self.async_rs_or_single_client(host) + await async_set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + await async_set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. + self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. 
+ assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + async with self.fail_point(fail_command): + await operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_reads_unified.py b/test/asynchronous/test_retryable_reads_unified.py new file mode 100644 index 0000000000..e62d606810 --- /dev/null +++ b/test/asynchronous/test_retryable_reads_unified.py @@ -0,0 +1,46 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Reads unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") + +# Generate unified tests. +# PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + expected_failures=["ListDatabaseObjects .*", "ListCollectionObjects .*", "MapReduce .*"], + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py new file mode 100644 index 0000000000..ddb1d39eb7 --- /dev/null +++ b/test/asynchronous/test_retryable_writes.py @@ -0,0 +1,605 @@ +# Copyright 2017-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable writes.""" +from __future__ import annotations + +import asyncio +import copy +import pprint +import sys +import threading +from test.asynchronous.utils import async_set_fail_point, flaky + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import client_knobs +from test.utils_shared import ( + CMAPListener, + DeprecationFilter, + EventListener, + OvertCommandListener, +) +from test.version import Version + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo.errors import ( + AutoReconnect, + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ( + CommandSucceededEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + async_client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + +def retryable_single_statement_ops(coll): + return [ + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({})]], {}), + (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), + ] + + +def non_retryable_single_statement_ops(coll): + return [ + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_many, [{}], {}), + ] + + +class IgnoreDeprecationsTest(AsyncIntegrationTest): + 
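+    """Base class that suppresses deprecation warnings during each test."""
+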
RUN_ON_LOAD_BALANCER = True + deprecation_filter: DeprecationFilter + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.deprecation_filter = DeprecationFilter() + + async def asyncTearDown(self) -> None: + await super().asyncTearDown() + self.deprecation_filter.stop() + + +class TestRetryableWrites(IgnoreDeprecationsTest): + listener: OvertCommandListener + knobs: client_knobs + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + # Speed up the tests by decreasing the heartbeat frequency. + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[self.listener] + ) + self.db = self.client.pymongo_test + + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) + + async def asyncTearDown(self): + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) + self.knobs.disable() + await super().asyncTearDown() + + async def test_supported_single_statement_no_retry(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=False, event_listeners=[listener]) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + await method(*args, **kwargs) + for event in listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_supported_single_statement_unsupported_cluster(self): + if async_client_context.is_rs or async_client_context.is_mongos: + raise SkipTest("This cluster supports retryable writes") + + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + + for event in self.listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_unsupported_single_statement(self): + coll = self.db.retryable_write_test + await coll.insert_many([{}, {}]) + coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) + for event in started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_server_selection_timeout_not_retried(self): + """A ServerSelectionTimeoutError is not retried.""" + listener = OvertCommandListener() + client = self.simple_client( + "somedomainthatdoesntexist.org", + serverSelectionTimeoutMS=1, + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in 
retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + with self.assertRaises(ServerSelectionTimeoutError, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 0, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_retry_timeout_raises_original_error(self): + """A ServerSelectionTimeoutError on the retry attempt raises the + original error. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def mock_select_server(*args, **kwargs): + server = select_server(*args, **kwargs) + + def raise_error(*args, **kwargs): + raise ServerSelectionTimeoutError("No primary available for writes") + + # Raise ServerSelectionTimeout on the retry attempt. + topology.select_server = raise_error + return server + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + topology.select_server = mock_select_server + with self.assertRaises(ConnectionFailure, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting(self): + """Test retry succeeds after failures during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + self.listener.reset() + bulk_result = await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) + # Each command should fail and be retried. + # With OP_MSG 3 inserts are one batch. 2 updates another. + # 2 deletes a third. + self.assertEqual(len(self.listener.started_events), 6) + self.assertEqual(await coll.find_one(), {"_id": 1, "count": 1}) + # Assert the final result + expected_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 3, + "nUpserted": 0, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [], + } + self.assertEqual(bulk_result.bulk_api_result, expected_result) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting_retry_fails(self): + """Test retry fails during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + await self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. 
+ ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) + self.listener.reset() + async with self.client.start_session() as session: + initial_txn = session._transaction_id + try: + await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) + except ConnectionFailure: + pass + else: + self.fail("bulk_write should have failed") + + started = self.listener.started_events + self.assertEqual(len(started), 3) + self.assertEqual(len(self.listener.succeeded_events), 1) + expected_txn = Int64(initial_txn + 1) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) + expected_txn = Int64(initial_txn + 2) + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") + self.assertEqual(started[1].command, started[2].command) + final_txn = session._transaction_id + self.assertEqual(final_txn, expected_txn) + self.assertEqual(await coll.find_one(projection={"_id": True}), {"_id": 1}) + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + await client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + +class TestWriteConcernError(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + fail_insert: dict + + @async_client_context.require_replica_set + @async_client_context.require_failCommand_fail_point + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + + @async_client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + + # Ensure collection exists. 
+ await client.pymongo_test.testcoll.insert_one({}) + + async with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + await client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) + + if async_client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) + + @async_client_context.require_version_min(4, 4) + async def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + async with self.fail_point(self.fail_insert): + async with self.client.start_session() as s: + s._start_retryable_write() + result = await self.client.pymongo_test.command( + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._transaction_id, + session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument + ), + ) + + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @async_client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") + async def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. 
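+        # The expected event sequence is: checked out, pool cleared, check out
+        # failed with a connection error, then checked out again on the retry.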
+ cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + @async_client_context.require_sync + @async_client_context.require_failCommand_fail_point + @async_client_context.require_replica_set + @async_client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[cmd_listener] + ) + await client.test.test.drop() + cmd_listener.reset() + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + await client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + + +# TODO: Make this a real integration test where we stepdown the primary. +class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): + @async_client_context.require_replica_set + async def test_increment_transaction_id_without_sending_command(self): + """Test that the txnNumber field is properly incremented, even when + the first attempt fails before sending the command. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def raise_connection_err_select_server(*args, **kwargs): + # Raise ConnectionFailure on the first attempt and perform + # normal selection on the retry attempt. + topology.select_server = select_server + raise ConnectionFailure("Connection refused") + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + listener.reset() + topology.select_server = raise_connection_err_select_server + async with client.start_session() as session: + kwargs = copy.deepcopy(kwargs) + kwargs["session"] = session + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + initial_txn_id = session._transaction_id + + # Each operation should fail on the first attempt and succeed + # on the second. 
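+                # The session's transaction id must be incremented exactly once,
+                # even though the first attempt fails before reaching the server.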
+ await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command + sent_txn_id = retry_cmd["txnNumber"] + final_txn_id = session._transaction_id + self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) + self.assertEqual(sent_txn_id, final_txn_id, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_writes_unified.py b/test/asynchronous/test_retryable_writes_unified.py new file mode 100644 index 0000000000..bb493e6010 --- /dev/null +++ b/test/asynchronous/test_retryable_writes_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_run_command.py b/test/asynchronous/test_run_command.py new file mode 100644 index 0000000000..3ac8c32706 --- /dev/null +++ b/test/asynchronous/test_run_command.py @@ -0,0 +1,41 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" +from __future__ import annotations + +import os +import unittest +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. 
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_sdam_monitoring_spec.py b/test/asynchronous/test_sdam_monitoring_spec.py new file mode 100644 index 0000000000..71ec6c6b46 --- /dev/null +++ b/test/asynchronous/test_sdam_monitoring_spec.py @@ -0,0 +1,374 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the sdam monitoring spec tests.""" +from __future__ import annotations + +import asyncio +import json +import os +import sys +import time +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest +from test.utils_shared import ( + ServerAndTopologyEventListener, + async_wait_until, + server_name_to_type, +) + +from bson.json_util import object_hook +from pymongo import AsyncMongoClient, monitoring +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.monitor import Monitor +from pymongo.common import clean_node +from pymongo.errors import ConnectionFailure, NotPrimaryError +from pymongo.hello import Hello +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + +# Location of JSON test specifications. 
+if _IS_SYNC:
+    TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring")
+else:
+    TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring")
+
+
+def compare_server_descriptions(expected, actual):
+    """Return True if the expected server description matches the actual one."""
+    if (expected["address"] != "{}:{}".format(*actual.address)) or (
+        server_name_to_type(expected["type"]) != actual.server_type
+    ):
+        return False
+    expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"])
+    return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts}
+
+
+def compare_topology_descriptions(expected, actual):
+    """Return True if the expected topology description matches the actual one."""
+    if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type:
+        return False
+    expected = expected["servers"]
+    actual = actual.server_descriptions()
+    if len(expected) != len(actual):
+        return False
+    for exp_server in expected:
+        for _address, actual_server in actual.items():
+            if compare_server_descriptions(exp_server, actual_server):
+                break
+        else:
+            return False
+    return True
+
+
+def compare_events(expected_dict, actual):
+    """Compare an expected event document with a published event, returning (passed, message)."""
+    if not expected_dict:
+        return False, "Error: Bad expected value in YAML test"
+    if not actual:
+        return False, "Error: Event published was None"
+
+    expected_type, expected = list(expected_dict.items())[0]
+
+    if expected_type == "server_opening_event":
+        if not isinstance(actual, monitoring.ServerOpeningEvent):
+            return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__)
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerOpeningEvent published with wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+    elif expected_type == "server_description_changed_event":
+        if not isinstance(actual, monitoring.ServerDescriptionChangedEvent):
+            return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__))
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerDescriptionChangedEvent has wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+        if not compare_server_descriptions(expected["newDescription"], actual.new_description):
+            return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent")
+        if not compare_server_descriptions(
+            expected["previousDescription"], actual.previous_description
+        ):
+            return (
+                False,
+                "Previous ServerDescription incorrect in ServerDescriptionChangedEvent",
+            )
+
+    elif expected_type == "server_closed_event":
+        if not isinstance(actual, monitoring.ServerClosedEvent):
+            return False, "Expected ServerClosedEvent, got %s" % (actual.__class__)
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerClosedEvent published with wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+    elif expected_type == "topology_opening_event":
+        if not isinstance(actual, monitoring.TopologyOpenedEvent):
+            return False, "Expected TopologyOpenedEvent, got %s" % (actual.__class__)
+
+    elif expected_type == "topology_description_changed_event":
+        if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent):
+            return (
+                False,
+                "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__),
+            )
+        if not compare_topology_descriptions(expected["newDescription"], actual.new_description):
+            return (
+                False,
+                "New TopologyDescription incorrect in TopologyDescriptionChangedEvent",
+            )
+        if not compare_topology_descriptions(
+            expected["previousDescription"], actual.previous_description
+        ):
+            return (
+                False,
+                "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent",
+            )
+
+    elif expected_type == "topology_closed_event":
+        if not isinstance(actual, monitoring.TopologyClosedEvent):
+            return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__)
+
+    else:
+        return False, f"Incorrect event: expected {expected_type}, actual {actual}"
+
+    return True, ""
+
+
+def compare_multiple_events(i, expected_results, actual_results):
+    """Compare a run of same-class events in any order, returning (next index, passed, message)."""
+    events_in_a_row = []
+    j = i
+    while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__):
+        events_in_a_row.append(actual_results[j])
+        j += 1
+    message = ""
+    for event in events_in_a_row:
+        for k in range(i, j):
+            passed, message = compare_events(expected_results[k], event)
+            if passed:
+                expected_results[k] = None
+                break
+        else:
+            return i, False, message
+    return j, True, ""
+
+
+class TestAllScenarios(AsyncIntegrationTest):
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.all_listener = ServerAndTopologyEventListener()
+
+
+def create_test(scenario_def):
+    async def run_scenario(self):
+        with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05):
+            await _run_scenario(self)
+
+    async def _run_scenario(self):
+        class NoopMonitor(Monitor):
+            """Override the _run method to do nothing."""
+
+            async def _run(self):
+                await asyncio.sleep(0.05)
+
+        m = AsyncMongoClient(
+            host=scenario_def["uri"],
+            port=27017,
+            event_listeners=[self.all_listener],
+            _monitor_class=NoopMonitor,
+        )
+        topology = await m._get_topology()
+
+        try:
+            for phase in scenario_def["phases"]:
+                for source, response in phase.get("responses", []):
+                    source_address = clean_node(source)
+                    await topology.on_change(
+                        ServerDescription(
+                            address=source_address, hello=Hello(response), round_trip_time=0
+                        )
+                    )
+
+                expected_results = phase["outcome"]["events"]
+                expected_len = len(expected_results)
+                await async_wait_until(
+                    lambda: len(self.all_listener.results) >= expected_len,
+                    "publish all events",
+                    timeout=15,
+                )
+
+                # Wait some time to catch possible lagging extra events.
+                await async_wait_until(lambda: topology._events.empty(), "publish lagging events")
+
+                i = 0
+                while i < expected_len:
+                    result = (
+                        self.all_listener.results[i] if len(self.all_listener.results) > i else None
+                    )
+                    # The order of ServerOpening/ClosedEvents doesn't matter
+                    if isinstance(
+                        result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent)
+                    ):
+                        i, passed, message = compare_multiple_events(
+                            i, expected_results, self.all_listener.results
+                        )
+                        self.assertTrue(passed, message)
+                    else:
+                        self.assertTrue(*compare_events(expected_results[i], result))
+                    i += 1
+
+                # Assert no extra events.
+                extra_events = self.all_listener.results[expected_len:]
+                if extra_events:
+                    self.fail(f"Extra events {extra_events!r}")
+
+                self.all_listener.reset()
+        finally:
+            await m.close()
+
+    return run_scenario
+
+
+def create_tests():
+    for dirpath, _, filenames in os.walk(TEST_PATH):
+        for filename in filenames:
+            with open(os.path.join(dirpath, filename)) as scenario_stream:
+                scenario_def = json.load(scenario_stream, object_hook=object_hook)
+            # Construct test from scenario.
+ new_test = create_test(scenario_def) + test_name = f"test_{os.path.splitext(filename)[0]}" + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestSdamMonitoring(AsyncIntegrationTest): + knobs: client_knobs + listener: ServerAndTopologyEventListener + test_client: AsyncMongoClient + coll: AsyncCollection + + @classmethod + def setUpClass(cls): + # Speed up the tests by decreasing the event publish frequency. + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + + @classmethod + def tearDownClass(cls): + cls.knobs.disable() + + @async_client_context.require_failCommand_fail_point + async def asyncSetUp(self): + await super().asyncSetUp() + + retry_writes = async_client_context.supports_transactions() + self.test_client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + await self.coll.insert_one({}) + self.listener.reset() + + async def asyncTearDown(self): + await super().asyncTearDown() + + async def _test_app_error(self, fail_command_opts, expected_error): + address = await self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {"failCommands": ["insert"]} + data.update(fail_command_opts) + fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, + } + async with self.fail_point(fail_insert): + if self.test_client.options.retry_writes: + await self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + await self.coll.insert_one({}) + await self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known + ) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown_and_rediscovered(): + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) + + # Topology events are not published synchronously + await async_wait_until(marked_unknown_and_rediscovered, "rediscover node") + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) + + async def test_network_error_publishes_events(self): + await self._test_app_error({"closeConnection": True}, ConnectionFailure) + + # In 4.4+, not primary errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. 
+ @async_client_context.require_version_max(4, 3) + async def test_not_primary_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + async def test_shutdown_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection.py b/test/asynchronous/test_server_selection.py new file mode 100644 index 0000000000..f570662b85 --- /dev/null +++ b/test/asynchronous/test_server_selection.py @@ -0,0 +1,212 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +from pymongo import AsyncMongoClient, ReadPreference +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.errors import ServerSelectionTimeoutError +from pymongo.hello import HelloCompat +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.typings import strip_optional + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_wait_until +from test.asynchronous.utils_selection_tests import ( + create_selection_tests, + get_topology_settings_dict, +) +from test.utils_selection_tests_shared import ( + get_addresses, + make_server_description, +) +from test.utils_shared import ( + FunctionCallRecorder, + OvertCommandListener, +) + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent, "server_selection", "server_selection" + ) +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "server_selection" + ) + + +class SelectionStoreSelector: + """No-op selector that keeps track of what was passed to it.""" + + def __init__(self): + self.selection = None + + def __call__(self, selection): + self.selection = selection + return selection + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestCustomServerSelectorFunction(AsyncIntegrationTest): + @async_client_context.require_replica_set + async def test_functional_select_max_port_number_host(self): + # Selector that returns server with highest port number. + def custom_selector(servers): + ports = [s.address[1] for s in servers] + idx = ports.index(max(ports)) + return [servers[idx]] + + # Initialize client with appropriate listeners. 
+        listener = OvertCommandListener()
+        client = await self.async_rs_or_single_client(
+            server_selector=custom_selector, event_listeners=[listener]
+        )
+        coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll
+        self.addAsyncCleanup(client.drop_database, "testdb")
+
+        # Wait for the node list to be fully populated.
+        async def all_hosts_started():
+            return len((await client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len(
+                client._topology._description.readable_servers
+            )
+
+        await async_wait_until(all_hosts_started, "receive heartbeat from all hosts")
+
+        expected_port = max(
+            [strip_optional(n.address[1]) for n in client._topology._description.readable_servers]
+        )
+
+        # Insert 1 record and access it 10 times.
+        await coll.insert_one({"name": "John Doe"})
+        for _ in range(10):
+            await coll.find_one({"name": "John Doe"})
+
+        # Confirm all find commands are run against the appropriate host.
+        for command in listener.started_events:
+            if command.command_name == "find":
+                self.assertEqual(command.connection_id[1], expected_port)
+
+    async def test_invalid_server_selector(self):
+        # Client initialization must fail if server_selector is not callable.
+        for selector_candidate in [[], 10, "string", {}]:
+            with self.assertRaisesRegex(ValueError, "must be a callable"):
+                AsyncMongoClient(connect=False, server_selector=selector_candidate)
+
+        # None value for server_selector is OK.
+        AsyncMongoClient(connect=False, server_selector=None)
+
+    @async_client_context.require_replica_set
+    async def test_selector_called(self):
+        selector = FunctionCallRecorder(lambda x: x)
+
+        # Client setup.
+        mongo_client = await self.async_rs_or_single_client(server_selector=selector)
+        test_collection = mongo_client.testdb.test_collection
+        self.addAsyncCleanup(mongo_client.drop_database, "testdb")
+
+        # Do N operations and test selector is called at least N-1 times due to fast path.
+        await test_collection.insert_one({"age": 20, "name": "John"})
+        await test_collection.insert_one({"age": 31, "name": "Jane"})
+        await test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}})
+        await test_collection.find_one({"name": "Roe"})
+        self.assertGreaterEqual(selector.call_count, 3)
+
+    @async_client_context.require_replica_set
+    async def test_latency_threshold_application(self):
+        selector = SelectionStoreSelector()
+
+        scenario_def: dict = {
+            "topology_description": {
+                "type": "ReplicaSetWithPrimary",
+                "servers": [
+                    {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}},
+                    {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}},
+                    {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}},
+                ],
+            }
+        }
+
+        # Create & populate Topology such that all but one server is too slow.
+        rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]]
+        min_rtt_idx = rtt_times.index(min(rtt_times))
+        seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"])
+        settings = get_topology_settings_dict(
+            heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector
+        )
+        topology = Topology(TopologySettings(**settings))
+        await topology.open()
+        for server in scenario_def["topology_description"]["servers"]:
+            server_description = make_server_description(server, hosts)
+            await topology.on_change(server_description)
+
+        # Invoke server selection and assert no filtering based on latency
+        # prior to custom server selection logic kicking in.
+ server = await topology.select_server(ReadPreference.NEAREST, _Op.TEST) + assert selector.selection is not None + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) + + # Ensure proper filtering based on latency after custom selection. + self.assertEqual(server.description.address, seeds[min_rtt_idx]) + + @async_client_context.require_replica_set + async def test_server_selector_bypassed(self): + selector = FunctionCallRecorder(lambda x: x) + + scenario_def = { + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that no server is writeable. + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + await topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Invoke server selection and assert no calls to our custom selector. + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + await topology.select_server( + writable_server_selector, _Op.TEST, server_selection_timeout=0.1 + ) + self.assertEqual(selector.call_count, 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_in_window.py b/test/asynchronous/test_server_selection_in_window.py new file mode 100644 index 0000000000..dd0ff734f7 --- /dev/null +++ b/test/asynchronous/test_server_selection_in_window.py @@ -0,0 +1,180 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import asyncio +import os +import threading +from pathlib import Path +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky +from test.asynchronous.utils_selection_tests import create_topology +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, + async_wait_until, +) + +from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False +# Location of JSON test specifications. 
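+# (The asynchronous tests live one directory below the shared JSON specs,
+# hence the extra .parent in the branch below.)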
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) + + +class TestAllScenarios(unittest.IsolatedAsyncioTestCase): + async def run_scenario(self, scenario_def): + topology = await create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock["operation_count"] + + pref = ReadPreference.NEAREST + counts = {address: 0 for address in topology._description.server_descriptions()} + + # Number of times to repeat server selection + iterations = scenario_def["iterations"] + for _ in range(iterations): + server = await topology.select_server(pref, _Op.TEST, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address]) / iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def) + + return run_scenario + + +class CustomSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. + """ + return [scenario_def] + + +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderTask(ConcurrentRunner): + def __init__(self, collection, iterations): + super().__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + async def run(self): + for _ in range(self.iterations): + await self.collection.find_one({}) + self.passed = True + + +class TestProse(AsyncIntegrationTest): + async def frequencies(self, client, listener, n_finds=10): + coll = client.test.test + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + await task.start() + for task in tasks: + await task.join() + for task in tasks: + self.assertTrue(task.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_TASKS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0.0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address] / float(len(events)) + return freqs + + @async_client_context.require_failCommand_appName + @async_client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") + async def test_load_balancing(self): + listener = OvertCommandListener() + cmap_listener = CMAPListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. 
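+        # (A 30 second threshold keeps every mongos inside the latency window,
+        # so differences in selection frequency come from operation counts
+        # rather than RTT.)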
+ client = await self.async_rs_client( + async_client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener, cmap_listener], + localThresholdMS=30000, + minPoolSize=10, + ) + await async_wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + # Wait for both pools to be populated. + await cmap_listener.async_wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. + delay_finds = { + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", + }, + } + async with self.fail_point(delay_finds): + nodes = async_client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = await self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = await self.frequencies(client, listener, n_finds=150) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_logging.py b/test/asynchronous/test_server_selection_logging.py new file mode 100644 index 0000000000..6b0975318a --- /dev/null +++ b/test/asynchronous/test_server_selection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") + + +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_rtt.py b/test/asynchronous/test_server_selection_rtt.py new file mode 100644 index 0000000000..1f8f6bc7df --- /dev/null +++ b/test/asynchronous/test_server_selection_rtt.py @@ -0,0 +1,77 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import json +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncPyMongoTestCase + +from pymongo.read_preferences import MovingAverage + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") + + +class TestAllScenarios(AsyncPyMongoTestCase): + pass + + +def create_test(scenario_def): + def run_scenario(self): + moving_average = MovingAverage() + + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) + + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) + + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py new file mode 100644 index 0000000000..ff0feebafc --- /dev/null +++ b/test/asynchronous/test_session.py @@ -0,0 +1,1251 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the client_session module.""" +from __future__ import annotations + +import asyncio +import copy +import sys +import time +from inspect import iscoroutinefunction +from io import BytesIO +from test.asynchronous.helpers import ExceptionCatchingTask +from typing import Any, Callable, List, Set, Tuple + +from pymongo.synchronous.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncUnitTest, + SkipTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import client_knobs +from test.utils_shared import ( + EventListener, + HeartbeatEventListener, + OvertCommandListener, + async_wait_until, +) + +from bson import DBRef +from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket +from pymongo import ASCENDING, AsyncMongoClient, _csot, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.common import _MAX_END_SESSIONS +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import IndexModel, InsertOne, UpdateOne +from pymongo.read_concern import ReadConcern + +_IS_SYNC = False + + +# Ignore auth commands like saslStart, so we can assert lsid is in all commands. +class SessionTestListener(EventListener): + def started(self, event): + if not event.command_name.startswith("sasl"): + super().started(event) + + def succeeded(self, event): + if not event.command_name.startswith("sasl"): + super().succeeded(event) + + def failed(self, event): + if not event.command_name.startswith("sasl"): + super().failed(event) + + def first_command_started(self): + assert len(self.started_events) >= 1, "No command-started events" + + return self.started_events[0] + + +def session_ids(client): + return [s.session_id for s in copy.copy(client._topology._session_pool)] + + +class TestSession(AsyncIntegrationTest): + client2: AsyncMongoClient + sensitive_commands: Set[str] + + @async_client_context.require_sessions + async def asyncSetUp(self): + await super().asyncSetUp() + # Create a second client so we can make sure clients cannot share + # sessions. + self.client2 = await self.async_rs_or_single_client() + + # Redact no commands, so we can test user-admin commands have "lsid". 
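+        # (Monitoring normally redacts sensitive commands such as saslStart
+        # and createUser, which would hide their lsid from the listener.)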
+ self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + monitoring._SENSITIVE_COMMANDS.clear() + + self.listener = SessionTestListener() + self.session_checker_listener = SessionTestListener() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener, self.session_checker_listener] + ) + self.db = self.client.pymongo_test + self.initial_lsids = {s["id"] for s in session_ids(self.client)} + + async def asyncTearDown(self): + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) + await self.client.drop_database("pymongo_test") + used_lsids = self.initial_lsids.copy() + for event in self.session_checker_listener.started_events: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) + + current_lsids = {s["id"] for s in session_ids(self.client)} + self.assertLessEqual(used_lsids, current_lsids) + + await super().asyncTearDown() + + async def _test_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in ops: + async with client.start_session() as s: + listener.reset() + s._materialize() + last_use = s._server_session.last_use + start = time.monotonic() + self.assertLessEqual(last_use, start) + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{f.__name__} sent wrong lsid with {event.command_name}", + ) + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f(*args, **kw) + + # Test a session cannot be used on another client. + async with self.client2.start_session() as s: + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaisesRegex( + InvalidOperation, + "Can only use session with the AsyncMongoClient that started it", + ): + await f(*args, **kw) + + # No explicit session. + for f, args, kw in ops: + listener.reset() + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + lsids = [] + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + lsids.append(event.command["lsid"]) + + if not (sys.platform.startswith("java") or "PyPy" in sys.version): + # Server session was returned to pool. Ignore interpreters with + # non-deterministic GC. + for lsid in lsids: + self.assertIn( + lsid, + session_ids(client), + f"{f.__name__} did not return implicit session to pool", + ) + + async def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. 
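+        # With maxPoolSize=1 every operation funnels through a single
+        # connection, so all of them should reuse one implicit session once
+        # the race described below is avoided.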
+        succeeded = False
+        lsid_set = set()
+        listener = OvertCommandListener()
+        client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1)
+        # Retry up to 10 times because there is a known race condition that can
+        # cause multiple sessions to be used: connection check-in happens
+        # before session check-in.
+        for _ in range(10):
+            cursor = client.db.test.find({})
+            ops: List[Tuple[Callable, List[Any]]] = [
+                (client.db.test.find_one, [{"_id": 1}]),
+                (client.db.test.delete_one, [{}]),
+                (client.db.test.update_one, [{}, {"$set": {"x": 2}}]),
+                (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]),
+                (client.db.test.find_one_and_delete, [{}]),
+                (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]),
+                (client.db.test.find_one_and_replace, [{}, {}]),
+                (client.db.test.aggregate, [[{"$limit": 1}]]),
+                (client.db.test.find, []),
+                (client.server_info, []),
+                (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]),
+                (cursor.distinct, ["_id"]),
+                (client.db.list_collections, []),
+            ]
+            tasks = []
+            listener.reset()
+
+            async def target(op, *args):
+                if iscoroutinefunction(op):
+                    res = await op(*args)
+                else:
+                    res = op(*args)
+                if isinstance(res, (AsyncCursor, AsyncCommandCursor)):
+                    await res.to_list()
+
+            for op, args in ops:
+                tasks.append(
+                    ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__)
+                )
+                await tasks[-1].start()
+            self.assertEqual(len(tasks), len(ops))
+            for t in tasks:
+                await t.join()
+                self.assertIsNone(t.exc)
+            lsid_set.clear()
+            for i in listener.started_events:
+                if i.command.get("lsid"):
+                    lsid_set.add(i.command.get("lsid")["id"])
+            if len(lsid_set) == 1:
+                # Break on first success.
+                succeeded = True
+                break
+        self.assertTrue(succeeded, lsid_set)
+
+    async def test_pool_lifo(self):
+        # "Pool is LIFO" test from Driver Sessions Spec.
+        a = self.client.start_session()
+        b = self.client.start_session()
+        a_id = a.session_id
+        b_id = b.session_id
+        await a.end_session()
+        await b.end_session()
+
+        s = self.client.start_session()
+        self.assertEqual(b_id, s.session_id)
+        self.assertNotEqual(a_id, s.session_id)
+
+        s2 = self.client.start_session()
+        self.assertEqual(a_id, s2.session_id)
+        self.assertNotEqual(b_id, s2.session_id)
+
+        await s.end_session()
+        await s2.end_session()
+
+    async def test_end_session(self):
+        # We test elsewhere that using an ended session throws InvalidOperation.
+        client = self.client
+        s = client.start_session()
+        self.assertFalse(s.has_ended)
+        self.assertIsNotNone(s.session_id)
+
+        await s.end_session()
+        self.assertTrue(s.has_ended)
+
+        with self.assertRaisesRegex(InvalidOperation, "ended session"):
+            s.session_id
+
+    async def test_end_sessions(self):
+        # Use a new client so that the tearDown hook does not error.
+        listener = SessionTestListener()
+        client = await self.async_rs_or_single_client(event_listeners=[listener])
+        # Start many sessions.
+        sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)]
+        for s in sessions:
+            s._materialize()
+        for s in sessions:
+            await s.end_session()
+
+        # Closing the client should end all sessions and clear the pool.
+        self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1)
+        await client.close()
+        self.assertEqual(len(client._topology._session_pool), 0)
+        end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"]
+        self.assertEqual(len(end_sessions), 2)
+
+        # Closing again should not send any commands.
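+        # (The session pool was emptied by the first close, so there is
+        # nothing left to end.)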
+ listener.reset() + await client.close() + self.assertEqual(len(listener.started_events), 0) + + async def test_client(self): + client = self.client + ops: list = [ + (client.server_info, [], {}), + (client.list_database_names, [], {}), + (client.drop_database, ["pymongo_test"], {}), + ] + + await self._test_ops(client, *ops) + + async def test_database(self): + client = self.client + db = client.pymongo_test + ops: list = [ + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), + (db.list_collection_names, [], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), + ] + await self._test_ops(client, *ops) + + @staticmethod + def collection_write_ops(coll): + """Generate database write ops for tests.""" + return [ + (coll.drop, [], {}), + (coll.bulk_write, [[InsertOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.delete_many, [{}], {}), + (coll.find_one_and_replace, [{}, {}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), + (coll.find_one_and_delete, [{}, {}], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + + async def test_collection(self): + client = self.client + coll = client.pymongo_test.collection + + # Test some collection methods - the rest are in test_cursor. + ops = self.collection_write_ops(coll) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) + + await self._test_ops(client, *ops) + + async def test_cursor_clone(self): + coll = self.client.pymongo_test.collection + # Ensure some batches. + await coll.insert_many({} for _ in range(10)) + self.addAsyncCleanup(coll.drop) + + async with self.client.start_session() as s: + cursor = coll.find(session=s) + self.assertIs(cursor.session, s) + clone = cursor.clone() + self.assertIs(clone.session, s) + + # No explicit session. + cursor = coll.find(batch_size=2) + await anext(cursor) + # Session is "owned" by cursor. + self.assertIsNone(cursor.session) + self.assertIsNotNone(cursor._session) + clone = cursor.clone() + await anext(clone) + self.assertIsNone(clone.session) + self.assertIsNotNone(clone._session) + self.assertIsNot(cursor._session, clone._session) + await cursor.close() + await clone.close() + + async def test_cursor(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + await coll.insert_many([{} for _ in range(1000)]) + + # Test all cursor methods. 
+ if _IS_SYNC: + # getitem is only supported in the synchronous API + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + else: + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + + for name, f in ops: + async with client.start_session() as s: + listener.reset() + await f(session=s) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{name} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f(session=s) + + # No explicit session. + for name, f in ops: + listener.reset() + await f(session=None) + event0 = listener.first_command_started() + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") + + lsid = event0.command["lsid"] + + for event in listener.started_events[1:]: + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" + ) + + self.assertEqual( + lsid, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + async def test_gridfs(self): + client = self.client + fs = AsyncGridFS(client.pymongo_test) + + async def new_file(session=None): + grid_file = fs.new_file(_id=1, filename="f", session=session) + # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. 
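+            # (GridFS's default chunk size is 255 KiB, so a 1 MiB write spans
+            # five chunks.)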
+ await grid_file.write(b"a" * 1048576) + await grid_file.close() + + async def find(session=None): + files = await fs.find({"_id": 1}, session=session).to_list() + for f in files: + await f.read() + + async def get(session=None): + await (await fs.get(1, session=session)).read() + + async def get_version(session=None): + await (await fs.get_version("f", session=session)).read() + + async def get_last_version(session=None): + await (await fs.get_last_version("f", session=session)).read() + + async def find_list(session=None): + await fs.find(session=session).to_list() + + await self._test_ops( + client, + (new_file, [], {}), + (fs.put, [b"data"], {}), + (get, [], {}), + (get_version, [], {}), + (get_last_version, [], {}), + (fs.list, [], {}), + (fs.find_one, [1], {}), + (find_list, [], {}), + (fs.exists, [1], {}), + (find, [], {}), + (fs.delete, [1], {}), + ) + + async def test_gridfs_bucket(self): + client = self.client + bucket = AsyncGridFSBucket(client.pymongo_test) + + async def upload(session=None): + stream = bucket.open_upload_stream("f", session=session) + await stream.write(b"a" * 1048576) + await stream.close() + + async def upload_with_id(session=None): + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + await stream.write(b"a" * 1048576) + await stream.close() + + async def open_download_stream(session=None): + stream = await bucket.open_download_stream(1, session=session) + await stream.read() + + async def open_download_stream_by_name(session=None): + stream = await bucket.open_download_stream_by_name("f", session=session) + await stream.read() + + async def find(session=None): + files = await bucket.find({"_id": 1}, session=session).to_list() + for f in files: + await f.read() + + sio = BytesIO() + + await self._test_ops( + client, + (upload, [], {}), + (upload_with_id, [], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), + (open_download_stream, [], {}), + (open_download_stream_by_name, [], {}), + (bucket.download_to_stream, [1, sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), + (find, [], {}), + (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), + # Delete both files so _test_ops can run these operations twice. + (bucket.delete, [1], {}), + (bucket.delete_by_name, ["f"], {}), + ) + + async def test_gridfsbucket_cursor(self): + client = self.client + bucket = AsyncGridFSBucket(client.pymongo_test) + + for file_id in 1, 2: + stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) + await stream.write(b"a" * 1048576) + await stream.close() + + async with client.start_session() as s: + cursor = bucket.find(session=s) + async for f in cursor: + await f.read() + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + + # No explicit session. + cursor = bucket.find(batch_size=1) + files = [await cursor.next()] + + s = cursor._session + self.assertFalse(s.has_ended) + cursor.__del__() + + self.assertTrue(s.has_ended) + self.assertIsNone(cursor._session) + + # Files are still valid, they use their own sessions. + for f in files: + await f.read() + + # Explicit session. + async with client.start_session() as s: + cursor = bucket.find(session=s) + assert cursor.session is not None + s = cursor.session + files = await cursor.to_list() + cursor.__del__() + self.assertFalse(s.has_ended) + + for f in files: + await f.read() + + for f in files: + # Attempt to read the file again. 
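+            # The explicit session ended when the with-block above exited, so
+            # reads through these files must now fail.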
+ await f.seek(0) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f.read() + + async def test_aggregate(self): + client = self.client + coll = client.pymongo_test.collection + + async def agg(session=None): + await (await coll.aggregate([], batchSize=2, session=session)).to_list() + + # With empty collection. + await self._test_ops(client, (agg, [], {})) + + # Now with documents. + await coll.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(coll.drop) + await self._test_ops(client, (agg, [], {})) + + async def test_killcursors(self): + client = self.client + coll = client.pymongo_test.collection + await coll.insert_many([{} for _ in range(10)]) + + async def explicit_close(session=None): + cursor = coll.find(batch_size=2, session=session) + await anext(cursor) + await cursor.close() + + await self._test_ops(client, (explicit_close, [], {})) + + async def test_aggregate_error(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + # 3.6.0 mongos only validates the aggregate pipeline when the + # database exists. + await coll.insert_one({}) + listener.reset() + + with self.assertRaises(OperationFailure): + await coll.aggregate([{"$badOperation": {"bar": 1}}]) + + event = listener.first_command_started() + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] + # Session was returned to pool despite error. + self.assertIn(lsid, session_ids(client)) + + async def _test_cursor_helper(self, create_cursor, close_cursor): + coll = self.client.pymongo_test.collection + await coll.insert_many([{} for _ in range(1000)]) + + cursor = await create_cursor(coll, None) + await anext(cursor) + # Session is "owned" by cursor. + session = cursor._session + self.assertIsNotNone(session) + lsid = session.session_id + await anext(cursor) + + # Cursor owns its session unto death. + self.assertNotIn(lsid, session_ids(self.client)) + await close_cursor(cursor) + self.assertIn(lsid, session_ids(self.client)) + + # An explicit session is not ended by cursor.close() or list(cursor). 
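+        # (Only the application, or exiting the context manager, ends it.)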
+ async with self.client.start_session() as s: + cursor = await create_cursor(coll, s) + await anext(cursor) + await close_cursor(cursor) + self.assertFalse(s.has_ended) + lsid = s.session_id + + self.assertTrue(s.has_ended) + self.assertIn(lsid, session_ids(self.client)) + + async def test_cursor_close(self): + async def find(coll, session): + return coll.find(session=session) + + await self._test_cursor_helper(find, lambda cursor: cursor.close()) + + async def test_command_cursor_close(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + await self._test_cursor_helper(aggregate, lambda cursor: cursor.close()) + + async def test_cursor_del(self): + async def find(coll, session): + return coll.find(session=session) + + async def delete(cursor): + return cursor.__del__() + + await self._test_cursor_helper(find, delete) + + async def test_command_cursor_del(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + async def delete(cursor): + return cursor.__del__() + + await self._test_cursor_helper(aggregate, delete) + + async def test_cursor_exhaust(self): + async def find(coll, session): + return coll.find(session=session) + + await self._test_cursor_helper(find, lambda cursor: cursor.to_list()) + + async def test_command_cursor_exhaust(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + await self._test_cursor_helper(aggregate, lambda cursor: cursor.to_list()) + + async def test_cursor_limit_reached(self): + async def find(coll, session): + return coll.find(limit=4, batch_size=2, session=session) + + await self._test_cursor_helper( + find, + lambda cursor: cursor.to_list(), + ) + + async def test_command_cursor_limit_reached(self): + async def aggregate(coll, session): + return await coll.aggregate([], batchSize=900, session=session) + + await self._test_cursor_helper( + aggregate, + lambda cursor: cursor.to_list(), + ) + + async def _test_unacknowledged_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in ops: + async with client.start_session() as s: + listener.reset() + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaises( + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" + ): + await f(*args, **kw) + if f.__name__ == "create_collection": + # create_collection runs listCollections first. + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + # Should not run any command before raising an error. + self.assertFalse(listener.started_events, f"{f.__name__} sent command") + + self.assertTrue(s.has_ended) + + # Unacknowledged write without a session does not send an lsid. + for f, args, kw in ops: + listener.reset() + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + + if f.__name__ == "create_collection": + # create_collection runs listCollections first. 
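+                # (listCollections is a read and therefore acknowledged, so it
+                # still carries an implicit lsid even on a w=0 client.)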
+ event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + for event in listener.started_events: + self.assertNotIn( + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" + ) + + async def test_unacknowledged_writes(self): + # Ensure the collection exists. + await self.client.pymongo_test.test_unacked_writes.insert_one({}) + client = await self.async_rs_or_single_client(w=0, event_listeners=[self.listener]) + db = client.pymongo_test + coll = db.test_unacked_writes + ops: list = [ + (client.drop_database, [db.name], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + ] + ops.extend(self.collection_write_ops(coll)) + await self._test_unacknowledged_ops(client, *ops) + + async def drop_db(): + try: + await self.client.drop_database(db.name) + return True + except OperationFailure as exc: + # Try again on BackgroundOperationInProgressForDatabase and + # BackgroundOperationInProgressForNamespace. + if exc.code in (12586, 12587): + return False + raise + + await async_wait_until(drop_db, "dropped database after w=0 writes") + + async def test_snapshot_incompatible_with_causal_consistency(self): + async with self.client.start_session(causal_consistency=False, snapshot=False): + pass + async with self.client.start_session(causal_consistency=False, snapshot=True): + pass + async with self.client.start_session(causal_consistency=True, snapshot=False): + pass + with self.assertRaises(ConfigurationError): + async with self.client.start_session(causal_consistency=True, snapshot=True): + pass + + async def test_session_not_copyable(self): + client = self.client + async with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) + + +class TestCausalConsistency(AsyncUnitTest): + listener: SessionTestListener + client: AsyncMongoClient + + @async_client_context.require_sessions + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = SessionTestListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + + @async_client_context.require_no_standalone + async def test_core(self): + async with self.client.start_session() as sess: + self.assertIsNone(sess.cluster_time) + self.assertIsNone(sess.operation_time) + self.listener.reset() + await self.client.pymongo_test.test.find_one(session=sess) + started = self.listener.started_events[0] + cmd = started.command + self.assertIsNone(cmd.get("readConcern")) + op_time = sess.operation_time + self.assertIsNotNone(op_time) + succeeded = self.listener.succeeded_events[0] + reply = succeeded.reply + self.assertEqual(op_time, reply.get("operationTime")) + + # No explicit session + await self.client.pymongo_test.test.insert_one({}) + self.assertEqual(sess.operation_time, op_time) + self.listener.reset() + try: + await self.client.pymongo_test.command("doesntexist", session=sess) + except: + pass + failed = self.listener.failed_events[0] + failed_op_time = failed.failure.get("operationTime") + # Some older builds of MongoDB 3.5 / 3.6 return None for + # operationTime when a command fails. Make sure we don't + # change operation_time to None. 
+ if failed_op_time is None: + self.assertIsNotNone(sess.operation_time) + else: + self.assertEqual(sess.operation_time, failed_op_time) + + async with self.client.start_session() as sess2: + self.assertIsNone(sess2.cluster_time) + self.assertIsNone(sess2.operation_time) + self.assertRaises(TypeError, sess2.advance_cluster_time, 1) + self.assertRaises(ValueError, sess2.advance_cluster_time, {}) + self.assertRaises(TypeError, sess2.advance_operation_time, 1) + # No error + assert sess.cluster_time is not None + assert sess.operation_time is not None + sess2.advance_cluster_time(sess.cluster_time) + sess2.advance_operation_time(sess.operation_time) + self.assertEqual(sess.cluster_time, sess2.cluster_time) + self.assertEqual(sess.operation_time, sess2.operation_time) + + async def _test_reads(self, op, exception=None): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + if exception: + with self.assertRaises(exception): + await op(coll, sess) + else: + await op(coll, sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @async_client_context.require_no_standalone + async def test_reads(self): + # Make sure the collection exists. + await self.client.pymongo_test.test.insert_one({}) + + async def aggregate(coll, session): + return await (await coll.aggregate([], session=session)).to_list() + + async def aggregate_raw(coll, session): + return await (await coll.aggregate_raw_batches([], session=session)).to_list() + + async def find_raw(coll, session): + return await coll.find_raw_batches({}, session=session).to_list() + + await self._test_reads(aggregate) + await self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) + await self._test_reads(lambda coll, session: coll.find_one({}, session=session)) + await self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) + await self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) + await self._test_reads(aggregate_raw) + await self._test_reads(find_raw) + + with self.assertRaises(ConfigurationError): + await self._test_reads( + lambda coll, session: coll.estimated_document_count(session=session) + ) + + async def _test_writes(self, op): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await op(coll, sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + await coll.find_one({}, session=sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @async_client_context.require_no_standalone + async def test_writes(self): + await self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + await self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + await self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) + await self._test_writes( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, 
session=session) + ) + await self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + await self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) + await self._test_writes( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + await self._test_writes( + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + await self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) + await self._test_writes( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + await self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + await self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) + + async def _test_no_read_concern(self, op): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + await op(coll, sess) + rc = self.listener.started_events[0].command.get("readConcern") + self.assertIsNone(rc) + + @async_client_context.require_no_standalone + async def test_writes_do_not_include_read_concern(self): + await self._test_no_read_concern( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + await self._test_no_read_concern( + lambda coll, session: coll.insert_many([{}], session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + await self._test_no_read_concern( + lambda coll, session: coll.delete_many({}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.create_index("foo", session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + await self._test_no_read_concern( + lambda coll, session: coll.drop_index("foo_1", session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) + + # Not a write, but explain also doesn't support readConcern. 
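+        # (The explain command does not accept a readConcern, so no
+        # afterClusterTime can be attached to it.)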
+ await self._test_no_read_concern( + lambda coll, session: coll.find({}, session=session).explain() + ) + + @async_client_context.require_no_standalone + async def test_get_more_does_not_include_read_concern(self): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + await coll.insert_many([{}, {}]) + cursor = coll.find({}).batch_size(1) + await anext(cursor) + self.listener.reset() + await cursor.to_list() + started = self.listener.started_events[0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) + + async def test_session_not_causal(self): + async with self.client.start_session(causal_consistency=False) as s: + await self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @async_client_context.require_standalone + async def test_server_not_causal(self): + async with self.client.start_session(causal_consistency=True) as s: + await self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @async_client_context.require_no_standalone + async def test_read_concern(self): + async with self.client.start_session(causal_consistency=True) as s: + coll = self.client.pymongo_test.test + await coll.insert_one({}, session=s) + self.listener.reset() + await coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + self.assertIsNotNone(read_concern) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + coll = coll.with_options(read_concern=ReadConcern("majority")) + self.listener.reset() + await coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + self.assertIsNotNone(read_concern) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + @async_client_context.require_no_standalone + async def test_cluster_time_with_server_support(self): + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNotNone(after_cluster_time) + + @async_client_context.require_standalone + async def test_cluster_time_no_server_support(self): + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNone(after_cluster_time) + + +class TestClusterTime(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + if "$clusterTime" not in (await async_client_context.hello): + raise SkipTest("$clusterTime not supported") + + # Sessions prose test: 3) $clusterTime in commands + async def test_cluster_time(self): + listener = SessionTestListener() + client = await 
self.async_rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + await collection.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(collection.drop) + self.addAsyncCleanup(client.pymongo_test.collection2.drop) + + async def rename_and_drop(): + # Ensure collection exists. + await collection.insert_one({}) + await collection.rename("collection2") + await client.pymongo_test.collection2.drop() + + async def insert_and_find(): + cursor = collection.find().batch_size(1) + for _ in range(10): + # Advance the cluster time. + await collection.insert_one({}) + await anext(cursor) + + await cursor.close() + + async def insert_and_aggregate(): + cursor = (await collection.aggregate([], batchSize=1)).batch_size(1) + for _ in range(5): + # Advance the cluster time. + await collection.insert_one({}) + await anext(cursor) + + await cursor.close() + + async def aggregate(): + await (await collection.aggregate([])).to_list() + + ops = [ + # Tests from Driver Sessions Spec. + ("ping", lambda: client.admin.command("ping")), + ("aggregate", lambda: aggregate()), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + # Additional PyMongo tests. + ("insert_and_find", insert_and_find), + ("insert_and_aggregate", insert_and_aggregate), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ] + + for _name, f in ops: + listener.reset() + # Call f() twice, insert to advance clusterTime, call f() again. + await f() + await f() + await collection.insert_one({}) + await f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): + self.assertIn( + "$clusterTime", + event.command, + f"{f.__name__} sent no $clusterTime with {event.command_name}", + ) + + if i > 0: + succeeded = listener.succeeded_events[i - 1] + self.assertIn( + "$clusterTime", + succeeded.reply, + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", + ) + + self.assertTrue( + event.command["$clusterTime"]["clusterTime"] + >= succeeded.reply["$clusterTime"]["clusterTime"], + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", + ) + + # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands + async def test_cluster_time_not_used_by_sdam(self): + heartbeat_listener = HeartbeatEventListener() + cmd_listener = OvertCommandListener() + with client_knobs(min_heartbeat_interval=0.01): + c1 = await self.async_single_client( + event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10 + ) + cluster_time = (await c1.admin.command({"ping": 1}))["$clusterTime"] + self.assertEqual(c1._topology.max_cluster_time(), cluster_time) + + # Advance the server's $clusterTime by performing an insert via another client. + await self.db.test.insert_one({"advance": "$clusterTime"}) + # Wait until the client C1 processes the next pair of SDAM heartbeat started + succeeded events. 
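+            # A completed started/succeeded pair guarantees at least one full
+            # server check ran after the insert advanced the cluster time.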
+ heartbeat_listener.reset() + + async def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + await async_wait_until( + next_heartbeat, "never found pair of heartbeat started + succeeded events" + ) + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + await c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_sessions_unified.py b/test/asynchronous/test_sessions_unified.py new file mode 100644 index 0000000000..b4cbac5704 --- /dev/null +++ b/test/asynchronous/test_sessions_unified.py @@ -0,0 +1,40 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py new file mode 100644 index 0000000000..3d4aed1bc1 --- /dev/null +++ b/test/asynchronous/test_srv_polling.py @@ -0,0 +1,387 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the SRV support tests.""" +from __future__ import annotations + +import asyncio +import sys +import time +from test.asynchronous.utils import flaky +from test.utils_shared import FunctionCallRecorder +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, client_knobs, unittest +from test.asynchronous.utils import async_wait_until + +import pymongo +from pymongo import common +from pymongo.asynchronous.srv_resolver import _have_dnspython +from pymongo.errors import ConfigurationError + +_IS_SYNC = False + +WAIT_TIME = 0.1 + + +class SrvPollingKnobs: + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): + self.ttl_time = ttl_time + self.min_srv_rescan_interval = min_srv_rescan_interval + self.nodelist_callback = nodelist_callback + self.count_resolver_calls = count_resolver_calls + + self.old_min_srv_rescan_interval = None + self.old_dns_resolver_response = None + + def enable(self): + self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL + self.old_dns_resolver_response = ( + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) + + if self.min_srv_rescan_interval is not None: + common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval + + async def mock_get_hosts_and_min_ttl(resolver, *args): + assert self.old_dns_resolver_response is not None + nodes, ttl = await self.old_dns_resolver_response(resolver) + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() + if self.ttl_time is not None: + ttl = self.ttl_time + return nodes, ttl + + patch_func: Any + if self.count_resolver_calls: + patch_func = FunctionCallRecorder(mock_get_hosts_and_min_ttl) + else: + patch_func = mock_get_hosts_and_min_ttl + + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + + def __enter__(self): + self.enable() + + def disable(self): + common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response + ) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + +class TestSrvPolling(AsyncPyMongoTestCase): + BASE_SRV_RESPONSE = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27018), + ] + + CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" + + async def asyncSetUp(self): + # Patch timeouts to ensure short rescan SRV interval. + self.client_knobs = client_knobs( + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) + self.client_knobs.enable() + + async def asyncTearDown(self): + self.client_knobs.disable() + + def get_nodelist(self, client): + return client._topology.description.server_descriptions().keys() + + async def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology eventually sees all nodes in the + expected_nodelist. + """ + + def predicate(): + nodelist = self.get_nodelist(client) + if set(expected_nodelist) == set(nodelist): + return True + return False + + await async_wait_until(predicate, "see expected nodelist", timeout=timeout) + + async def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology ever deviates from seeing all nodes + in the expected_nodelist. 
Consistency is verified by waiting (up to the given
+        timeout) for the node list to match, then re-checking it. Also check
+        that the resolver is called at least once.
+        """
+
+        def predicate():
+            if set(expected_nodelist) == set(self.get_nodelist(client)):
+                return (
+                    pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count
+                    >= 1
+                )
+            return False
+
+        await async_wait_until(predicate, "Node list equals expected nodelist", timeout=timeout)
+
+        nodelist = self.get_nodelist(client)
+        if set(expected_nodelist) != set(nodelist):
+            msg = "Client nodelist %s changed unexpectedly (expected %s)"
+            self.fail(msg % (nodelist, expected_nodelist))
+        self.assertGreaterEqual(
+            pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count,  # type: ignore
+            1,
+            "resolver was never called",
+        )
+        return True
+
+    async def run_scenario(self, dns_response, expect_change):
+        self.assertTrue(_have_dnspython())
+        if callable(dns_response):
+            dns_resolver_response = dns_response
+        else:
+
+            def dns_resolver_response():
+                return dns_response
+
+        if expect_change:
+            assertion_method = self.assert_nodelist_change
+            count_resolver_calls = False
+            expected_response = dns_response
+        else:
+            assertion_method = self.assert_nodelist_nochange
+            count_resolver_calls = True
+            expected_response = self.BASE_SRV_RESPONSE
+
+        # Patch timeouts to ensure short test running times.
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING)
+            await client.aconnect()
+            await self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client)
+            # Patch list of hosts returned by DNS query.
+            with SrvPollingKnobs(
+                nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls
+            ):
+                await assertion_method(expected_response, client)
+
+            # Close the client early to avoid affecting the next scenario run.
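+            # Closing stops the client's background SRV monitor so the
+            # patched resolver cannot fire again during the next scenario.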
+ await client.close() + + async def test_addition(self): + response = self.BASE_SRV_RESPONSE[:] + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_removal(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + await self.run_scenario(response, True) + + async def test_replace_one(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_replace_both_with_one(self): + response = [("localhost.test.build.10gen.cc", 27019)] + await self.run_scenario(response, True) + + async def test_replace_both_with_two(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + await self.run_scenario(response, True) + + async def test_dns_failures(self): + from dns import exception + + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + + def response_callback(*args): + raise exc("DNS Failure!") + + await self.run_scenario(response_callback, False) + + @flaky(reason="PYTHON-5500", max_runs=3) + async def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + await self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + + async def test_dns_record_lookup_empty(self): + response: list = [] + await self.run_scenario(response, False) + + async def _test_recover_from_initial(self, initial_callback): + # Construct a valid final response callback distinct from base. + response_final = self.BASE_SRV_RESPONSE[:] + response_final.pop() + + def final_callback(): + return response_final + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): + # Client uses unpatched method to get initial nodelist + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + # Invalid DNS resolver response should not change nodelist. + await self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): + # Nodelist should reflect new valid DNS resolver response. 
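+            # (i.e. the poller recovers once DNS returns a usable host list)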
+ await self.assert_nodelist_change(response_final, client) + + @flaky(reason="PYTHON-5315") + async def test_recover_from_initially_empty_seedlist(self): + def empty_seedlist(): + return [] + + await self._test_recover_from_initial(empty_seedlist) + + @flaky(reason="PYTHON-5315") + async def test_recover_from_initially_erroring_seedlist(self): + def erroring_seedlist(): + raise ConfigurationError + + await self._test_recover_from_initial(erroring_seedlist) + + async def test_10_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_11_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_12_new_dns_randomly_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await asyncio.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) + self.assertEqual(len(final_topology), 2) + + async def test_does_not_flipflop(self): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) + await client.aconnect() + old = set(client.topology_description.server_descriptions()) + await asyncio.sleep(4 * WAIT_TIME) + new = set(client.topology_description.server_descriptions()) + self.assertSetEqual(old, new) + + async def test_srv_service_name(self): + # Construct a valid final response callback distinct from base. 
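+        # Note the trailing dots: these are fully-qualified DNS names returned
+        # for the custom srvServiceName lookup.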
+ response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" + ) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + with self.assertRaises(AssertionError): + await self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py new file mode 100644 index 0000000000..0ce3e8bbac --- /dev/null +++ b/test/asynchronous/test_ssl.py @@ -0,0 +1,691 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for SSL support.""" +from __future__ import annotations + +import os +import pathlib +import socket +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + HAVE_IPADDRESS, + AsyncIntegrationTest, + AsyncPyMongoTestCase, + SkipTest, + async_client_context, + connected, + remove_all_users, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + cat_files, + ignore_deprecations, +) +from urllib.parse import quote_plus + +from pymongo import AsyncMongoClient, ssl_support +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context +from pymongo.write_concern import WriteConcern + +_HAVE_PYOPENSSL = False +try: + # All of these must be available to use PyOpenSSL + import OpenSSL + import requests + import service_identity + + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address + + from pymongo.ocsp_support import _load_trusted_ca_certs + + _HAVE_PYOPENSSL = True +except ImportError: + _load_trusted_ca_certs = None # type: ignore + + +if HAVE_SSL: + import ssl + +_IS_SYNC = False + +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" + +# To fully test this start a mongod instance (built with SSL support) like so: +# mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ +# --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ +# --sslCAFile /path/to/pymongo/test/certificates/ca.pem \ +# --sslWeakCertificateValidation +# Also, make sure you have 'server' as an alias for localhost in /etc/hosts +# +# Note: For all replica set tests to pass, the replica set configuration must +# use 'localhost' for the hostname of all hosts. 
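+#
+# On MongoDB 4.2+ the ssl-prefixed server options above have tls-prefixed
+# equivalents; an (untested) sketch of the same invocation would be:
+# mongod --dbpath /path/to/data/directory --tlsMode requireTLS \
+#   --tlsCertificateKeyFile /path/to/pymongo/test/certificates/server.pem \
+#   --tlsCAFile /path/to/pymongo/test/certificates/ca.pem \
+#   --tlsAllowConnectionsWithoutCertificates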
+ + +class TestClientSSL(AsyncPyMongoTestCase): + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") + def test_no_ssl_module(self): + # Explicit + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) + + # Implied + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations + def test_config_ssl(self): + # Tests various ssl configurations + self.assertRaises(ValueError, self.simple_client, ssl="foo") + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) + + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) + + # Test invalid combinations + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False + ) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_pyopenssl_when_available(self): + self.assertTrue(HAVE_PYSSL) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) + + +class TestSSL(AsyncIntegrationTest): + saved_port: int + + async def assertClientWorks(self, client): + coll = client.pymongo_test.ssl_test.with_options( + write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.drop() + await coll.insert_one({"ssl": True}) + self.assertTrue((await coll.find_one())["ssl"]) + await coll.drop() + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def asyncSetUp(self): + await super().asyncSetUp() + # MongoClient should connect to the primary by default. 
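+        # Patch the class-level default port so clients created from bare
+        # hostnames reach the test server; restored in asyncTearDown.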
+ self.saved_port = AsyncMongoClient.PORT + AsyncMongoClient.PORT = await async_client_context.port + + async def asyncTearDown(self): + AsyncMongoClient.PORT = self.saved_port + + @async_client_context.require_tls + async def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") + # Expects the server to be running with ssl and with + # no --sslPEMKeyFile or with --sslWeakCertificateValidation + await self.assertClientWorks(self.client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_tlsCertificateKeyFilePassword(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + await connected( + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_implicitly_set(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + + # test that setting tlsCertificateKeyFile causes ssl to be set to True + client = self.simple_client( + await async_client_context.host, + await async_client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + client = self.simple_client( + await async_client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_validation(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + client = self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. 
" + "Cannot validate hostname in the certificate" + ) + + client = self.simple_client( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + + await self.assertClientWorks(client) + + if HAVE_IPADDRESS: + client = self.simple_client( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_uri_support(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + await self.assertClientWorks(client) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_cert_ssl_validation_hostname_matching(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, True, False, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + self.assertTrue(ctx.check_hostname) + + response = await self.client.admin.command(HelloCompat.LEGACY_CMD) + + with self.assertRaises(ConnectionFailure) as cm: + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) + + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + if "setName" in response: + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + 
tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_sync + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_tlsCRLFile_support(self): + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + await connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_validation_with_system_ca_certs(self): + # Expects the server to be running with server.pem and ca.pem. + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # --sslWeakCertificateValidation + # + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails + await connected( + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert is verified. Disable hostname matching. + await connected( + self.simple_client( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] + ) + ) + + def test_system_certs_config_error(self): + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") + + have_certifi = ssl_support.HAVE_CERTIFI + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test regardless of environment. 
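+        # (both flags are restored in the finally block below)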
+ ssl_support.HAVE_CERTIFI = False + ssl_support.HAVE_WINCERTSTORE = False + try: + with self.assertRaises(ConfigurationError): + self.simple_client("mongodb://localhost/?ssl=true") + finally: + ssl_support.HAVE_CERTIFI = have_certifi + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_certifi_support(self): + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_CERTIFI: + raise SkipTest("Need certifi to test certifi support.") + + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test on Windows, regardless of environment. + ssl_support.HAVE_WINCERTSTORE = False + try: + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) + finally: + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_wincertstore(self): + if sys.platform != "win32": + raise SkipTest("Only valid on Windows.") + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_WINCERTSTORE: + raise SkipTest("Need wincertstore to test wincertstore.") + + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) + + @async_client_context.require_auth + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_mongodb_x509_auth(self): + host, port = await async_client_context.host, await async_client_context.port + self.addAsyncCleanup(remove_all_users, async_client_context.client["$external"]) + + # Give x509 user all necessary privileges. + await async_client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) + + noauth = self.simple_client( + await async_client_context.pair, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await noauth.pymongo_test.test.find_one() + + listener = EventListener() + auth = self.simple_client( + await async_client_context.pair, + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + event_listeners=[listener], + ) + + # No error + await auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. 
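+            # so the listener records only the application's find command.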
+ self.assertEqual(names, ["find"]) + else: + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + # Auth should fail if username and certificate do not match + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) + + bad_client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + bad_client = self.simple_client( + await async_client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + # Invalid certificate (using CA certificate as client certificate) + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + try: + await connected( + self.simple_client( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + ) + except (ConnectionFailure, ConfigurationError): + pass + else: + self.fail("Invalid certificate accepted.") + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. + cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + async with self.simple_client( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(await client.admin.command("ping")) + + @async_client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def test_pyopenssl_ignored_in_async(self): + client = AsyncMongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" + ) + await client.admin.command("ping") # command doesn't matter, just needs it to connect + await client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_streaming_protocol.py b/test/asynchronous/test_streaming_protocol.py new file mode 100644 index 0000000000..70ec49de80 --- /dev/null +++ b/test/asynchronous/test_streaming_protocol.py @@ -0,0 +1,228 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import sys +import time + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import ( + HeartbeatEventListener, + ServerEventListener, + async_wait_until, +) + +from pymongo import monitoring +from pymongo.hello import HelloCompat + +_IS_SYNC = False + + +class TestStreamingProtocol(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + listener.reset() + + fail_hello = { + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", + }, + } + async with self.fail_point(fail_hello): + + def _marked_unknown(event): + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) + + def _discovered_node(event): + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are not published synchronously + await async_wait_until(marked_unknown, "mark node unknown") + await async_wait_until(rediscovered, "rediscover node") + + # Server should be selectable. + await client.admin.command("ping") + + @async_client_context.require_failCommand_appName + async def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. We need to delay the initial hello + # to ensure that RTT is never zero. + name = "streamingRttTest" + delay_hello: dict = { + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': name, + }, + } + async with self.fail_point(delay_hello): + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name + async with self.fail_point(delay_hello): + + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? 
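+                # Until such a getter exists, reach into the private attribute.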
+ topology = client._topology + sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None + return sd.round_trip_time > 0.250 + + await async_wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") + + # Server should be selectable. + await client.admin.command("ping") + + def changed_event(event): + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @async_client_context.require_failCommand_appName + async def test_monitor_waits_after_server_check_error(self): + # This test implements: + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + fail_hello = { + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", + }, + } + async with self.fail_point(fail_hello): + start = time.time() + client = await self.async_single_client( + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) + # Force a connection. + await client.admin.command("ping") + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable hello + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 4.0) + + @async_client_context.require_failCommand_appName + async def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = await self.async_single_client( + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) + # Force a connection. + await client.admin.command("ping") + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", + }, + } + async with self.fail_point(fail_heartbeat): + await async_wait_until( + lambda: hb_listener.matching(hb_failed), "published failed event" + ) + # Reconnect. + await client.admin.command("ping") + + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_failed_events[0].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. 
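+        # If the failure came on the second check, the next succeeded
+        # heartbeat is a fresh (non-awaited) handshake; otherwise the second
+        # succeeded heartbeat was an awaited streaming check.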
+ events = [type(e) for e in hb_listener.events[:4]] + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py new file mode 100644 index 0000000000..478710362e --- /dev/null +++ b/test/asynchronous/test_transactions.py @@ -0,0 +1,630 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import sys +from io import BytesIO +from test.asynchronous.utils_spec_runner import AsyncSpecRunner + +from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket +from pymongo.asynchronous.pool import PoolState +from pymongo.server_selectors import writable_server_selector + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import ( + OvertCommandListener, + async_wait_until, +) +from typing import List + +from bson import encode +from bson.raw_bson import RawBSONDocument +from pymongo import WriteConcern, _csot +from pymongo.asynchronous import client_session +from pymongo.asynchronous.client_session import TransactionOptions +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.errors import ( + AutoReconnect, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + +# Max number of operations to perform after a transaction to prove unpinning +# occurs. Chosen so that there's a low false positive rate. With 2 mongoses, +# 50 attempts yields a one in a quadrillion chance of a false positive +# (1/(0.5^50)). +UNPIN_TEST_MAX_ATTEMPTS = 50 + + +class AsyncTransactionsBase(AsyncSpecRunner): + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not async_client_context.is_mongos + and not async_client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") + + +class TestTransactions(AsyncTransactionsBase): + @async_client_context.require_transactions + def test_transaction_options_validation(self): + default_options = TransactionOptions() + self.assertIsNone(default_options.read_concern) + self.assertIsNone(default_options.write_concern) + self.assertIsNone(default_options.read_preference) + self.assertIsNone(default_options.max_commit_time_ms) + # No error when valid options are provided. 
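+        # All four options accepted together; each invalid type raises below.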
+ TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) + with self.assertRaisesRegex(TypeError, "read_concern must be "): + TransactionOptions(read_concern={}) # type: ignore + with self.assertRaisesRegex(TypeError, "write_concern must be "): + TransactionOptions(write_concern={}) # type: ignore + with self.assertRaisesRegex( + ConfigurationError, "transactions do not support unacknowledged write concern" + ): + TransactionOptions(write_concern=WriteConcern(w=0)) + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): + TransactionOptions(read_preference={}) # type: ignore + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): + TransactionOptions(max_commit_time_ms="10000") # type: ignore + + @async_client_context.require_transactions + async def test_transaction_write_concern_override(self): + """Test txn overrides Client/Database/Collection write_concern.""" + client = await self.async_rs_client(w=0) + db = client.test + coll = db.test + await coll.insert_one({}) + async with client.start_session() as s: + async with await s.start_transaction(write_concern=WriteConcern(w=1)): + self.assertTrue((await coll.insert_one({}, session=s)).acknowledged) + self.assertTrue((await coll.insert_many([{}, {}], session=s)).acknowledged) + self.assertTrue((await coll.bulk_write([InsertOne({})], session=s)).acknowledged) + self.assertTrue((await coll.replace_one({}, {}, session=s)).acknowledged) + self.assertTrue( + (await coll.update_one({}, {"$set": {"a": 1}}, session=s)).acknowledged + ) + self.assertTrue( + (await coll.update_many({}, {"$set": {"a": 1}}, session=s)).acknowledged + ) + self.assertTrue((await coll.delete_one({}, session=s)).acknowledged) + self.assertTrue((await coll.delete_many({}, session=s)).acknowledged) + await coll.find_one_and_delete({}, session=s) + await coll.find_one_and_replace({}, {}, session=s) + await coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) + + unsupported_txn_writes: list = [ + (client.drop_database, [db.name], {}), + (db.drop_collection, ["collection"], {}), + (coll.drop, [], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + # Creating a collection in a transaction requires MongoDB 4.4+. + if async_client_context.version < (4, 3, 4): + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) + + for op in unsupported_txn_writes: + op, args, kwargs = op + async with client.start_session() as s: + kwargs["session"] = s + await s.start_transaction(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + await op(*args, **kwargs) + await s.abort_transaction() + + @async_client_context.require_transactions + @async_client_context.require_multiple_mongoses + async def test_unpin_for_next_transaction(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. 
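+        # (outside the transaction, since creating collections inside one
+        # requires MongoDB 4.4+)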
+ await coll.insert_one({}) + async with client.start_session() as s: + # Session is pinned to Mongos. + async with await s.start_transaction(): + await coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + async with await s.start_transaction(): + cursor = coll.find({}, session=s) + self.assertTrue(await anext(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @async_client_context.require_transactions + @async_client_context.require_multiple_mongoses + async def test_unpin_for_non_transaction_operation(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. + await coll.insert_one({}) + async with client.start_session() as s: + # Session is pinned to Mongos. + async with await s.start_transaction(): + await coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + cursor = coll.find({}, session=s) + self.assertTrue(await anext(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @async_client_context.require_transactions + @async_client_context.require_version_min(4, 3, 4) + async def test_create_collection(self): + client = async_client_context.client + db = client.pymongo_test + coll = db.test_create_collection + self.addAsyncCleanup(coll.drop) + + # Use with_transaction to avoid StaleConfig errors on sharded clusters. + async def create_and_insert(session): + coll2 = await db.create_collection(coll.name, session=session) + self.assertEqual(coll, coll2) + await coll.insert_one({}, session=session) + + async with client.start_session() as s: + await s.with_transaction(create_and_insert) + + # Outside a transaction we raise CollectionInvalid on existing colls. + with self.assertRaises(CollectionInvalid): + await db.create_collection(coll.name) + + # Inside a transaction we raise the OperationFailure from create. 
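+        # (server error code 48, NamespaceExists, asserted below)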
+ async with client.start_session() as s: + await s.start_transaction() + with self.assertRaises(OperationFailure) as ctx: + await db.create_collection(coll.name, session=s) + self.assertEqual(ctx.exception.code, 48) # NamespaceExists + + @async_client_context.require_transactions + async def test_gridfs_does_not_support_transactions(self): + client = async_client_context.client + db = client.pymongo_test + gfs = AsyncGridFS(db) + bucket = AsyncGridFSBucket(db) + + async def gridfs_find(*args, **kwargs): + return await gfs.find(*args, **kwargs).next() + + async def gridfs_open_upload_stream(*args, **kwargs): + await (await bucket.open_upload_stream(*args, **kwargs)).write(b"1") + + gridfs_ops = [ + (gfs.put, (b"123",)), + (gfs.get, (1,)), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), + (gfs.list, ()), + (gfs.find_one, ()), + (gridfs_find, ()), + (gfs.exists, ()), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), + (bucket.delete, (1,)), + (bucket.find, ()), + (bucket.open_download_stream, (1,)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), + ] + + async with client.start_session() as s, await s.start_transaction(): + for op, args in gridfs_ops: + with self.assertRaisesRegex( + InvalidOperation, + "GridFS does not support multi-document transactions", + ): + await op(*args, session=s) # type: ignore + + # Require 4.2+ for large (16MB+) transactions. + @async_client_context.require_version_min(4, 2) + @async_client_context.require_transactions + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") + async def test_transaction_starts_with_batched_write(self): + if "PyPy" in sys.version and async_client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) + # Start a transaction with a batch of operations that needs to be + # split. + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + await coll.delete_many({}) + listener.reset() + self.addAsyncCleanup(coll.drop) + large_str = "\0" * (1 * 1024 * 1024) + ops: List[InsertOne[RawBSONDocument]] = [ + InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48) + ] + async with client.start_session() as session: + async with await session.start_transaction(): + await coll.bulk_write(ops, session=session) # type: ignore[arg-type] + # Assert commands were constructed properly. 
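+        # The 48 1MB documents exceed one OP_MSG, so the bulk write is split
+        # into two insert commands; only the first carries startTransaction.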
+ self.assertEqual( + ["insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.started_events[0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.started_events[1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) + self.assertEqual(48, await coll.count_documents({})) + + @async_client_context.require_transactions + async def test_transaction_direct_connection(self): + client = await self.async_single_client() + coll = client.pymongo_test.test + + # Make sure the collection exists. + await coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + + async def find(*args, **kwargs): + return coll.find(*args, **kwargs) + + async def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + + ops = [ + (coll.bulk_write, [[InsertOne[dict]({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + async with client.start_session() as s, await s.start_transaction(): + res = await f(*args, session=s) # type:ignore[operator] + if isinstance(res, (AsyncCommandCursor, AsyncCursor)): + await res.to_list() + + @async_client_context.require_transactions + async def test_transaction_pool_cleared_error_labelled_transient(self): + c = await self.async_single_client() + + with self.assertRaises(AutoReconnect) as context: + async with c.start_session() as session: + async with await session.start_transaction(): + server = await c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + async with c._checkout(server, session): + pass + + # Verify that the TransientTransactionError label is present in the error. 
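+        # This label tells callers the entire transaction is safe to retry.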
+ self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + + +class PatchSessionTimeout: + """Patches the client_session's with_transaction timeout for testing.""" + + def __init__(self, mock_timeout): + self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT + self.mock_timeout = mock_timeout + + def __enter__(self): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout + + +class TestTransactionsConvenientAPI(AsyncTransactionsBase): + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] + if async_client_context.supports_transactions(): + for address in async_client_context.mongoses: + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) + + async def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + await self.configure_fail_point(client, command_args) + + @async_client_context.require_transactions + async def test_callback_raises_custom_error(self): + class _MyException(Exception): + pass + + async def raise_error(_): + raise _MyException + + async with self.client.start_session() as s: + with self.assertRaises(_MyException): + await s.with_transaction(raise_error) + + @async_client_context.require_transactions + async def test_callback_returns_value(self): + async def callback(_): + return "Foo" + + async with self.client.start_session() as s: + self.assertEqual(await s.with_transaction(callback), "Foo") + + await self.db.test.insert_one({}) + + async def callback2(session): + await self.db.test.insert_one({}, session=session) + return "Foo" + + async with self.client.start_session() as s: + self.assertEqual(await s.with_transaction(callback2), "Foo") + + @async_client_context.require_transactions + async def test_callback_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + err: dict = { + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], + } + raise OperationFailure(err["errmsg"], err["code"], err) + + # Create the collection. + await coll.insert_one({}) + listener.reset() + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + await s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) + + @async_client_context.require_test_commands + @async_client_context.require_transactions + async def test_callback_not_retried_after_commit_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + + # Create the collection. 
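+        # (created up front; the transaction below must run only the insert
+        # and the failing commitTransaction)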
+ await coll.insert_one({}) + await self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"} + ) + listener.reset() + + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + await s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) + + @async_client_context.require_test_commands + @async_client_context.require_transactions + async def test_commit_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + + # Create the collection. + await coll.insert_one({}) + await self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"} + ) + listener.reset() + + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(ConnectionFailure): + await s.with_transaction(callback) + + # One insert for the callback and two commits (includes the automatic + # retry). + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) + + # Tested here because this supports Motor's convenient transactions API. + @async_client_context.require_transactions + async def test_in_transaction_property(self): + client = async_client_context.client + coll = client.test.testcollection + await coll.insert_one({}) + self.addAsyncCleanup(coll.drop) + + async with client.start_session() as s: + self.assertFalse(s.in_transaction) + await s.start_transaction() + self.assertTrue(s.in_transaction) + await coll.insert_one({}, session=s) + self.assertTrue(s.in_transaction) + await s.commit_transaction() + self.assertFalse(s.in_transaction) + + async with client.start_session() as s: + await s.start_transaction() + # commit empty transaction + await s.commit_transaction() + self.assertFalse(s.in_transaction) + + async with client.start_session() as s: + await s.start_transaction() + await s.abort_transaction() + self.assertFalse(s.in_transaction) + + # Using a callback + async def callback(session): + self.assertTrue(session.in_transaction) + + async with client.start_session() as s: + self.assertFalse(s.in_transaction) + await s.with_transaction(callback) + self.assertFalse(s.in_transaction) + + +class TestOptionsInsideTransactionProse(AsyncTransactionsBase): + @async_client_context.require_transactions + @async_client_context.require_no_standalone + async def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = async_client_context.client + coll = client[self.db.name].test + await coll.delete_many({}) + # Start a new session on the client. + async with client.start_session() as s: + # Start a transaction on the session. + await s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. 
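+            # Transactions do not support unacknowledged writes, so this w=0
+            # must not be inherited inside the transaction.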
+ inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = await inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + await s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_transactions_unified.py b/test/asynchronous/test_transactions_unified.py new file mode 100644 index 0000000000..8e5b1ae181 --- /dev/null +++ b/test/asynchronous/test_transactions_unified.py @@ -0,0 +1,55 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Transactions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + + +def setUpModule(): + pass + + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +# Location of JSON test specifications for transactions-convenient-api. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_unified_format.py b/test/asynchronous/test_unified_format.py new file mode 100644 index 0000000000..58a1ea3326 --- /dev/null +++ b/test/asynchronous/test_unified_format.py @@ -0,0 +1,97 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import os +import sys +from pathlib import Path +from typing import Any + +sys.path[0:0] = [""] + +from test import UnitTest, unittest +from test.asynchronous.unified_format import MatchEvaluatorUtil, generate_test_classes + +from bson import ObjectId + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + ) +) + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + ) +) + + +class TestMatchEvaluatorUtil(UnitTest): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: + self.match_evaluator.match_result(spec, actual) + + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + + def test_type(self): + self.match_evaluator.match_result( + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_versioned_api_integration.py b/test/asynchronous/test_versioned_api_integration.py new file mode 100644 index 0000000000..0f6b544465 --- /dev/null +++ b/test/asynchronous/test_versioned_api_integration.py @@ -0,0 +1,85 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = False + +# Location of JSON test specifications. 
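+# For example, in this async variant the path resolves to
+# "<repo>/test/versioned-api", one level above this file's
+# "test/asynchronous" directory.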
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @async_client_context.require_version_min(4, 7) + async def test_command_options(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + await coll.find(batch_size=25).to_list() + await client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @async_client_context.require_version_min(4, 7) + @async_client_context.require_transactions + async def test_command_options_txn(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + + listener.reset() + async with client.start_session() as s, await s.start_transaction(): + await coll.insert_many([{} for _ in range(100)], session=s) + await coll.find(batch_size=25, session=s).to_list() + await client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py new file mode 100644 index 0000000000..0c9e8c10c8 --- /dev/null +++ b/test/asynchronous/unified_format.py @@ -0,0 +1,1640 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. 
+ +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md +""" +from __future__ import annotations + +import asyncio +import binascii +import copy +import functools +import os +import re +import sys +import time +import traceback +from collections import defaultdict +from inspect import iscoroutinefunction +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + unittest, +) +from test.asynchronous.utils import async_get_pool, flaky +from test.asynchronous.utils_spec_runner import SpecRunnerTask +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS +from test.unified_format_shared import ( + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, +) +from test.utils_shared import ( + async_wait_until, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + snake_to_camel, + wait_until, +) +from test.version import Version +from typing import Any, Dict, List, Mapping, Optional + +import pytest + +import pymongo +from bson import SON, json_util +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.objectid import ObjectId +from gridfs import AsyncGridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile +from pymongo import ASCENDING, AsyncMongoClient, CursorType, _csot +from pymongo.asynchronous.change_stream import AsyncChangeStream +from pymongo.asynchronous.client_session import AsyncClientSession, TransactionOptions, _TxnState +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.encryption import AsyncClientEncryption +from pymongo.driver_info import DriverInfo +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + EncryptionError, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, +) +from pymongo.monitoring import ( + CommandStartedEvent, +) +from pymongo.operations import ( + SearchIndexModel, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + + +async def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get("topologies") + if req_topologies: + topology_satisfied = await async_client_context.is_topology_type(req_topologies) + + server_version = Version(*async_client_context.version[:3]) + + min_version_satisfied = True + req_min_server_version = requirement.get("minServerVersion") + if req_min_server_version: + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version + + max_version_satisfied = True + 
req_max_server_version = requirement.get("maxServerVersion") + if req_max_server_version: + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version + + params_satisfied = True + params = requirement.get("serverParameters") + if params: + for param, val in params.items(): + if param not in async_client_context.server_parameters: + params_satisfied = False + elif async_client_context.server_parameters[param] != val: + params_satisfied = False + + auth_satisfied = True + req_auth = requirement.get("auth") + if req_auth is not None: + if req_auth: + auth_satisfied = async_client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = async_client_context.check_auth_type(requirement["authMechanism"]) + else: + auth_satisfied = not async_client_context.auth_enabled + + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True + + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and params_satisfied + and auth_satisfied + and csfle_satisfied + ) + + +class NonLazyCursor: + """A find cursor proxy that creates the remote cursor when initialized.""" + + def __init__(self, find_cursor, client): + self.client = client + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = None + + @classmethod + async def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = await anext(cursor.find_cursor) + except StopAsyncIteration: + cursor.first_result = None + return cursor + + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + + async def __anext__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return await anext(self.find_cursor) + + # Added to support the iterateOnce operation. + try_next = __anext__ + + async def close(self): + await self.find_cursor.close() + self.client = None + + +class EntityMapUtil: + """Utility class that implements an entity map as per the unified + test format specification. 
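+
+    A rough sketch of intended usage (entity names here are illustrative,
+    not from any spec file):
+
+        entity_map = EntityMapUtil(self)
+        await entity_map.create_entities_from_spec(
+            [{"client": {"id": "client0"}}]
+        )
+        client = entity_map["client0"]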
+ """ + + def __init__(self, test_class): + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class + + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self.test.fail(f"Could not find entity named {item} in map") + + def __setitem__(self, key, value): + if not isinstance(key, str): + self.test.fail("Expected entity name of type str, got %s" % (type(key))) + + if key in self._entities: + self.test.fail(f"Entity named {key} already in map") + + self._entities[key] = value + + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + + async def _create_entity(self, entity_spec, uri=None): + if len(entity_spec) != 1: + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") + + entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") + if entity_type == "client": + kwargs: dict = {} + observe_events = spec.get("observeEvents", []) + + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent + for i in range(len(observe_events)): + if "topologyOpeningEvent" == observe_events[i]: + observe_events[i] = "topologyOpenedEvent" + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, + ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): + if async_client_context.load_balancer: + kwargs["h"] = async_client_context.MULTI_MONGOS_LB_URI + elif async_client_context.is_mongos: + kwargs["h"] = async_client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") + if "waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if 
"waitQueueMultiple" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueMultiple") + if server_api: + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) + if uri: + kwargs["h"] = uri + client = await self.test.async_rs_or_single_client(**kwargs) + await client.aconnect() + self[spec["id"]] = client + return + elif entity_type == "database": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) + return + elif entity_type == "collection": + database = self[spec["database"]] + if not isinstance(database, AsyncDatabase): + self.test.fail( + "Expected entity {} to be of type AsyncDatabase, got {}".format( + spec["database"], type(database) + ) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) + return + elif entity_type == "session": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts["default_transaction_options"] = txn_opts + session = client.start_session(**dict(opts)) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) + self.test.addAsyncCleanup(session.end_session) + return + elif entity_type == "bucket": + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + bucket = AsyncGridFSBucket(db, **kwargs) + + # PyMongo does not support AsyncGridFSBucket.drop(), emulate it. + @_csot.apply + async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: + await self._files.drop(*args, **kwargs) + await self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket + return + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". 
+ kms_tls_options = {} + for provider in opts["kms_providers"]: + provider_type = provider.split(":")[0] + if provider_type in KMS_TLS_OPTS: + kms_tls_options[provider] = KMS_TLS_OPTS[provider_type] + self[spec["id"]] = AsyncClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", kms_tls_options), + opts.get("key_expiration_ms"), + ) + return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerTask(name) + await thread.start() + self.test.addAsyncCleanup(thread.join, 5) + self[name] = thread + return + + self.test.fail(f"Unable to create entity of unknown type {entity_type}") + + async def create_entities_from_spec(self, entity_spec, uri=None): + for spec in entity_spec: + await self._create_entity(spec, uri=uri) + + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: + client = self[client_name] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + f"Expected entity {client_name} to be of type AsyncMongoClient, got {type(client)}" + ) + + listener = self._listeners.get(client_name) + if not listener: + self.test.fail(f"No listeners configured for client {client_name}") + + return listener + + def get_lsid_for_session(self, session_name): + session = self[session_name] + if not isinstance(session, AsyncClientSession): + self.test.fail( + f"Expected entity {session_name} to be of type AsyncClientSession, got {type(session)}" + ) + + try: + return session.session_id + except InvalidOperation: + # session has been closed. + return self._session_lsids[session_name] + + async def advance_cluster_times(self, cluster_time) -> None: + """Manually synchronize entities when desired""" + for entity in self._entities.values(): + if isinstance(entity, AsyncClientSession) and cluster_time: + entity.advance_cluster_time(cluster_time) + + +class UnifiedSpecTestMixinV1(AsyncIntegrationTest): + """Mixin class to run test cases from test specification files. + + Assumes that tests conform to the `unified test format + `_. + + Specification of the test suite being currently run is available as + a class attribute ``TEST_SPEC``. + """ + + SCHEMA_VERSION = Version.from_string("1.25") + RUN_ON_LOAD_BALANCER = True + TEST_SPEC: Any + TEST_PATH = "" # This gets filled in by generate_test_classes + mongos_clients: list[AsyncMongoClient] = [] + + @staticmethod + async def should_run_on(run_on_spec): + if not run_on_spec: + # Always run these tests. + return True + + for req in run_on_spec: + if await is_run_on_requirement_satisfied(req): + return True + return False + + async def insert_initial_data(self, initial_data): + for i, collection_data in enumerate(initial_data): + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + opts = collection_data.get("createOptions", {}) + documents = collection_data["documents"] + + # Setup the collection with as few majority writes as possible. + db = self.client[db_name] + await db.drop_collection(coll_name) + # Only use majority wc only on the final write. + if i == len(initial_data) - 1: + wc = WriteConcern(w="majority") + else: + wc = WriteConcern(w=1) + + # Remove any encryption collections associated with the collection. 
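+            # e.g. for a collection "coll" this drops "enxcol_.coll.esc" and
+            # "enxcol_.coll.ecoc" if they exist.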
+ collections = await db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + await db.drop_collection(collection) + + if documents: + if opts: + await db.create_collection(coll_name, **opts) + await db.get_collection(coll_name, write_concern=wc).insert_many(documents) + else: + # Ensure collection exists + await db.create_collection(coll_name, write_concern=wc, **opts) + + @classmethod + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + async def asyncSetUp(self): + # super call creates internal client cls.client + await super().asyncSetUp() + # process file-level runOnRequirements + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not await self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") + + # add any special-casing for skipping tests here + + # Handle mongos_clients for transactions tests. + self.mongos_clients = [] + if async_client_context.supports_transactions() and not async_client_context.load_balancer: + for address in async_client_context.mongoses: + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) + + # process schemaVersion + # note: we check major schema version during class generation + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) + self.assertLessEqual( + version, + self.SCHEMA_VERSION, + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", + ) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: + self.skipTest("Implement PYTHON-1894") + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in description + for x in [ + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") + + if "csot" in class_name: + # Skip tests that are too slow to run on a given platform. 
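+            # Each pattern below is matched with re.match against the
+            # lower-cased test description.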
+ slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + + # Some tests need to be skipped based on the operations they try to run. + for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if not async_client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + + def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception + # Connection errors are considered client errors. 
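+            # (NotPrimaryError is excluded below because it is derived from a
+            # server response rather than raised client-side.)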
+            if isinstance(error, ConnectionFailure):
+                self.assertNotIsInstance(error, NotPrimaryError)
+            elif isinstance(error, CorruptGridFile):
+                pass
+            elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)):
+                pass
+            else:
+                self.assertNotIsInstance(error, PyMongoError)
+
+        if is_timeout_error:
+            self.assertIsInstance(exception, PyMongoError)
+            if not exception.timeout:
+                # Re-raise the exception for better diagnostics.
+                raise exception
+
+        if error_contains:
+            if isinstance(exception, BulkWriteError):
+                errmsg = str(exception.details).lower()
+            elif isinstance(exception, ClientBulkWriteException):
+                errmsg = str(exception.details).lower()
+            else:
+                errmsg = str(exception).lower()
+            self.assertIn(error_contains.lower(), errmsg)
+
+        if error_code:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code, exception.error.details.get("code"))
+            else:
+                self.assertEqual(error_code, exception.details.get("code"))
+
+        if error_code_name:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code_name, exception.error.details.get("codeName"))
+            else:
+                self.assertEqual(error_code_name, exception.details.get("codeName"))
+
+        if error_labels_contain:
+            if isinstance(exception, ClientBulkWriteException):
+                error = exception.error
+            else:
+                error = exception
+            labels = [
+                err_label for err_label in error_labels_contain if error.has_error_label(err_label)
+            ]
+            self.assertEqual(labels, error_labels_contain)
+
+        if error_labels_omit:
+            for err_label in error_labels_omit:
+                if exception.has_error_label(err_label):
+                    self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'")
+
+        if expect_result:
+            if isinstance(exception, BulkWriteError):
+                result = parse_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            elif isinstance(exception, ClientBulkWriteException):
+                result = parse_client_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            else:
+                self.fail(
+                    f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}"
+                )
+
+        return exception
+
+    def __raise_if_unsupported(self, opname, target, *target_types):
+        if not isinstance(target, target_types):
+            self.fail(f"Operation {opname} not supported for entity of type {type(target)}")
+
+    async def __entityOperation_createChangeStream(self, target, *args, **kwargs):
+        self.__raise_if_unsupported(
+            "createChangeStream", target, AsyncMongoClient, AsyncDatabase, AsyncCollection
+        )
+        stream = await target.watch(*args, **kwargs)
+        self.addAsyncCleanup(stream.close)
+        return stream
+
+    async def _clientOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _databaseOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _collectionOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _databaseOperation_runCommand(self, target, **kwargs):
+        self.__raise_if_unsupported("runCommand", target, AsyncDatabase)
+        # Ensure the first key is the command name.
+        ordered_command = SON([(kwargs.pop("command_name"), 1)])
+        ordered_command.update(kwargs["command"])
+        kwargs["command"] = ordered_command
+        return await target.command(**kwargs)
+
+    async def _databaseOperation_runCursorCommand(self, target, **kwargs):
+        return await (await self._databaseOperation_createCommandCursor(target, **kwargs)).to_list()
+
+    async def _databaseOperation_createCommandCursor(self, target, **kwargs):
+        self.__raise_if_unsupported("createCommandCursor", target, AsyncDatabase)
+        # Ensure the first key is the command name.
+        ordered_command = SON([(kwargs.pop("command_name"), 1)])
+        ordered_command.update(kwargs["command"])
+        kwargs["command"] = ordered_command
+        batch_size = 0
+
+        cursor_type = kwargs.pop("cursor_type", "nonTailable")
+        if cursor_type == CursorType.TAILABLE:
+            ordered_command["tailable"] = True
+        elif cursor_type == CursorType.TAILABLE_AWAIT:
+            ordered_command["tailable"] = True
+            ordered_command["awaitData"] = True
+        elif cursor_type != "nonTailable":
+            self.fail(f"unknown cursorType: {cursor_type}")
+
+        if "maxTimeMS" in kwargs:
+            kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS")
+
+        if "batch_size" in kwargs:
+            batch_size = kwargs.pop("batch_size")
+
+        cursor = await target.cursor_command(**kwargs)
+
+        if batch_size > 0:
+            cursor.batch_size(batch_size)
+
+        return cursor
+
+    async def _collectionOperation_assertIndexExists(self, target, **kwargs):
+        collection = self.client[kwargs["database_name"]][kwargs["collection_name"]]
+        index_names = [idx["name"] async for idx in await collection.list_indexes()]
+        self.assertIn(kwargs["index_name"], index_names)
+
+    async def _collectionOperation_assertIndexNotExists(self, target, **kwargs):
+        collection = self.client[kwargs["database_name"]][kwargs["collection_name"]]
+        async for index in await collection.list_indexes():
+            self.assertNotEqual(kwargs["index_name"], index["name"])
+
+    async def _collectionOperation_assertCollectionExists(self, target, **kwargs):
+        database_name = kwargs["database_name"]
+        collection_name = kwargs["collection_name"]
+        collection_name_list = await self.client.get_database(database_name).list_collection_names()
+        self.assertIn(collection_name, collection_name_list)
+
+    async def _databaseOperation_assertIndexExists(self, target, **kwargs):
+        collection = self.client[kwargs["database_name"]][kwargs["collection_name"]]
+        index_names = [idx["name"] async for idx in await collection.list_indexes()]
+        self.assertIn(kwargs["index_name"], index_names)
+
+    async def _databaseOperation_assertIndexNotExists(self, target, **kwargs):
+        collection = self.client[kwargs["database_name"]][kwargs["collection_name"]]
+        async for index in await collection.list_indexes():
+            self.assertNotEqual(kwargs["index_name"], index["name"])
+
+    async def _databaseOperation_assertCollectionExists(self, target, **kwargs):
+        database_name = kwargs["database_name"]
+        collection_name = kwargs["collection_name"]
+        collection_name_list = await self.client.get_database(database_name).list_collection_names()
+        self.assertIn(collection_name, collection_name_list)
+
+    async def kill_all_sessions(self):
+        if getattr(self, "client", None) is None:
+            return
+        clients = self.mongos_clients if self.mongos_clients else [self.client]
+        for client in clients:
+            try:
+                await client.admin.command("killAllSessions", [])
+            except (OperationFailure, AutoReconnect):
+                # "operation was interrupted" by killing the command's
+                # own session.
+                # On 8.0+ killAllSessions sometimes returns a network error.
+ pass + + async def _databaseOperation_listCollections(self, target, *args, **kwargs): + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} + cursor = await target.list_collections(*args, **kwargs) + return await cursor.to_list() + + async def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + kwargs["check_exists"] = False + ret = await target.create_collection(*args, **kwargs) + return ret + + async def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported("aggregate", target, AsyncDatabase, AsyncCollection) + return await (await target.aggregate(*args, **kwargs)).to_list() + + async def _databaseOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + find_cursor = target.find(*args, **kwargs) + return await find_cursor.to_list() + + async def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + if "filter" not in kwargs: + self.fail('createFindCursor requires a "filter" argument') + cursor = await NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) + self.addAsyncCleanup(cursor.close) + return cursor + + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + + async def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for list_indexes") + return await (await target.list_indexes(*args, **kwargs)).to_list() + + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + + async def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return await target.create_search_indexes(models) + + async def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return await (await target.list_search_indexes(name, **agg_kwargs)).to_list() + + async def _sessionOperation_withTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("withTransaction", target, AsyncClientSession) + return await target.with_transaction(*args, **kwargs) + + async def _sessionOperation_startTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("startTransaction", target, AsyncClientSession) + return await target.start_transaction(*args, **kwargs) + + async def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, AsyncChangeStream) + return await anext(target) + + async def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, AsyncCommandCursor + ) + while target.alive: + try: + return await anext(target) + except StopAsyncIteration: + pass + return 
None + + async def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported("close", target, NonLazyCursor, AsyncCommandCursor) + return await target.close() + + async def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = kwargs["driver_info_options"] + driver_info = DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + + async def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + + return await target.create_data_key(*args, **kwargs) + + async def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return await target.get_keys(*args, **kwargs).to_list() + + async def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = await target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + async def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + data = await target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + async def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return await target.encrypt(*args, **kwargs) + + async def _bucketOperation_download( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_downloadByName( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream_by_name(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_upload( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream(*args, **kwargs) + + async def _bucketOperation_uploadWithId( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream_with_id(*args, **kwargs) + + async def _bucketOperation_find( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return await target.find(*args, **kwargs).to_list() + + async def run_entity_operation(self, spec): + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") + if ignore and (expect_error or save_as_entity or expect_result): + raise ValueError( + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments( + 
spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, + ) + else: + arguments = {} + + if isinstance(target, AsyncMongoClient): + method_name = f"_clientOperation_{opname}" + elif isinstance(target, AsyncDatabase): + method_name = f"_databaseOperation_{opname}" + elif isinstance(target, AsyncCollection): + method_name = f"_collectionOperation_{opname}" + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") + elif isinstance(target, AsyncChangeStream): + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, AsyncCommandCursor)): + method_name = f"_cursor_{opname}" + elif isinstance(target, AsyncClientSession): + method_name = f"_sessionOperation_{opname}" + elif isinstance(target, AsyncGridFSBucket): + method_name = f"_bucketOperation_{opname}" + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) + elif isinstance(target, AsyncClientEncryption): + method_name = f"_clientEncryptionOperation_{opname}" + else: + method_name = "doesNotExist" + + try: + method = getattr(self, method_name) + except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" + try: + cmd = getattr(target, target_opname) + except AttributeError: + self.fail(f"Unsupported operation {opname} on entity {target}") + else: + cmd = functools.partial(method, target) + + try: + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments: + timeout = arguments.pop("timeout") + with pymongo.timeout(timeout): + result = await cmd(**dict(arguments)) + else: + result = await cmd(**dict(arguments)) + except Exception as exc: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. 
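+            # e.g. a misspelled keyword argument raises TypeError here and
+            # fails the test instead of being swallowed as an expected error.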
+ if ignore and isinstance(exc, (PyMongoError,)): + return exc + if expect_error: + return self.process_error(exc, expect_error) + raise + else: + if expect_error: + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') + + if expect_result: + actual = coerce_result(opname, result) + self.match_evaluator.match_result(expect_result, actual) + + if save_as_entity: + self.entity_map[save_as_entity] = result + return None + return None + + async def __set_fail_point(self, client, command_args): + if not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + await self.configure_fail_point(client, command_args) + self.addAsyncCleanup(self.configure_fail_point, client, command_args, off=True) + + async def _testOperation_failPoint(self, spec): + await self.__set_fail_point( + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) + + async def _testOperation_targetedFailPoint(self, spec): + session = self.entity_map[spec["session"]] + if not session._pinned_address: + self.fail( + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) + ) + + client = await self.async_single_client("{}:{}".format(*session._pinned_address)) + await self.__set_fail_point(client=client, command_args=spec["failPoint"]) + + async def _testOperation_createEntities(self, spec): + await self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + await self.entity_map.advance_cluster_times(self._cluster_time) + + def _testOperation_assertSessionTransactionState(self, spec): + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) + self.assertEqual(expected_state, session._transaction.state) + + def _testOperation_assertSessionPinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNotNone(session._transaction.pinned_address) + + def _testOperation_assertSessionUnpinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + def __get_last_two_command_lsids(self, listener): + cmd_started_events = [] + for event in reversed(listener.events): + if isinstance(event, CommandStartedEvent): + cmd_started_events.append(event) + if len(cmd_started_events) < 2: + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) + + def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSessionDirty(self, spec): + session = self.entity_map[spec["session"]] + self.assertTrue(session._server_session.dirty) + + def _testOperation_assertSessionNotDirty(self, spec): + session = self.entity_map[spec["session"]] + return self.assertFalse(session._server_session.dirty) + + async def _testOperation_assertCollectionExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + 
self.assertIn(collection_name, collection_name_list) + + async def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + self.assertNotIn(collection_name, collection_name_list) + + async def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) + + async def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(spec["indexName"], index["name"]) + + async def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec["client"]] + pool = await async_get_pool(client) + self.assertEqual(spec["connections"], pool.active_sockets) + + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event(event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + async def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. 
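+        The wait is bounded by async_wait_until's timeout, so a missing
+        event fails the test rather than hanging.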
+ """ + client, event, count = spec["client"], spec["event"], spec["count"] + await async_wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + async def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + await asyncio.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + async def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + async def primary_changed() -> bool: + primary = await client.primary + if primary is None: + return False + return primary != old_primary + + await async_wait_until(primary_changed, "change primary", timeout=timeout) + + async def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + await thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) + + async def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + await thread.stop() + await thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + + async def _testOperation_loop(self, spec): + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 + i = 0 + global IS_INTERRUPTED + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + for op in spec["operations"]: + await self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + except Exception as exc: + if isinstance(exc, AssertionError): + key = failure_key or error_key + else: + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) + + async def run_special_operation(self, spec): + opname = spec["name"] + method_name = f"_testOperation_{opname}" + try: + method = getattr(self, method_name) + except AttributeError: + self.fail(f"Unsupported special test operation {opname}") + else: + if 
iscoroutinefunction(method): + await method(spec["arguments"]) + else: + method(spec["arguments"]) + + async def run_operations(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + await self.run_entity_operation(op) + + async def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + result = await self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec["client"] + events = event_spec["events"] + event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + + if len(events) == 0: + self.assertEqual(actual_events, []) + continue + + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event(expected_event, actual_events[idx]) + + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + + async def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.getMessage()) + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + await self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") 
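+                # Match the structured "data" payload and the remaining
+                # metadata (level, component) separately; redacted failures
+                # are validated first.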
+ + if "failureIsRedacted" in expected_msg: + self.assertIn("failure", actual_data) + should_redact = expected_msg.pop("failureIsRedacted") + if should_redact: + actual_fields = set(json_util.loads(actual_data["failure"]).keys()) + self.assertTrue( + {"code", "codeName", "errorLabels"}.issuperset(actual_fields) + ) + + self.match_evaluator.match_result(expected_data, actual_data) + self.match_evaluator.match_result(expected_msg, actual_msg) + + async def verify_outcome(self, spec): + for collection_data in spec: + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + expected_documents = collection_data["documents"] + + coll = self.client.get_database(db_name).get_collection( + coll_name, + read_preference=ReadPreference.PRIMARY, + read_concern=ReadConcern(level="local"), + ) + + if expected_documents: + sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) + actual_documents = await coll.find({}, sort=[("_id", ASCENDING)]).to_list() + self.assertListEqual(sorted_expected_documents, actual_documents) + + async def run_scenario(self, spec, uri=None): + # Kill all sessions before and after each test to prevent an open + # transaction (from a test failure) from blocking collection/database + # operations during test set up and tear down. + await self.kill_all_sessions() + + # Handle flaky tests. + flaky_tests = [ + ("PYTHON-5170", ".*test_discovery_and_monitoring.*"), + ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"), + ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"), + ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"), + ("PYTHON-3689", ".*TestProse.test_load_balancing"), + ("PYTHON-3522", ".*csot.*"), + ] + for reason, flaky_test in flaky_tests: + if re.match(flaky_test.lower(), self.id().lower()) is not None: + func_name = self.id() + options = dict(reason=reason, reset_func=self.asyncSetUp, func_name=func_name) + if "csot" in func_name.lower(): + options["max_runs"] = 3 + options["affects_cpython_linux"] = True + decorator = flaky(**options) + await decorator(self._run_scenario)(spec, uri) + return + await self._run_scenario(spec, uri) + + async def _run_scenario(self, spec, uri=None): + # maybe skip test manually + self.maybe_skip_test(spec) + + # process test-level runOnRequirements + run_on_spec = spec.get("runOnRequirements", []) + if not await self.should_run_on(run_on_spec): + raise unittest.SkipTest("runOnRequirements not satisfied") + + # process skipReason + skip_reason = spec.get("skipReason", None) + if skip_reason is not None: + raise unittest.SkipTest(f"{skip_reason}") + + # process createEntities + self._uri = uri + self.entity_map = EntityMapUtil(self) + await self.entity_map.create_entities_from_spec( + self.TEST_SPEC.get("createEntities", []), uri=uri + ) + self._cluster_time = None + # process initialData + if "initialData" in self.TEST_SPEC: + await self.insert_initial_data(self.TEST_SPEC["initialData"]) + self._cluster_time = self.client._topology.max_cluster_time() + await self.entity_map.advance_cluster_times(self._cluster_time) + + if "expectLogMessages" in spec: + expect_log_messages = spec["expectLogMessages"] + self.assertTrue(expect_log_messages, "expectEvents must be non-empty") + await self.check_log_messages(spec["operations"], expect_log_messages) + else: + # process operations + await self.run_operations(spec["operations"]) + + # process expectEvents + if "expectEvents" in spec: + expect_events = spec["expectEvents"] + 
self.assertTrue(expect_events, "expectEvents must be non-empty") + self.check_events(expect_events) + + # process outcome + await self.verify_outcome(spec.get("outcome", [])) + + +class UnifiedSpecTestMeta(type): + """Metaclass for generating test classes.""" + + TEST_SPEC: Any + EXPECTED_FAILURES: Any + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + def create_test(spec): + async def test_case(self): + await self.run_scenario(spec) + + return test_case + + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} + + +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], # noqa: B006 + bypass_test_generation_errors=False, + **kwargs, +): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects. + """ + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked. + """ + + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + + base = SpecTestBase + + # Add "encryption" marker if the "csfle" runOnRequirement is set. + for req in test_spec.get("runOnRequirements", []): + if "csfle" in req: + base = pytest.mark.encryption(base) + + return base + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. 
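+                # (For illustration: with tz_aware=False, a value such as
+                # {"$date": "1970-01-01T00:00:00Z"} decodes to a naive
+                # datetime.datetime rather than a timezone-aware one.)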
+ opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = "Test{}_{}_{}".format( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) + if mixin_class is None: + raise ValueError( + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" + ) + module_dict = {"__module__": module, "TEST_PATH": test_path} + module_dict.update(kwargs) + test_klasses[class_name] = type( + class_name, + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py new file mode 100644 index 0000000000..02ba46c71a --- /dev/null +++ b/test/asynchronous/utils.py @@ -0,0 +1,276 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing pymongo that require synchronization.""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import random +import sys +import threading # Used in the synchronized version of this file +import time +import traceback +from functools import wraps +from inspect import iscoroutinefunction + +from bson.son import SON +from pymongo import AsyncMongoClient +from pymongo.errors import ConfigurationError +from pymongo.hello import HelloCompat +from pymongo.lock import _async_create_lock +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration + +_IS_SYNC = False + + +async def async_get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = await client._get_topology() + server = await topology._select_server(writable_server_selector, _Op.TEST) + return server.pool + + +async def async_get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in await (await client._get_topology()).select_servers( + any_server_selector, _Op.TEST + ) + ] + + +async def async_wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. + + E.g.: + + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') + + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). + + Returns the predicate's first true value. 
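+
+    A coroutine function is also accepted and awaited on each poll, e.g.
+    (a sketch; the client is illustrative, async_is_mongos is defined below):
+
+        async def predicate():
+            return await async_is_mongos(client)
+
+        await async_wait_until(predicate, "connect to a mongos", timeout=30)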
+ """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = await predicate() + else: + retval = predicate() + if retval: + return retval + + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) + + await asyncio.sleep(interval) + + +async def async_is_mongos(client): + res = await client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" + + +async def async_ensure_all_connected(client: AsyncMongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. + + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. + """ + hello: dict = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") + + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} + + # Run hello until we have connected to each host at least once. + async def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = await client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + + async def predicate(): + return target_host_list == await discover() + + await async_wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) + + +async def asyncAssertRaisesExactly(cls, fn, *args, **kwargs): + """ + Unlike the standard assertRaises, this checks that a function raises a + specific class of exception, and not a subclass. E.g., check that + MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. + """ + try: + await fn(*args, **kwargs) + except Exception as e: + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" + else: + raise AssertionError("%s not raised" % cls) + + +async def async_set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await client.admin.command(cmd) + + +async def async_joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t + else: + await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, +): + """Decorate a test as flaky. 
+
+    :param reason: the reason why the test is flaky
+    :param max_runs: the maximum number of runs before raising an error
+    :param min_passes: the minimum number of passing runs
+    :param delay: the delay in seconds between retries
+    :param affects_cpython_linux: whether the test is flaky on CPython on Linux
+    :param func_name: the name of the function, used for the retry message
+    :param reset_func: a function to call before retrying
+
+    """
+    if reason is None:
+        raise ValueError("flaky requires a reason input")
+    is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython"
+    disable_flaky = "DISABLE_FLAKY" in os.environ
+    if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ:
+        disable_flaky = True
+
+    if disable_flaky or (is_cpython_linux and not affects_cpython_linux):
+        max_runs = 1
+        min_passes = 1
+
+    def decorator(target_func):
+        @wraps(target_func)
+        async def wrapper(*args, **kwargs):
+            passes = 0
+            for i in range(max_runs):
+                try:
+                    result = await target_func(*args, **kwargs)
+                    passes += 1
+                    if passes == min_passes:
+                        return result
+                except Exception as e:
+                    if i == max_runs - 1:
+                        raise e
+                    print(
+                        f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason}):\n"
+                        f"{traceback.format_exc()}",
+                        file=sys.stderr,
+                    )
+                    await asyncio.sleep(delay)
+                    if reset_func:
+                        await reset_func()
+
+        return wrapper
+
+    return decorator
+
+
+class AsyncMockConnection:
+    def __init__(self):
+        self.cancel_context = _CancellationContext()
+        self.more_to_come = False
+        self.id = random.randint(0, 100)
+        self.is_sdam = False
+        self.server_connection_id = random.randint(0, 100)
+
+    def close_conn(self, reason):
+        pass
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+
+class AsyncMockPool:
+    def __init__(self, address, options, is_sdam=False, client_id=None):
+        self.gen = _PoolGeneration()
+        self._lock = _async_create_lock()
+        self.opts = options
+        self.operation_count = 0
+        self.conns = []
+
+    def stale_generation(self, gen, service_id):
+        return self.gen.stale(gen, service_id)
+
+    @contextlib.asynccontextmanager
+    async def checkout(self, handler=None):
+        yield AsyncMockConnection()
+
+    async def checkin(self, *args, **kwargs):
+        pass
+
+    async def _reset(self, service_id=None):
+        async with self._lock:
+            self.gen.inc(service_id)
+
+    async def ready(self):
+        pass
+
+    async def reset(self, service_id=None, interrupt_connections=False):
+        await self._reset()
+
+    async def reset_without_pause(self):
+        await self._reset()
+
+    async def close(self):
+        await self._reset()
+
+    async def update_is_writable(self, is_writable):
+        pass
+
+    async def remove_stale_sockets(self, *args, **kwargs):
+        pass
diff --git a/test/asynchronous/utils_selection_tests.py b/test/asynchronous/utils_selection_tests.py
new file mode 100644
index 0000000000..d6b92fadb4
--- /dev/null
+++ b/test/asynchronous/utils_selection_tests.py
@@ -0,0 +1,204 @@
+# Copyright 2015-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys +from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.utils import AsyncMockPool + +sys.path[0:0] = [""] + +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) +from test.utils_shared import parse_read_preference + +from bson import json_util +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.common import HEARTBEAT_FREQUENCY +from pymongo.errors import AutoReconnect, ConfigurationError +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + + +def get_topology_settings_dict(**kwargs): + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": AsyncMockPool, + } + settings.update(kwargs) + return settings + + +async def create_topology(scenario_def, **kwargs): + # Initialize topologies. + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY + + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + + topology_type = get_topology_type_name(scenario_def) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) + # Force topology description to ReplicaSet + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) + + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + await topology.open() + + # Update topologies with server descriptions. + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Assert that descriptions match + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name + + return topology + + +def create_test(scenario_def): + async def run_scenario(self): + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + top_latency = await create_topology(scenario_def) + + # "In latency window" is defined in the server selection + # spec as the subset of suitable_servers that falls within the + # allowable latency window. + top_suitable = await create_topology(scenario_def, local_threshold_ms=1000000) + + # Create server selector. + if scenario_def.get("operation") == "write": + pref = writable_server_selector + else: + # Make first letter lowercase to match read_pref's modes. + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): + with self.assertRaises((ConfigurationError, ValueError)): + # Error can be raised when making Read Pref or selecting. 
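+                    # (e.g. mode "primary" combined with a non-empty tag set
+                    # is rejected when the read preference is constructed.)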
+ pref = parse_read_preference(pref_def) + await top_latency.select_server(pref, _Op.TEST) + return + + pref = parse_read_preference(pref_def) + + # Select servers. + if not scenario_def.get("suitable_servers"): + with self.assertRaises(AutoReconnect): + await top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + if not scenario_def["in_latency_window"]: + with self.assertRaises(AutoReconnect): + await top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + actual_suitable_s = await top_suitable.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + actual_latency_s = await top_latency.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + + expected_suitable_servers = {} + for server in scenario_def["suitable_servers"]: + server_description = make_server_description(server, hosts) + expected_suitable_servers[server["address"]] = server_description + + actual_suitable_servers = {} + for s in actual_suitable_s: + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) + for k, actual in actual_suitable_servers.items(): + expected = expected_suitable_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + expected_latency_servers = {} + for server in scenario_def["in_latency_window"]: + server_description = make_server_description(server, hosts) + expected_latency_servers[server["address"]] = server_description + + actual_latency_servers = {} + for s in actual_latency_s: + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) + for k, actual in actual_latency_servers.items(): + expected = expected_latency_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + return run_scenario + + +def create_selection_tests(test_dir): + class TestAllScenarios(AsyncPyMongoTestCase): + pass + + for dirpath, _, filenames in os.walk(test_dir): + dirname = os.path.split(dirpath) + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + return TestAllScenarios diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py new file mode 100644 index 0000000000..496c28a045 --- /dev/null +++ b/test/asynchronous/utils_spec_runner.py @@ -0,0 +1,817 @@ +# Copyright 2019-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing driver specs.""" +from __future__ import annotations + +import asyncio +import functools +import os +import time +import unittest +from collections import abc +from inspect import iscoroutinefunction +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs +from test.asynchronous.helpers import ConcurrentRunner +from test.utils_shared import ( + CMAPListener, + CompareType, + EventListener, + OvertCommandListener, + ScenarioDict, + ServerAndTopologyEventListener, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, +) +from typing import List + +from bson import ObjectId, decode, encode, json_util +from bson.binary import Binary +from bson.int64 import Int64 +from bson.son import SON +from gridfs import GridFSBucket +from gridfs.asynchronous.grid_file import AsyncGridFSBucket +from pymongo.asynchronous import client_session +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError +from pymongo.lock import _async_cond_wait, _async_create_condition, _async_create_lock +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.results import BulkWriteResult, _WriteResult +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class SpecRunnerTask(ConcurrentRunner): + def __init__(self, name): + super().__init__(name=name) + self.exc = None + self.daemon = True + self.cond = _async_create_condition(_async_create_lock()) + self.ops = [] + + async def schedule(self, work): + self.ops.append(work) + async with self.cond: + self.cond.notify() + + async def stop(self): + self.stopped = True + async with self.cond: + self.cond.notify() + + async def run(self): + while not self.stopped or self.ops: + if not self.ops: + async with self.cond: + await _async_cond_wait(self.cond, 10) + if self.ops: + try: + work = self.ops.pop(0) + await work() + except Exception as exc: + self.exc = exc + await self.stop() + + +class AsyncSpecTestCreator: + """Class to create test cases from specifications.""" + + def __init__(self, create_test, test_class, test_path): + """Create a TestCreator object. + + :Parameters: + - `create_test`: callback that returns a test case. The callback + must accept the following arguments - a dictionary containing the + entire test specification (the `scenario_def`), a dictionary + containing the specification for which the test case will be + generated (the `test_def`). + - `test_class`: the unittest.TestCase class in which to create the + test case. + - `test_path`: path to the directory containing the JSON files with + the test specifications. 
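+
+        A typical wiring (the test class and path names are illustrative):
+
+            test_creator = AsyncSpecTestCreator(create_test, TestAllScenarios, TEST_PATH)
+            test_creator.create_tests()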
+ """ + self._create_test = create_test + self._test_class = test_class + self.test_path = test_path + + def _ensure_min_max_server_version(self, scenario_def, method): + """Test modifier that enforces a version range for the server on a + test case. + """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) + if min_ver is not None: + method = async_client_context.require_version_min(*min_ver)(method) + + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) + if max_ver is not None: + method = async_client_context.require_version_max(*max_ver)(method) + + return method + + @staticmethod + async def valid_topology(run_on_req): + return await async_client_context.is_topology_type( + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) + + @staticmethod + def min_server_version(run_on_req): + version = run_on_req.get("minServerVersion") + if version: + min_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version >= min_ver + return True + + @staticmethod + def max_server_version(run_on_req): + version = run_on_req.get("maxServerVersion") + if version: + max_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version <= max_ver + return True + + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return async_client_context.auth_enabled + return not async_client_context.auth_enabled + return True + + async def should_run_on(self, scenario_def): + run_on = scenario_def.get("runOn", []) + if not run_on: + # Always run these tests. + return True + + for req in run_on: + if ( + await self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + ): + return True + return False + + def ensure_run_on(self, scenario_def, method): + """Test modifier that enforces a 'runOn' on a test case.""" + + async def predicate(): + return await self.should_run_on(scenario_def) + + return async_client_context._require(predicate, "runOn not satisfied", method) + + def tests(self, scenario_def): + """Allow CMAP spec test to override the location of test.""" + return scenario_def["tests"] + + async def _create_tests(self): + for dirpath, _, filenames in os.walk(self.test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: # noqa: ASYNC101, RUF100 + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = ScenarioDict( + json_util.loads(scenario_stream.read(), json_options=opts) + ) + + test_type = os.path.splitext(filename)[0] + + # Construct test from scenario. 
+ for test_def in self.tests(scenario_def): + test_name = "test_{}_{}_{}".format( + dirname, + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) + + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) + + new_test.__name__ = test_name + setattr(self._test_class, new_test.__name__, new_test) + + def create_tests(self): + if _IS_SYNC: + self._create_tests() + else: + asyncio.run(self._create_tests()) + + +class AsyncSpecRunner(AsyncIntegrationTest): + mongos_clients: List + knobs: client_knobs + listener: EventListener + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] + + # Speed up the tests by decreasing the heartbeat frequency. + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.targets = {} + self.listener = None # type: ignore + self.pool_listener = None + self.server_listener = None + self.maxDiff = None + + async def asyncTearDown(self) -> None: + self.knobs.disable() + + async def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + await self.configure_fail_point(client, command_args) + + async def targeted_fail_point(self, session, fail_point): + """Run the targetedFailPoint test operation. + + Enable the fail point on the session's pinned mongos. + """ + clients = {c.address: c for c in self.mongos_clients} + client = clients[session._pinned_address] + await self.configure_fail_point(client, fail_point) + self.addAsyncCleanup(self.set_fail_point, {"mode": "off"}) + + def assert_session_pinned(self, session): + """Run the assertSessionPinned test operation. + + Assert that the given session is pinned. + """ + self.assertIsNotNone(session._transaction.pinned_address) + + def assert_session_unpinned(self, session): + """Run the assertSessionUnpinned test operation. + + Assert that the given session is not pinned. 
+ """ + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + async def assert_collection_exists(self, database, collection): + """Run the assertCollectionExists test operation.""" + db = self.client[database] + self.assertIn(collection, await db.list_collection_names()) + + async def assert_collection_not_exists(self, database, collection): + """Run the assertCollectionNotExists test operation.""" + db = self.client[database] + self.assertNotIn(collection, await db.list_collection_names()) + + async def assert_index_exists(self, database, collection, index): + """Run the assertIndexExists test operation.""" + coll = self.client[database][collection] + self.assertIn(index, [doc["name"] async for doc in await coll.list_indexes()]) + + async def assert_index_not_exists(self, database, collection, index): + """Run the assertIndexNotExists test operation.""" + coll = self.client[database][collection] + self.assertNotIn(index, [doc["name"] async for doc in await coll.list_indexes()]) + + async def wait(self, ms): + """Run the "wait" test operation.""" + await asyncio.sleep(ms / 1000.0) + + def assertErrorLabelsContain(self, exc, expected_labels): + labels = [l for l in expected_labels if exc.has_error_label(l)] + self.assertEqual(labels, expected_labels) + + def assertErrorLabelsOmit(self, exc, omit_labels): + for label in omit_labels: + self.assertFalse( + exc.has_error_label(label), msg=f"error labels should not contain {label}" + ) + + async def kill_all_sessions(self): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + await client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. + pass + + def check_command_result(self, expected_result, result): + # Only compare the keys in the expected result. + filtered_result = {} + for key in expected_result: + try: + filtered_result[key] = result[key] + except KeyError: + pass + self.assertEqual(filtered_result, expected_result) + + # TODO: factor the following function with test_crud.py. + def check_result(self, expected_result, result): + if isinstance(result, _WriteResult): + for res in expected_result: + prop = camel_to_snake(res) + # SPEC-869: Only BulkWriteResult has upserted_count. + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): + if result.upserted_id is not None: + upserted_count = 1 + else: + upserted_count = 0 + self.assertEqual(upserted_count, expected_result[res], prop) + elif prop == "inserted_ids": + # BulkWriteResult does not have inserted_ids. + if isinstance(result, BulkWriteResult): + self.assertEqual(len(expected_result[res]), result.inserted_count) + else: + # InsertManyResult may be compared to [id1] from the + # crud spec or {"0": id1} from the retryable write spec. + ids = expected_result[res] + if isinstance(ids, dict): + ids = [ids[str(i)] for i in range(len(ids))] + + self.assertEqual(ids, result.inserted_ids, prop) + elif prop == "upserted_ids": + # Convert indexes from strings to integers. 
+ ids = expected_result[res] + expected_ids = {} + for str_index in ids: + expected_ids[int(str_index)] = ids[str_index] + self.assertEqual(expected_ids, result.upserted_ids, prop) + else: + self.assertEqual(getattr(result, prop), expected_result[res], prop) + + return True + else: + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) + return None + + def get_object_name(self, op): + """Allow subclasses to override handling of 'object' + + Transaction spec says 'object' is required. + """ + return op["object"] + + @staticmethod + def parse_options(opts): + return parse_spec_options(opts) + + async def run_operation(self, sessions, collection, operation): + original_collection = collection + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") + + database = collection.database + collection = database.get_collection(collection.name) + if "collectionOptions" in operation: + collection = collection.with_options( + **self.parse_options(operation["collectionOptions"]) + ) + + object_name = self.get_object_name(operation) + if object_name == "gridfsbucket": + # Only create the GridFSBucket when we need it (for the gridfs + # retryable reads tests). + obj = AsyncGridFSBucket(database, bucket_name=collection.name) + else: + objects = { + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, + } + objects.update(sessions) + obj = objects[object_name] + + # Combine arguments with options and handle special cases. + arguments = operation.get("arguments", {}) + arguments.update(arguments.pop("options", {})) + self.parse_options(arguments) + + cmd = getattr(obj, name) + + with_txn_callback = functools.partial( + self.run_operations, sessions, original_collection, in_with_transaction=True + ) + prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback) + + if name == "run_on_thread": + args = {"sessions": sessions, "collection": collection} + args.update(arguments) + arguments = args + + if not _IS_SYNC and iscoroutinefunction(cmd): + result = await cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) + # Cleanup open change stream cursors. + if name == "watch": + self.addAsyncCleanup(result.close) + + if name == "aggregate": + if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: + # Read from the primary to ensure causal consistency. 
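+                # ($out writes the aggregation's results to another
+                # collection, so the assertion reads that collection back.)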
+ out = collection.database.get_collection( + arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY + ) + return out.find() + if "download" in name: + result = Binary(result.read()) + + if isinstance(result, AsyncCursor) or isinstance(result, AsyncCommandCursor): + return await result.to_list() + + return result + + def allowable_errors(self, op): + """Allow encryption spec to override expected error classes.""" + return (PyMongoError,) + + async def _run_op(self, sessions, collection, op, in_with_transaction): + expected_result = op.get("result") + if expect_error(op): + with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: + await self.run_operation(sessions, collection, op.copy()) + exc = context.exception + if expect_error_message(expected_result): + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() + else: + errmsg = str(exc).lower() + self.assertIn(expected_result["errorContains"].lower(), errmsg) + if expect_error_code(expected_result): + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) + if expect_error_labels_contain(expected_result): + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) + if expect_error_labels_omit(expected_result): + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc + + # Reraise the exception if we're in the with_transaction + # callback. + if in_with_transaction: + raise context.exception + else: + result = await self.run_operation(sessions, collection, op.copy()) + if "result" in op: + if op["name"] == "runCommand": + self.check_command_result(expected_result, result) + else: + self.check_result(expected_result, result) + + async def run_operations(self, sessions, collection, ops, in_with_transaction=False): + for op in ops: + await self._run_op(sessions, collection, op, in_with_transaction) + + # TODO: factor with test_command_monitoring.py + def check_events(self, test, listener, session_ids): + events = listener.started_events + if not len(test["expectations"]): + return + + # Give a nicer message when there are missing or extra events + cmds = decode_raw([event.command for event in events]) + self.assertEqual(len(events), len(test["expectations"]), cmds) + for i, expectation in enumerate(test["expectations"]): + event_type = next(iter(expectation)) + event = events[i] + + # The tests substitute 42 for any number other than 0. + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = Int64(42) + elif event.command_name == "killCursors": + event.command["cursors"] = [Int64(42)] + elif event.command_name == "update": + # TODO: remove this once PYTHON-1744 is done. + # Add upsert and multi fields back into expectations. + updates = expectation[event_type]["command"]["updates"] + for update in updates: + update.setdefault("upsert", False) + update.setdefault("multi", False) + + # Replace afterClusterTime: 42 with actual afterClusterTime. 
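+            # (Spec files use 42 as a placeholder; it is swapped for the
+            # cluster time actually observed in the command event.)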
expected_cmd = expectation[event_type]["command"]
+            expected_read_concern = expected_cmd.get("readConcern")
+            if expected_read_concern is not None:
+                time = expected_read_concern.get("afterClusterTime")
+                if time == 42:
+                    actual_time = event.command.get("readConcern", {}).get("afterClusterTime")
+                    if actual_time is not None:
+                        expected_read_concern["afterClusterTime"] = actual_time
+
+            recovery_token = expected_cmd.get("recoveryToken")
+            if recovery_token == 42:
+                expected_cmd["recoveryToken"] = CompareType(dict)
+
+            # Replace lsid with a name like "session0" to match test.
+            if "lsid" in event.command:
+                for name, lsid in session_ids.items():
+                    if event.command["lsid"] == lsid:
+                        event.command["lsid"] = name
+                        break
+
+            for attr, expected in expectation[event_type].items():
+                actual = getattr(event, attr)
+                expected = wrap_types(expected)
+                if isinstance(expected, dict):
+                    for key, val in expected.items():
+                        if val is None:
+                            if key in actual:
+                                self.fail(f"Unexpected key [{key}] in {actual!r}")
+                        elif key not in actual:
+                            self.fail(f"Expected key [{key}] in {actual!r}")
+                        else:
+                            self.assertEqual(
+                                val, decode_raw(actual[key]), f"Key [{key}] in {actual}"
+                            )
+                else:
+                    self.assertEqual(actual, expected)
+
+    def maybe_skip_scenario(self, test):
+        if test.get("skipReason"):
+            self.skipTest(test.get("skipReason"))
+
+    def get_scenario_db_name(self, scenario_def):
+        """Allow subclasses to override a test's database name."""
+        return scenario_def["database_name"]
+
+    def get_scenario_coll_name(self, scenario_def):
+        """Allow subclasses to override a test's collection name."""
+        return scenario_def["collection_name"]
+
+    def get_outcome_coll_name(self, outcome, collection):
+        """Allow subclasses to override outcome collection."""
+        return collection.name
+
+    async def run_test_ops(self, sessions, collection, test):
+        """Added to allow retryable writes spec to override a test's
+        operation.
+        """
+        await self.run_operations(sessions, collection, test["operations"])
+
+    def parse_client_options(self, opts):
+        """Allow encryption spec to override a clientOptions parsing."""
+        # Convert test['clientOptions'] to dict to avoid a Jython bug using
+        # "**" with ScenarioDict.
+        return dict(opts)
+
+    async def setup_scenario(self, scenario_def):
+        """Allow specs to override a test's setup."""
+        db_name = self.get_scenario_db_name(scenario_def)
+        coll_name = self.get_scenario_coll_name(scenario_def)
+        documents = scenario_def["data"]
+
+        # Set up the collection with as few majority writes as possible.
+        db = async_client_context.client.get_database(db_name)
+        coll_exists = bool(await db.list_collection_names(filter={"name": coll_name}))
+        if coll_exists:
+            await db[coll_name].delete_many({})
+        # Only use majority wc on the final write.
+        wc = WriteConcern(w="majority")
+        if documents:
+            await db.get_collection(coll_name, write_concern=wc).insert_many(documents)
+        elif not coll_exists:
+            # Ensure collection exists.
+            await db.create_collection(coll_name, write_concern=wc)
+
+    async def run_scenario(self, scenario_def, test):
+        self.maybe_skip_scenario(test)
+
+        # Kill all sessions before and after each test to prevent an open
+        # transaction (from a test failure) from blocking collection/database
+        # operations during test set up and tear down.
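+        # (killAllSessions also aborts any in-progress transactions owned by
+        # those sessions server-side.)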
+        await self.kill_all_sessions()
+        self.addAsyncCleanup(self.kill_all_sessions)
+        await self.setup_scenario(scenario_def)
+        database_name = self.get_scenario_db_name(scenario_def)
+        collection_name = self.get_scenario_coll_name(scenario_def)
+        # SPEC-1245 workaround StaleDbVersion on distinct
+        for c in self.mongos_clients:
+            await c[database_name][collection_name].distinct("x")
+
+        # Configure the fail point before creating the client.
+        if "failPoint" in test:
+            fp = test["failPoint"]
+            await self.set_fail_point(fp)
+            self.addAsyncCleanup(
+                self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"}
+            )
+
+        listener = OvertCommandListener()
+        pool_listener = CMAPListener()
+        server_listener = ServerAndTopologyEventListener()
+        # Create a new client, to avoid interference from pooled sessions.
+        client_options = self.parse_client_options(test["clientOptions"])
+        use_multi_mongos = test["useMultipleMongoses"]
+        host = None
+        if use_multi_mongos:
+            if async_client_context.load_balancer:
+                host = async_client_context.MULTI_MONGOS_LB_URI
+            elif async_client_context.is_mongos:
+                host = async_client_context.mongos_seeds()
+        client = await self.async_rs_client(
+            h=host, event_listeners=[listener, pool_listener, server_listener], **client_options
+        )
+        self.scenario_client = client
+        self.listener = listener
+        self.pool_listener = pool_listener
+        self.server_listener = server_listener
+
+        # Create session0 and session1.
+        sessions = {}
+        session_ids = {}
+        for i in range(2):
+            # Don't attempt to create sessions if they are not supported by
+            # the running server version.
+            if not async_client_context.sessions_enabled:
+                break
+            session_name = "session%d" % i
+            opts = camel_to_snake_args(test["sessionOptions"][session_name])
+            if "default_transaction_options" in opts:
+                txn_opts = self.parse_options(opts["default_transaction_options"])
+                txn_opts = client_session.TransactionOptions(**txn_opts)
+                opts["default_transaction_options"] = txn_opts
+
+            s = await client.start_session(**dict(opts))
+
+            sessions[session_name] = s
+            # Store lsid so we can access it after end_session, in check_events.
+            session_ids[session_name] = s.session_id
+
+        self.addAsyncCleanup(end_sessions, sessions)
+
+        collection = client[database_name][collection_name]
+        await self.run_test_ops(sessions, collection, test)
+
+        await end_sessions(sessions)
+
+        self.check_events(test, listener, session_ids)
+
+        # Disable fail points.
+        if "failPoint" in test:
+            fp = test["failPoint"]
+            await self.set_fail_point(
+                {"configureFailPoint": fp["configureFailPoint"], "mode": "off"}
+            )
+
+        # Assert final state is expected.
+        outcome = test["outcome"]
+        expected_c = outcome.get("collection")
+        if expected_c is not None:
+            outcome_coll_name = self.get_outcome_coll_name(outcome, collection)
+
+            # Read from the primary with local read concern to ensure causal
+            # consistency.
+            outcome_coll = async_client_context.client[collection.database.name].get_collection(
+                outcome_coll_name,
+                read_preference=ReadPreference.PRIMARY,
+                read_concern=ReadConcern("local"),
+            )
+            actual_data = await outcome_coll.find(sort=[("_id", 1)]).to_list()
+
+            # The expected data needs to be the left hand side here otherwise
+            # CompareType(Binary) doesn't work.
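+            # (assertEqual evaluates expected == actual, so the CompareType
+            # wrappers sit on the left, where Python tries their __eq__ first.)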
+ self.assertEqual(wrap_types(expected_c["data"]), actual_data) + + +def expect_any_error(op): + if isinstance(op, dict): + return op.get("error") + + return False + + +def expect_error_message(expected_result): + if isinstance(expected_result, dict): + return isinstance(expected_result["errorContains"], str) + + return False + + +def expect_error_code(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorCodeName"] + + return False + + +def expect_error_labels_contain(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsContain"] + + return False + + +def expect_error_labels_omit(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsOmit"] + + return False + + +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + +def expect_error(op): + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) + ) + + +async def end_sessions(sessions): + for s in sessions.values(): + # Aborts the transaction if it's open. + await s.end_session() + + +def decode_raw(val): + """Decode RawBSONDocuments in the given container.""" + if isinstance(val, (list, abc.Mapping)): + return decode(encode({"v": val}))["v"] + return val + + +TYPES = { + "binData": Binary, + "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, +} + + +def wrap_types(val): + """Support $$type assertion in command results.""" + if isinstance(val, list): + return [wrap_types(v) for v in val] + if isinstance(val, abc.Mapping): + typ = val.get("$$type") + if typ: + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) + d = {} + for key in val: + d[key] = wrap_types(val[key]) + return d + return val diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py new file mode 100644 index 0000000000..ac217ab40d --- /dev/null +++ b/test/atlas/test_connection.py @@ -0,0 +1,112 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test connections to various Atlas cluster types.""" +from __future__ import annotations + +import os +import sys +import unittest +from collections import defaultdict +from test import PyMongoTestCase + +import pytest + +sys.path[0:0] = [""] + +import pymongo +from pymongo.ssl_support import _has_sni + +pytestmark = pytest.mark.atlas_connect + + +URIS = { + "ATLAS_REPL": os.environ.get("ATLAS_REPL"), + "ATLAS_SHRD": os.environ.get("ATLAS_SHRD"), + "ATLAS_FREE": os.environ.get("ATLAS_FREE"), + "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), + "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), + "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), + "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), + "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), + "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), + "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), + "ATLAS_X509_DEV_WITH_CERT": os.environ.get("ATLAS_X509_DEV_WITH_CERT"), +} + + +class TestAtlasConnect(PyMongoTestCase): + def connect(self, uri): + if not uri: + raise Exception("Must set env variable to test.") + client = self.simple_client(uri) + # No TLS error + client.admin.command("ping") + # No auth error + client.test.test.count_documents({}) + + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") + def test_free_tier(self): + self.connect(URIS["ATLAS_FREE"]) + + def test_replica_set(self): + self.connect(URIS["ATLAS_REPL"]) + + def test_sharded_cluster(self): + self.connect(URIS["ATLAS_SHRD"]) + + def test_tls_11(self): + self.connect(URIS["ATLAS_TLS11"]) + + def test_tls_12(self): + self.connect(URIS["ATLAS_TLS12"]) + + def connect_srv(self, uri): + self.connect(uri) + self.assertIn("mongodb+srv://", uri) + + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") + def test_srv_free_tier(self): + self.connect_srv(URIS["ATLAS_SRV_FREE"]) + + def test_srv_replica_set(self): + self.connect_srv(URIS["ATLAS_SRV_REPL"]) + + def test_srv_sharded_cluster(self): + self.connect_srv(URIS["ATLAS_SRV_SHRD"]) + + def test_srv_tls_11(self): + self.connect_srv(URIS["ATLAS_SRV_TLS11"]) + + def test_srv_tls_12(self): + self.connect_srv(URIS["ATLAS_SRV_TLS12"]) + + def test_x509_with_cert(self): + self.connect(URIS["ATLAS_X509_DEV_WITH_CERT"]) + + def test_uniqueness(self): + """Ensure that we don't accidentally duplicate the test URIs.""" + uri_to_names = defaultdict(list) + for name, uri in URIS.items(): + if uri: + uri_to_names[uri].append(name) + duplicates = [names for names in uri_to_names.values() if len(names) > 1] + self.assertFalse( + duplicates, + f"Error: the following env variables have duplicate values: {duplicates}", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json new file mode 100644 index 0000000000..3a099c8137 --- /dev/null +++ b/test/auth/legacy/connection-string.json @@ -0,0 +1,651 @@ +{ + "tests": [ + { + "description": "should use the default source and mechanism", + "uri": "mongodb://user:password@localhost", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified", + "uri": "mongodb://user:password@localhost/foo", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": 
"should use the authSource when specified", + "uri": "mongodb://user:password@localhost/foo?authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should ignore the database (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept valid authSource (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept generic mechanism property (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" + } + } + }, + { + "description": "should accept forwardAndReverse hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forwardAndReverse", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "forwardAndReverse" + } + } + }, + { + "description": "should accept no hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:none", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "none" + } + } + }, + { + "description": "must raise an error when the hostname canonicalization is invalid", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:invalid", + "valid": false + }, + { + "description": "should accept the password (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": "password", + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "must raise an error when the authSource is empty", + "uri": 
"mongodb://user:password@localhost/foo?authSource=", + "valid": false + }, + { + "description": "must raise an error when the authSource is empty without credentials", + "uri": "mongodb://localhost/admin?authSource=", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", + "valid": false + }, + { + "description": "should throw an exception if no username (GSSAPI)", + "uri": "mongodb://localhost/?authMechanism=GSSAPI", + "valid": false + }, + { + "description": "should recognize the mechanism (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should ignore the database (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username when auth source is explicitly specified (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if supplied a password (MONGODB-X509)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", + "valid": false + }, + { + "description": "should recognize the mechanism (PLAIN)", + "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (PLAIN)", + "uri": 
"mongodb://user:password@localhost/foo?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified (PLAIN)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (PLAIN)", + "uri": "mongodb://localhost/?authMechanism=PLAIN", + "valid": false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-1)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", + "valid": false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-256)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", + "valid": false + }, + { + "description": "URI with no auth-related info doesn't create credential", + "uri": "mongodb://localhost/", + "valid": true, + "credential": null + }, + { + "description": "database in URI path doesn't create credentials", + "uri": "mongodb://localhost/foo", + "valid": true, + "credential": null + }, + { + "description": "authSource without username doesn't create credential (default mechanism)", + "uri": "mongodb://localhost/?authSource=foo", + "valid": true, + "credential": null + }, + { + "description": "should throw an 
exception if no username provided (userinfo implies default mechanism)", + "uri": "mongodb://@localhost.com/", + "valid": false + }, + { + "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", + "uri": "mongodb://:@localhost.com/", + "valid": false + }, + { + "description": "should recognise the mechanism (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if username and no password (MONGODB-AWS)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", + "valid": false, + "credential": null + }, + { + "description": "should use username and password if specified (MONGODB-AWS)", + "uri": "mongodb://user%21%40%23%24%25%5E%26%2A%28%29_%2B:pass%21%40%23%24%25%5E%26%2A%28%29_%2B@localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": "user!@#$%^&*()_+", + "password": "pass!@#$%^&*()_+", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should use username, password and session token if specified (MONGODB-AWS)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:token%21%40%23%24%25%5E%26%2A%28%29_%2B", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": { + "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" + } + } + }, + { + "description": "should recognise the mechanism with test environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "test" + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=ENVIRONMENT:test", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "test" + } + } + }, + { + "description": "should throw an exception if supplied a password (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username is specified for test (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified environment is not supported (MONGODB-OIDC)", + "uri": 
"mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:invalid", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither environment nor callbacks specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with azure provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should accept a username with azure provider (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should accept a url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb%3A%2F%2Ftest-cluster", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } + }, + { + "description": "should accept an un-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } + }, + { + "description": "should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "abcd%ef:g&hi" + } + } + }, + { + "description": "should url-encode a TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:a$b", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "a$b" + } + } + }, + { + "description": "should accept a username and throw an error for a password with azure provider (MONGODB-OIDC)", + "uri": 
"mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if no token audience is given for azure provider (MONGODB-OIDC)", + "uri": "mongodb://username@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should throw an error for a username and password with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", + "valid": false, + "credential": null + }, + { + "description": "should throw an error if not TOKEN_RESOURCE with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "k8s" + } + } + }, + { + "description": "should throw an error for a username and password with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": false, + "credential": null + } + ] +} diff --git a/test/auth/unified/mongodb-oidc-no-retry.json b/test/auth/unified/mongodb-oidc-no-retry.json new file mode 100644 index 0000000000..0a8658455e --- /dev/null +++ b/test/auth/unified/mongodb-oidc-no-retry.json @@ -0,0 +1,422 @@ +{ + "description": "MONGODB-OIDC authentication with retry disabled", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "auth": true, + "authMechanism": "MONGODB-OIDC", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client0", + "uriOptions": { + "authMechanism": "MONGODB-OIDC", + "authMechanismProperties": { + "$$placeholder": 1 + }, + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "A read operation should succeed", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A write operation should succeed", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Read commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake with cached token should use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake without cached token should not use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "errorCode": 18 + } + } + ] + } + ] +} diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py new file mode 100644 index 0000000000..9738694d85 --- /dev/null +++ b/test/auth_aws/test_auth_aws.py @@ -0,0 +1,212 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-AWS Authentication.""" +from __future__ import annotations + +import os +import sys +import unittest +from test import PyMongoTestCase +from unittest.mock import patch + +import pytest + +sys.path[0:0] = [""] + +try: + from pymongo_auth_aws import AwsCredential, auth +except ImportError: + pass + +from pymongo import MongoClient +from pymongo.errors import OperationFailure +from pymongo.synchronous.uri_parser import parse_uri + +pytestmark = pytest.mark.auth_aws + + +class TestAuthAWS(PyMongoTestCase): + uri: str + + @classmethod + def setUpClass(cls): + cls.uri = os.environ["MONGODB_URI"] + + def test_should_fail_without_credentials(self): + if "@" not in self.uri: + self.skipTest("MONGODB_URI already has no credentials") + + hosts = ["{}:{}".format(*addr) for addr in parse_uri(self.uri)["nodelist"]] + self.assertTrue(hosts) + with MongoClient(hosts) as client: + with self.assertRaises(OperationFailure): + client.aws.test.find_one() + + def test_should_fail_incorrect_credentials(self): + with MongoClient( + self.uri, username="fake", password="fake", authMechanism="MONGODB-AWS" + ) as client: + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + def test_connect_uri(self): + with MongoClient(self.uri) as client: + client.get_database().test.find_one() + + def setup_cache(self): + if os.environ.get("AWS_ACCESS_KEY_ID", None) or "@" in self.uri: + self.skipTest("Not testing cached credentials") + + # Make a connection to ensure that we enable caching. + client = self.simple_client(self.uri) + client.get_database().test.find_one() + client.close() + + self.assertTrue(auth.get_use_cached_credentials()) + + # Ensure cleared credentials. 
+ auth.set_cached_credentials(None) + self.assertEqual(auth.get_cached_credentials(), None) + + client = self.simple_client(self.uri) + client.get_database().test.find_one() + client.close() + return auth.get_cached_credentials() + + def test_cache_credentials(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + + def test_cache_about_to_expire(self): + creds = self.setup_cache() + client = self.simple_client(self.uri) + + # Make the creds about to expire. + creds = auth.get_cached_credentials() + assert creds is not None + + creds = AwsCredential(creds.username, creds.password, creds.token, lambda x: True) + auth.set_cached_credentials(creds) + + client.get_database().test.find_one() + new_creds = auth.get_cached_credentials() + self.assertNotEqual(creds, new_creds) + + def test_poisoned_cache(self): + creds = self.setup_cache() + + client = self.simple_client(self.uri) + + # Poison the creds with invalid password. + assert creds is not None + creds = AwsCredential("a" * 24, "b" * 24, "c" * 24) + auth.set_cached_credentials(creds) + + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + # Make sure the cache was cleared. + self.assertEqual(auth.get_cached_credentials(), None) + + # The next attempt should generate a new cred and succeed. + client.get_database().test.find_one() + self.assertNotEqual(auth.get_cached_credentials(), None) + + def test_environment_variables_ignored(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + os.environ.copy() + + client = self.simple_client(self.uri) + + client.get_database().test.find_one() + + self.assertIsNotNone(auth.get_cached_credentials()) + + mock_env = { + "AWS_ACCESS_KEY_ID": "foo", + "AWS_SECRET_ACCESS_KEY": "bar", + "AWS_SESSION_TOKEN": "baz", + } + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client.get_database().test.find_one() + + auth.set_cached_credentials(None) + + client2 = self.simple_client(self.uri) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + with self.assertRaises(OperationFailure): + client2.get_database().test.find_one() + + def test_no_cache_environment_variables(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + auth.set_cached_credentials(None) + + mock_env = {"AWS_ACCESS_KEY_ID": creds.username, "AWS_SECRET_ACCESS_KEY": creds.password} + if creds.token: + mock_env["AWS_SESSION_TOKEN"] = creds.token + + client = self.simple_client(self.uri) + + with patch.dict(os.environ, mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) + client.get_database().test.find_one() + + self.assertIsNone(auth.get_cached_credentials()) + + mock_env["AWS_ACCESS_KEY_ID"] = "foo" + + client2 = self.simple_client(self.uri) + + with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client2.get_database().test.find_one() + + +class TestAWSLambdaExamples(PyMongoTestCase): + def test_shared_client(self): + # Start AWS Lambda Example 1 + import os + + client = self.simple_client(host=os.environ["MONGODB_URI"]) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 1 + + def test_IAM_auth(self): + # Start AWS Lambda Example 2 + import os + + client = self.simple_client( + host=os.environ["MONGODB_URI"], + authSource="$external", + authMechanism="MONGODB-AWS", + ) + + def lambda_handler(event, 
context): + return client.db.command("ping") + + # End AWS Lambda Example 2 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/bson_binary_vector/float32.json b/test/bson_binary_vector/float32.json new file mode 100644 index 0000000000..72dafce10f --- /dev/null +++ b/test/bson_binary_vector/float32.json @@ -0,0 +1,65 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype FLOAT32", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector FLOAT32", + "valid": true, + "vector": [127.0, 7.0], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927000000FE420000E04000" + }, + { + "description": "Vector with decimals and negative value FLOAT32", + "valid": true, + "vector": [127.7, -7.7], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927006666FF426666F6C000" + }, + { + "description": "Empty Vector FLOAT32", + "valid": true, + "vector": [], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009270000" + }, + { + "description": "Infinity Vector FLOAT32", + "valid": true, + "vector": [{"$numberDouble": "-Infinity"}, 0.0, {"$numberDouble": "Infinity"} ], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "2000000005766563746F72000E000000092700000080FF000000000000807F00" + }, + { + "description": "FLOAT32 with padding", + "valid": false, + "vector": [127.0, 7.0], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 3, + "canonical_bson": "1C00000005766563746F72000A0000000927030000FE420000E04000" + }, + { + "description": "Insufficient vector data with 3 bytes FLOAT32", + "valid": false, + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "canonical_bson": "1700000005766563746F7200050000000927002A2A2A00" + }, + { + "description": "Insufficient vector data with 5 bytes FLOAT32", + "valid": false, + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "canonical_bson": "1900000005766563746F7200070000000927002A2A2A2A2A00" + } + ] +} diff --git a/test/bson_binary_vector/int8.json b/test/bson_binary_vector/int8.json new file mode 100644 index 0000000000..29524fb617 --- /dev/null +++ b/test/bson_binary_vector/int8.json @@ -0,0 +1,57 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype INT8", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector INT8", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0, + "canonical_bson": "1600000005766563746F7200040000000903007F0700" + }, + { + "description": "Empty Vector INT8", + "valid": true, + "vector": [], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009030000" + }, + { + "description": "Overflow Vector INT8", + "valid": false, + "vector": [128], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + }, + { + "description": "Underflow Vector INT8", + "valid": false, + "vector": [-129], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + }, + { + "description": "INT8 with padding", + "valid": false, + "vector": [127, 7], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000903037F0700" + }, + { + "description": "INT8 with float inputs", + "valid": false, + "vector": [127.77, 7.77], + "dtype_hex": "0x03", + "dtype_alias": 
"INT8", + "padding": 0 + } + ] +} diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json new file mode 100644 index 0000000000..7cc272e38b --- /dev/null +++ b/test/bson_binary_vector/packed_bit.json @@ -0,0 +1,83 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype PACKED_BIT", + "test_key": "vector", + "tests": [ + { + "description": "Padding specified with no vector data PACKED_BIT", + "valid": false, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 1, + "canonical_bson": "1400000005766563746F72000200000009100100" + }, + { + "description": "Simple Vector PACKED_BIT", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1600000005766563746F7200040000000910007F0700" + }, + { + "description": "PACKED_BIT with padding", + "valid": true, + "vector": [127, 8], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000910037F0800" + }, + { + "description": "Empty Vector PACKED_BIT", + "valid": true, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" + }, + { + "description": "Overflow Vector PACKED_BIT", + "valid": false, + "vector": [256], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Underflow Vector PACKED_BIT", + "valid": false, + "vector": [-1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Vector with float values PACKED_BIT", + "valid": false, + "vector": [127.5], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Exceeding maximum padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 8, + "canonical_bson": "1500000005766563746F7200030000000910080100" + }, + { + "description": "Negative padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": -1 + } + ] +} diff --git a/test/bson_corpus/array.json b/test/bson_corpus/array.json new file mode 100644 index 0000000000..9ff953e5ae --- /dev/null +++ b/test/bson_corpus/array.json @@ -0,0 +1,49 @@ +{ + "description": "Array", + "bson_type": "0x04", + "test_key": "a", + "valid": [ + { + "description": "Empty", + "canonical_bson": "0D000000046100050000000000", + "canonical_extjson": "{\"a\" : []}" + }, + { + "description": "Single Element Array", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Single Element Array with index set incorrectly to empty string", + "degenerate_bson": "130000000461000B00000010000A0000000000", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Single Element Array with index set incorrectly to ab", + "degenerate_bson": "150000000461000D000000106162000A0000000000", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Multi Element Array with duplicate indexes", + "degenerate_bson": "1b000000046100130000001030000a000000103000140000000000", + "canonical_bson": "1b000000046100130000001030000a000000103100140000000000", + 
"canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}, {\"$numberInt\": \"20\"}]}" + } + ], + "decodeErrors": [ + { + "description": "Array length too long: eats outer terminator", + "bson": "140000000461000D0000001030000A0000000000" + }, + { + "description": "Array length too short: leaks terminator", + "bson": "140000000461000B0000001030000A0000000000" + }, + { + "description": "Invalid Array: bad string length in field", + "bson": "1A00000004666F6F00100000000230000500000062617A000000" + } + ] +} diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json new file mode 100644 index 0000000000..0e0056f3a2 --- /dev/null +++ b/test/bson_corpus/binary.json @@ -0,0 +1,153 @@ +{ + "description": "Binary type", + "bson_type": "0x05", + "test_key": "x", + "valid": [ + { + "description": "subtype 0x00 (Zero-length)", + "canonical_bson": "0D000000057800000000000000", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"\", \"subType\" : \"00\"}}}" + }, + { + "description": "subtype 0x00 (Zero-length, keys reversed)", + "canonical_bson": "0D000000057800000000000000", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"\", \"subType\" : \"00\"}}}", + "degenerate_extjson": "{\"x\" : { \"$binary\" : {\"subType\" : \"00\", \"base64\" : \"\"}}}" + }, + { + "description": "subtype 0x00", + "canonical_bson": "0F0000000578000200000000FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"00\"}}}" + }, + { + "description": "subtype 0x01", + "canonical_bson": "0F0000000578000200000001FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"01\"}}}" + }, + { + "description": "subtype 0x02", + "canonical_bson": "13000000057800060000000202000000FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"02\"}}}" + }, + { + "description": "subtype 0x03", + "canonical_bson": "1D000000057800100000000373FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"03\"}}}" + }, + { + "description": "subtype 0x04", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}" + }, + { + "description": "subtype 0x04 UUID", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}", + "degenerate_extjson": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "subtype 0x05", + "canonical_bson": "1D000000057800100000000573FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"05\"}}}" + }, + { + "description": "subtype 0x07", + "canonical_bson": "1D000000057800100000000773FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"07\"}}}" + }, + { + "description": "subtype 0x08", + "canonical_bson": "1D000000057800100000000873FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"08\"}}}" + }, + { + "description": "subtype 0x80", + "canonical_bson": "0F0000000578000200000080FFFF00", + "canonical_extjson": 
"{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"80\"}}}" + }, + { + "description": "$type query operator (conflicts with legacy $binary form with $type field)", + "canonical_bson": "1F000000037800170000000224747970650007000000737472696E67000000", + "canonical_extjson": "{\"x\" : { \"$type\" : \"string\"}}" + }, + { + "description": "$type query operator (conflicts with legacy $binary form with $type field)", + "canonical_bson": "180000000378001000000010247479706500020000000000", + "canonical_extjson": "{\"x\" : { \"$type\" : {\"$numberInt\": \"2\"}}}" + }, + { + "description": "subtype 0x09 Vector FLOAT32", + "canonical_bson": "170000000578000A0000000927000000FE420000E04000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwAAAP5CAADgQA==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector INT8", + "canonical_bson": "11000000057800040000000903007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector PACKED_BIT", + "canonical_bson": "11000000057800040000000910007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) FLOAT32", + "canonical_bson": "0F0000000578000200000009270000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) INT8", + "canonical_bson": "0F0000000578000200000009030000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) PACKED_BIT", + "canonical_bson": "0F0000000578000200000009100000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAA=\", \"subType\": \"09\"}}}" + } + ], + "decodeErrors": [ + { + "description": "Length longer than document", + "bson": "1D000000057800FF0000000573FFD26444B34C6990E8E7D1DFC035D400" + }, + { + "description": "Negative length", + "bson": "0D000000057800FFFFFFFF0000" + }, + { + "description": "subtype 0x02 length too long ", + "bson": "13000000057800060000000203000000FFFF00" + }, + { + "description": "subtype 0x02 length too short", + "bson": "13000000057800060000000201000000FFFF00" + }, + { + "description": "subtype 0x02 length negative one", + "bson": "130000000578000600000002FFFFFFFFFFFF00" + } + ], + "parseErrors": [ + { + "description": "$uuid wrong type", + "string": "{\"x\" : { \"$uuid\" : { \"data\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}}" + }, + { + "description": "$uuid invalid value--too short", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too long", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4-789e4\"}}" + }, + { + "description": "$uuid invalid value--misplaced hyphens", + "string": "{\"x\" : { \"$uuid\" : \"73ff-d26444b-34c6-990e8e-7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too many hyphens", + "string": "{\"x\" : { \"$uuid\" : \"----d264-44b3-4--9-90e8-e7d1dfc0----\"}}" + } + ] +} diff --git a/test/bson_corpus/boolean.json b/test/bson_corpus/boolean.json new file mode 100644 index 0000000000..84c282299a --- /dev/null +++ b/test/bson_corpus/boolean.json @@ -0,0 +1,27 @@ +{ + "description": "Boolean", + "bson_type": "0x08", + "test_key": "b", + "valid": [ + { + "description": "True", + "canonical_bson": "090000000862000100", + 
"canonical_extjson": "{\"b\" : true}" + }, + { + "description": "False", + "canonical_bson": "090000000862000000", + "canonical_extjson": "{\"b\" : false}" + } + ], + "decodeErrors": [ + { + "description": "Invalid boolean value of 2", + "bson": "090000000862000200" + }, + { + "description": "Invalid boolean value of -1", + "bson": "09000000086200FF00" + } + ] +} diff --git a/test/bson_corpus/code.json b/test/bson_corpus/code.json new file mode 100644 index 0000000000..b8482b2541 --- /dev/null +++ b/test/bson_corpus/code.json @@ -0,0 +1,67 @@ +{ + "description": "Javascript Code", + "bson_type": "0x0D", + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D0000000D6100010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\"}}" + }, + { + "description": "Single character", + "canonical_bson": "0E0000000D610002000000620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"b\"}}" + }, + { + "description": "Multi-character", + "canonical_bson": "190000000D61000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abababababab\"}}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000D61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": "190000000D61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u2606\\u2606\\u2606\\u2606\"}}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000D61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"ab\\u0000bab\\u0000babab\"}}" + } + ], + "decodeErrors": [ + { + "description": "bad code string length: 0 (but no 0x00 either)", + "bson": "0C0000000D61000000000000" + }, + { + "description": "bad code string length: -1", + "bson": "0C0000000D6100FFFFFFFF00" + }, + { + "description": "bad code string length: eats terminator", + "bson": "100000000D6100050000006200620000" + }, + { + "description": "bad code string length: longer than rest of document", + "bson": "120000000D00FFFFFF00666F6F6261720000" + }, + { + "description": "code string is not null-terminated", + "bson": "100000000D610004000000616263FF00" + }, + { + "description": "empty code string, but extra null", + "bson": "0E0000000D610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E0000000D610002000000E90000" + } + ] +} diff --git a/test/bson_corpus/code_w_scope.json b/test/bson_corpus/code_w_scope.json new file mode 100644 index 0000000000..f956bcd54f --- /dev/null +++ b/test/bson_corpus/code_w_scope.json @@ -0,0 +1,78 @@ +{ + "description": "Javascript Code with Scope", + "bson_type": "0x0F", + "test_key": "a", + "valid": [ + { + "description": "Empty code string, empty scope", + "canonical_bson": "160000000F61000E0000000100000000050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\", \"$scope\" : {}}}" + }, + { + "description": "Non-empty code string, empty scope", + "canonical_bson": "1A0000000F610012000000050000006162636400050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abcd\", \"$scope\" : {}}}" + }, + { + "description": "Empty code string, non-empty scope", + "canonical_bson": "1D0000000F61001500000001000000000C000000107800010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\", \"$scope\" : {\"x\" : {\"$numberInt\": \"1\"}}}}" + }, + { + "description": "Non-empty code string 
and non-empty scope", + "canonical_bson": "210000000F6100190000000500000061626364000C000000107800010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abcd\", \"$scope\" : {\"x\" : {\"$numberInt\": \"1\"}}}}" + }, + { + "description": "Unicode and embedded null in code string, empty scope", + "canonical_bson": "1A0000000F61001200000005000000C3A9006400050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u0000d\", \"$scope\" : {}}}" + } + ], + "decodeErrors": [ + { + "description": "field length zero", + "bson": "280000000F6100000000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length negative", + "bson": "280000000F6100FFFFFFFF0500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too short (less than minimum size)", + "bson": "160000000F61000D0000000100000000050000000000" + }, + { + "description": "field length too short (truncates scope)", + "bson": "280000000F61001F0000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too long (clips outer doc)", + "bson": "280000000F6100210000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too long (longer than outer doc)", + "bson": "280000000F6100FF0000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length too short", + "bson": "280000000F6100200000000400000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length too long (clips scope)", + "bson": "280000000F6100200000000600000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: negative length", + "bson": "280000000F610020000000FFFFFFFF61626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length longer than field", + "bson": "280000000F610020000000FF00000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad scope doc (field has bad string length)", + "bson": "1C0000000F001500000001000000000C000000020000000000000000" + } + ] +} diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json new file mode 100644 index 0000000000..1554341d29 --- /dev/null +++ b/test/bson_corpus/datetime.json @@ -0,0 +1,43 @@ +{ + "description": "DateTime", + "bson_type": "0x09", + "test_key": "a", + "valid": [ + { + "description": "epoch", + "canonical_bson": "10000000096100000000000000000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"1970-01-01T00:00:00Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"0\"}}}" + }, + { + "description": "positive ms", + "canonical_bson": "10000000096100C5D8D6CC3B01000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"2012-12-24T12:15:30.501Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}}}" + }, + { + "description": "negative", + "canonical_bson": "10000000096100C33CE7B9BDFFFFFF00", + "relaxed_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"-284643869501\"}}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"-284643869501\"}}}" + }, + { + "description" : "Y10K", + "canonical_bson" : "1000000009610000DC1FD277E6000000", + "relaxed_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}", + "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" + }, + { + "description": "leading zero ms", + 
"canonical_bson": "10000000096100D1D6D6CC3B01000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"2012-12-24T12:15:30.001Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330001\"}}}" + } + ], + "decodeErrors": [ + { + "description": "datetime field truncated", + "bson": "0C0000000961001234567800" + } + ] +} diff --git a/test/bson_corpus/dbpointer.json b/test/bson_corpus/dbpointer.json new file mode 100644 index 0000000000..377e556a0a --- /dev/null +++ b/test/bson_corpus/dbpointer.json @@ -0,0 +1,56 @@ +{ + "description": "DBPointer type (deprecated)", + "bson_type": "0x0C", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "DBpointer", + "canonical_bson": "1A0000000C610002000000620056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "converted_bson": "2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + }, + { + "description": "DBpointer with opposite key order", + "canonical_bson": "1A0000000C610002000000620056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "degenerate_extjson": "{\"a\": {\"$dbPointer\": {\"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"$ref\": \"b\"}}}", + "converted_bson": "2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + }, + { + "description": "With two-byte UTF-8", + "canonical_bson": "1B0000000C610003000000C3A90056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"é\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "converted_bson": "2B0000000361002300000002247265660003000000C3A900072469640056E1FC72E0C917E9C47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"é\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + } + ], + "decodeErrors": [ + { + "description": "String with negative length", + "bson": "1A0000000C6100FFFFFFFF620056E1FC72E0C917E9C471416100" + }, + { + "description": "String with zero length", + "bson": "1A0000000C610000000000620056E1FC72E0C917E9C471416100" + }, + { + "description": "String not null terminated", + "bson": "1A0000000C610002000000626256E1FC72E0C917E9C471416100" + }, + { + "description": "short OID (less than minimum length for field)", + "bson": "160000000C61000300000061620056E1FC72E0C91700" + }, + { + "description": "short OID (greater than minimum, but truncated)", + "bson": "1A0000000C61000300000061620056E1FC72E0C917E9C4716100" + }, + { + "description": "String with bad UTF-8", + "bson": "1A0000000C610002000000E90056E1FC72E0C917E9C471416100" + } + ] +} diff --git a/test/bson_corpus/dbref.json b/test/bson_corpus/dbref.json new file mode 100644 index 0000000000..41c0b09d0e --- /dev/null +++ b/test/bson_corpus/dbref.json @@ -0,0 +1,51 @@ +{ + "description": "Document type (DBRef sub-documents)", + "bson_type": "0x03", + "valid": [ + { + "description": "DBRef", + "canonical_bson": "37000000036462726566002b0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "DBRef 
with database", + "canonical_bson": "4300000003646272656600370000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0224646200030000006462000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": \"db\"}}" + }, + { + "description": "DBRef with database and additional fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e0010246964002a00000002246462000300000064620002666f6f0004000000626172000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$numberInt\": \"42\"}, \"$db\": \"db\", \"foo\": \"bar\"}}" + }, + { + "description": "DBRef with additional fields", + "canonical_bson": "4400000003646272656600380000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e02666f6f0004000000626172000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"foo\": \"bar\"}}" + }, + { + "description": "Document with key names similar to those of a DBRef", + "canonical_bson": "3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000", + "canonical_extjson": "{\"$ref\": \"not-a-dbref\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$banana\": \"peel\"}" + }, + { + "description": "DBRef with additional dollar-prefixed and dotted fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e10612e62000100000010246300010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"a.b\": {\"$numberInt\": \"1\"}, \"$c\": {\"$numberInt\": \"1\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $id is missing", + "canonical_bson": "26000000036462726566001a0000000224726566000b000000636f6c6c656374696f6e000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\"}}" + }, + { + "description": "Sub-document resembles DBRef but $ref is not a string", + "canonical_bson": "2c000000036462726566002000000010247265660001000000072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $db is not a string", + "canonical_bson": "4000000003646272656600340000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e1024646200010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": {\"$numberInt\": \"1\"}}}" + } + ] +} diff --git a/test/bson_corpus/decimal128-1.json b/test/bson_corpus/decimal128-1.json new file mode 100644 index 0000000000..8e7fbc93c6 --- /dev/null +++ b/test/bson_corpus/decimal128-1.json @@ -0,0 +1,341 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "Special - Canonical NaN", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Special - Negative NaN", + "canonical_bson": "18000000136400000000000000000000000000000000FC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative NaN", + "canonical_bson": 
"18000000136400000000000000000000000000000000FC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical SNaN", + "canonical_bson": "180000001364000000000000000000000000000000007E00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative SNaN", + "canonical_bson": "18000000136400000000000000000000000000000000FE00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - NaN with a payload", + "canonical_bson": "180000001364001200000000000000000000000000007E00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical Positive Infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Special - Canonical Negative Infinity", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Special - Invalid representation treated as 0", + "canonical_bson": "180000001364000000000000000000000000000000106C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}", + "lossy": true + }, + { + "description": "Special - Invalid representation treated as -0", + "canonical_bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}", + "lossy": true + }, + { + "description": "Special - Invalid representation treated as 0E3", + "canonical_bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}", + "lossy": true + }, + { + "description": "Regular - Adjusted Exponent Limit", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", + "canonical_extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}" + }, + { + "description": "Regular - Smallest", + "canonical_bson": "18000000136400D204000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}" + }, + { + "description": "Regular - Smallest with Trailing Zeros", + "canonical_bson": "1800000013640040EF5A07000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}" + }, + { + "description": "Regular - 0.1", + "canonical_bson": "1800000013640001000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}" + }, + { + "description": "Regular - 0.1234567890123456789012345678901234", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}" + }, + { + "description": "Regular - 0", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "Regular - -0", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "Regular - -0.0", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-0.0\"}}" + }, + { + "description": "Regular - 2", + "canonical_bson": "180000001364000200000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}" + }, + { + "description": "Regular - 2.000", + "canonical_bson": "18000000136400D0070000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}" + }, + { + "description": "Regular - Largest", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Scientific - Tiniest", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}" + }, + { + "description": "Scientific - Tiny", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Scientific - Negative Tiny", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "Scientific - Adjusted Exponent Limit", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", + "canonical_extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}" + }, + { + "description": "Scientific - Fractional", + "canonical_bson": "1800000013640064000000000000000000000000002CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Scientific - 0 with Exponent", + "canonical_bson": "180000001364000000000000000000000000000000205F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}" + }, + { + "description": "Scientific - 0 with Negative Exponent", + "canonical_bson": "1800000013640000000000000000000000000000007A2B00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}" + }, + { + "description": "Scientific - No Decimal with Signed Exponent", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Scientific - Trailing Zero", + "canonical_bson": "180000001364001A04000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}" + }, + { + "description": "Scientific - With Decimal", + "canonical_bson": "180000001364006900000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}" + }, + { + "description": "Scientific - Full", + "canonical_bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}" + }, + { + "description": "Scientific - Large", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "Scientific - Largest", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "Non-Canonical Parsing - Exponent Normalization", + "canonical_bson": "1800000013640064000000000000000000000000002CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Non-Canonical Parsing - Unsigned Positive Exponent", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Lowercase Exponent Identifier", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Significand with Exponent", + "canonical_bson": "1800000013640079D9E0F9763ADA429D0200000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}" + }, + { + "description": "Non-Canonical Parsing - Positive Sign", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Decimal String", + "canonical_bson": "180000001364000100000000000000000000000000722800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}" + }, + { + "description": "Non-Canonical Parsing - nan", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - nAn", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - +infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + 
"degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infiniTY", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inf", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inF", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infinity", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infiniTy", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -Inf", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inf", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inF", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Rounded Subnormal number", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Clamped", + "canonical_bson": "180000001364000a00000000000000000000000000fe5f00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "Exact rounding", + "canonical_bson": "18000000136400000000000a5bc138938d44c64d31cc3700", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" + }, + { + "description": "Clamped zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "Clamped zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "Clamped negative zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "Clamped negative zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + } + ] +} diff --git a/test/bson_corpus/decimal128-2.json b/test/bson_corpus/decimal128-2.json new file mode 100644 index 0000000000..316d3b0e61 --- /dev/null +++ b/test/bson_corpus/decimal128-2.json @@ -0,0 +1,793 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq021] Normality", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq823] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400010000800000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}" + }, + { + "description": "[decq822] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400000000800000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483648\"}}" + }, + { + "description": "[decq821] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFF7F0000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483647\"}}" + }, + { + 
"description": "[decq820] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFF7F0000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}" + }, + { + "description": "[decq152] fold-downs (more below)", + "canonical_bson": "18000000136400393000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}" + }, + { + "description": "[decq154] fold-downs (more below)", + "canonical_bson": "18000000136400D20400000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}" + }, + { + "description": "[decq006] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}" + }, + { + "description": "[decq164] fold-downs (more below)", + "canonical_bson": "1800000013640039300000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}" + }, + { + "description": "[decq156] fold-downs (more below)", + "canonical_bson": "180000001364007B0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}" + }, + { + "description": "[decq008] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}" + }, + { + "description": "[decq158] fold-downs (more below)", + "canonical_bson": "180000001364000C0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}" + }, + { + "description": "[decq122] Nmax and similar", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq002] (mostly derived from the Strawman 4 document and examples)", + "canonical_bson": "18000000136400EE020000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}" + }, + { + "description": "[decq004] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000042B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}" + }, + { + "description": "[decq018] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000002EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}" + }, + { + "description": "[decq125] Nmax and similar", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq131] fold-downs (more below)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq162] fold-downs (more below)", + "canonical_bson": "180000001364007B000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}" + }, + { + "description": "[decq176] Nmin and below", + "canonical_bson": "18000000136400010000000A5BC138938D44C64D31008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq174] Nmin and below", + "canonical_bson": 
"18000000136400000000000A5BC138938D44C64D31008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq133] fold-downs (more below)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq160] fold-downs (more below)", + "canonical_bson": "18000000136400010000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[decq172] Nmin and below", + "canonical_bson": "180000001364000100000000000000000000000000428000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}" + }, + { + "description": "[decq010] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.750\"}}" + }, + { + "description": "[decq012] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}" + }, + { + "description": "[decq014] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000034B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}" + }, + { + "description": "[decq016] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000030B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}" + }, + { + "description": "[decq404] zeros", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq424] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq407] zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq427] negative zeros", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq409] zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq428] negative zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[decq700] Selected DPD codes", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq406] zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq426] negative zeros", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq410] zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + 
}, + { + "description": "[decq431] negative zeros", + "canonical_bson": "18000000136400000000000000000000000000000046B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}" + }, + { + "description": "[decq419] clamped zeros...", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq432] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq405] zeros", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq425] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq508] Specials", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "[decq528] Specials", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "[decq541] Specials", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "[decq074] Nmin and below", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq602] fold-down full sequence", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq604] fold-down full sequence", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq606] fold-down full sequence", + "canonical_bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq608] fold-down full sequence", + "canonical_bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq610] fold-down full sequence", + "canonical_bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq612] fold-down full sequence", + "canonical_bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + { + "description": "[decq614] fold-down full sequence", + "canonical_bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq616] fold-down full sequence", + "canonical_bson": 
"18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq618] fold-down full sequence", + "canonical_bson": "180000001364000000004A48011416954508000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq620] fold-down full sequence", + "canonical_bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq622] fold-down full sequence", + "canonical_bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq624] fold-down full sequence", + "canonical_bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq626] fold-down full sequence", + "canonical_bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq628] fold-down full sequence", + "canonical_bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq630] fold-down full sequence", + "canonical_bson": "180000001364000000E8890423C78A000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq632] fold-down full sequence", + "canonical_bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq634] fold-down full sequence", + "canonical_bson": "1800000013640000008A5D78456301000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq636] fold-down full sequence", + "canonical_bson": "180000001364000000C16FF2862300000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + { + "description": "[decq638] fold-down full sequence", + "canonical_bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq640] fold-down full sequence", + "canonical_bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq642] fold-down full sequence", + "canonical_bson": "1800000013640000A0724E18090000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq644] fold-down full sequence", + "canonical_bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" + }, + { + "description": "[decq646] fold-down full sequence", + "canonical_bson": "1800000013640000E8764817000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq648] 
fold-down full sequence", + "canonical_bson": "1800000013640000E40B5402000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq650] fold-down full sequence", + "canonical_bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq652] fold-down full sequence", + "canonical_bson": "1800000013640000E1F50500000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq654] fold-down full sequence", + "canonical_bson": "180000001364008096980000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": "[decq656] fold-down full sequence", + "canonical_bson": "1800000013640040420F0000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq658] fold-down full sequence", + "canonical_bson": "18000000136400A086010000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq660] fold-down full sequence", + "canonical_bson": "180000001364001027000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq662] fold-down full sequence", + "canonical_bson": "18000000136400E803000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq664] fold-down full sequence", + "canonical_bson": "180000001364006400000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq666] fold-down full sequence", + "canonical_bson": "180000001364000A00000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "[decq060] fold-downs (more below)", + "canonical_bson": "180000001364000100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[decq670] fold-down full sequence", + "canonical_bson": "180000001364000100000000000000000000000000FC5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}" + }, + { + "description": "[decq668] fold-down full sequence", + "canonical_bson": "180000001364000100000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}" + }, + { + "description": "[decq072] Nmin and below", + "canonical_bson": "180000001364000100000000000000000000000000420000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}" + }, + { + "description": "[decq076] Nmin and below", + "canonical_bson": "18000000136400010000000A5BC138938D44C64D31000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq036] fold-downs (more below)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq062] fold-downs (more below)", + "canonical_bson": "180000001364007B000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}" 
+ }, + { + "description": "[decq034] Nmax and similar", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq441] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[decq449] exponent lengths", + "canonical_bson": "1800000013640007000000000000000000000000001E5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}" + }, + { + "description": "[decq447] exponent lengths", + "canonical_bson": "1800000013640007000000000000000000000000000E3800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}" + }, + { + "description": "[decq445] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000063100", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}" + }, + { + "description": "[decq443] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[decq842] VG testcase", + "canonical_bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}" + }, + { + "description": "[decq841] VG testcase", + "canonical_bson": "180000001364000000203B9DB5056F000000000000002400", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}" + }, + { + "description": "[decq840] VG testcase", + "canonical_bson": "180000001364003C17258419D710C42F0000000000002400", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}" + }, + { + "description": "[decq701] Selected DPD codes", + "canonical_bson": "180000001364000900000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}" + }, + { + "description": "[decq032] Nmax and similar", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq702] Selected DPD codes", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[decq057] fold-downs (more below)", + "canonical_bson": "180000001364000C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[decq703] Selected DPD codes", + "canonical_bson": "180000001364001300000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}" + }, + { + "description": "[decq704] Selected DPD codes", + "canonical_bson": "180000001364001400000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"20\"}}" + }, + { + "description": "[decq705] Selected DPD codes", + "canonical_bson": "180000001364001D00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}" + }, + { + "description": "[decq706] Selected DPD codes", + "canonical_bson": "180000001364001E00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}" + }, + { + "description": "[decq707] Selected DPD codes", + "canonical_bson": 
"180000001364002700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}" + }, + { + "description": "[decq708] Selected DPD codes", + "canonical_bson": "180000001364002800000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}" + }, + { + "description": "[decq709] Selected DPD codes", + "canonical_bson": "180000001364003100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}" + }, + { + "description": "[decq710] Selected DPD codes", + "canonical_bson": "180000001364003200000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}" + }, + { + "description": "[decq711] Selected DPD codes", + "canonical_bson": "180000001364003B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}" + }, + { + "description": "[decq712] Selected DPD codes", + "canonical_bson": "180000001364003C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}" + }, + { + "description": "[decq713] Selected DPD codes", + "canonical_bson": "180000001364004500000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}" + }, + { + "description": "[decq714] Selected DPD codes", + "canonical_bson": "180000001364004600000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}" + }, + { + "description": "[decq715] Selected DPD codes", + "canonical_bson": "180000001364004700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}" + }, + { + "description": "[decq716] Selected DPD codes", + "canonical_bson": "180000001364004800000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"72\"}}" + }, + { + "description": "[decq717] Selected DPD codes", + "canonical_bson": "180000001364004900000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}" + }, + { + "description": "[decq718] Selected DPD codes", + "canonical_bson": "180000001364004A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}" + }, + { + "description": "[decq719] Selected DPD codes", + "canonical_bson": "180000001364004B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}" + }, + { + "description": "[decq720] Selected DPD codes", + "canonical_bson": "180000001364004C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}" + }, + { + "description": "[decq721] Selected DPD codes", + "canonical_bson": "180000001364004D00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"77\"}}" + }, + { + "description": "[decq722] Selected DPD codes", + "canonical_bson": "180000001364004E00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}" + }, + { + "description": "[decq723] Selected DPD codes", + "canonical_bson": "180000001364004F00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}" + }, + { + "description": "[decq056] fold-downs (more below)", + "canonical_bson": "180000001364007B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}" + }, + { + "description": "[decq064] fold-downs (more below)", + "canonical_bson": 
"1800000013640039300000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}" + }, + { + "description": "[decq732] Selected DPD codes", + "canonical_bson": "180000001364000802000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}" + }, + { + "description": "[decq733] Selected DPD codes", + "canonical_bson": "180000001364000902000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}" + }, + { + "description": "[decq740] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364000903000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}" + }, + { + "description": "[decq741] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364000A03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}" + }, + { + "description": "[decq742] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364001303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}" + }, + { + "description": "[decq746] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364001F03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}" + }, + { + "description": "[decq743] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364006D03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}" + }, + { + "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364007803000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}" + }, + { + "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364007903000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}" + }, + { + "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364008203000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}" + }, + { + "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364008303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}" + }, + { + "description": "[decq745] DPD: one of each of the huffman groups", + "canonical_bson": "18000000136400D303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}" + }, + { + "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400DC03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}" + }, + { + "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400DD03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}" + }, + { + "description": "[decq730] Selected DPD codes", + "canonical_bson": "18000000136400E203000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}" + }, + { + "description": "[decq731] Selected DPD codes", + "canonical_bson": "18000000136400E303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"995\"}}" + }, + { + "description": "[decq744] DPD: one of each of the huffman groups", + "canonical_bson": "18000000136400E503000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}" + }, + { + "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400E603000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}" + }, + { + "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400E703000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}" + }, + { + "description": "[decq053] fold-downs (more below)", + "canonical_bson": "18000000136400D204000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}" + }, + { + "description": "[decq052] fold-downs (more below)", + "canonical_bson": "180000001364003930000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}" + }, + { + "description": "[decq792] Miscellaneous (testers' queries, etc.)", + "canonical_bson": "180000001364003075000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}" + }, + { + "description": "[decq793] Miscellaneous (testers' queries, etc.)", + "canonical_bson": "1800000013640090940D0000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}" + }, + { + "description": "[decq824] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFF7F00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483646\"}}" + }, + { + "description": "[decq825] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFF7F00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}" + }, + { + "description": "[decq826] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000000008000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}" + }, + { + "description": "[decq827] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000100008000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}" + }, + { + "description": "[decq828] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFFFF00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}" + }, + { + "description": "[decq829] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFFFF00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}" + }, + { + "description": "[decq830] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000000000001000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}" + }, + { + "description": "[decq831] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000100000001000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}" + }, + { + "description": "[decq022] Normality", + "canonical_bson": "18000000136400C7711CC7B548F377DC80A131C836403000", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}" + }, + { + "description": "[decq020] Normality", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq550] Specials", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9999999999999999999999999999999999\"}}" + } + ] +} + diff --git a/test/bson_corpus/decimal128-3.json b/test/bson_corpus/decimal128-3.json new file mode 100644 index 0000000000..9b015343ce --- /dev/null +++ b/test/bson_corpus/decimal128-3.json @@ -0,0 +1,1771 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx066] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx065] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx064] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx041] strings without E cannot generate E in result", + "canonical_bson": "180000001364004C0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}" + }, + { + "description": "[basx027] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000F270000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}" + }, + { + "description": "[basx026] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364009F230000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}" + }, + { + "description": "[basx025] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364008F030000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}" + }, + { + "description": "[basx024] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364005B000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}" + }, + { + "description": "[dqbsr531] negatives (Rounded)", + "canonical_bson": "1800000013640099761CC7B548F377DC80A131C836FEAF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}" + }, + { + "description": "[basx022] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}" + }, + { + "description": "[basx021] conform to rules and exponent will be in permitted range).", + "canonical_bson": 
"18000000136400010000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[basx601] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx622] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002EB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}" + }, + { + "description": "[basx602] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx621] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000030B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}" + }, + { + "description": "[basx603] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx620] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000032B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx604] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx619] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000034B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx605] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx618] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx680] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx606] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx617] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx681] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx686] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0\"}}" + }, + { + "description": "[basx687] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx019] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx607] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx616] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx682] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx155] Numbers with E", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx130] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx290] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx131] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx291] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx132] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx292] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000034B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx133] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx293] some more negative zeros [systematic tests below]", + 
"canonical_bson": "18000000136400000000000000000000000000000032B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx608] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx615] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx683] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx630] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx670] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx631] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx671] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx134] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx294] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx632] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx672] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx135] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx295] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx633] Zeros", 
+ "canonical_bson": "180000001364000000000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx673] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx136] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx674] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx634] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx137] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx635] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx675] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx636] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx676] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx637] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx677] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx638] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx678] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" 
: \"0E-10\"}}" + }, + { + "description": "[basx149] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx639] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx679] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}" + }, + { + "description": "[basx063] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx018] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx609] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx614] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx684] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx640] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx660] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx641] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx661] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx296] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx642] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": 
"[basx662] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx297] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx643] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx663] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx644] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx664] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx645] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx665] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx646] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx666] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx647] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx667] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx648] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx668] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx160] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx161] Numbers with E", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx649] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx669] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" + }, + { + "description": "[basx062] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx001] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx017] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx611] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx613] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx685] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx688] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx689] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx650] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx651] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0E+1\"}}" + }, + { + "description": "[basx298] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx652] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx299] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx653] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx654] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx655] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx656] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx657] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx658] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000503000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx138] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx139] Numbers with E", + "canonical_bson": "18000000136400000000000000000000000000000052B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}" + }, + { + "description": "[basx144] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx154] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx659] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx042] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx143] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" 
+ }, + { + "description": "[basx061] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx036] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000203000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}" + }, + { + "description": "[basx035] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000223000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}" + }, + { + "description": "[basx034] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000243000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}" + }, + { + "description": "[basx053] strings without E cannot generate E in result", + "canonical_bson": "180000001364003200000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx033] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000263000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}" + }, + { + "description": "[basx016] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000C000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}" + }, + { + "description": "[basx015] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364007B000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}" + }, + { + "description": "[basx037] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640078DF0D8648700000000000000000223000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}" + }, + { + "description": "[basx038] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640079DF0D8648700000000000000000223000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}" + }, + { + "description": "[basx250] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx257] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx256] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx258] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": 
"{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx251] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000103000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}" + }, + { + "description": "[basx263] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000603000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}" + }, + { + "description": "[basx255] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx259] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx254] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx260] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx253] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx261] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx252] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000283000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}" + }, + { + "description": "[basx262] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx159] Numbers with E", + "canonical_bson": "1800000013640049000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}" + }, + { + "description": "[basx004] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640064000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}" + }, + { + "description": "[basx003] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1.0\"}}" + }, + { + "description": "[basx002] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[basx148] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx153] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx141] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx146] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx151] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx142] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx147] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx152] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx140] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx150] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx014] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400D2040000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}" + }, + { + "description": "[basx170] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx177] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx176] Numbers with E", + "canonical_bson": 
"18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx178] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx171] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000123000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}" + }, + { + "description": "[basx183] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000623000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}" + }, + { + "description": "[basx175] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx179] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx174] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx180] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx173] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx181] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx172] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}" + }, + { + "description": "[basx182] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}" + }, + { + "description": "[basx157] Numbers with E", + "canonical_bson": "180000001364000400000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}" + }, + { + "description": "[basx067] examples", + "canonical_bson": "180000001364000500000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx069] examples", + "canonical_bson": "180000001364000500000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx385] Engineering notation tests", + "canonical_bson": 
"180000001364000700000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[basx365] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000543000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}" + }, + { + "description": "[basx405] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}" + }, + { + "description": "[basx363] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000563000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}" + }, + { + "description": "[basx407] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}" + }, + { + "description": "[basx361] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}" + }, + { + "description": "[basx409] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000283000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}" + }, + { + "description": "[basx411] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000263000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}" + }, + { + "description": "[basx383] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}" + }, + { + "description": "[basx387] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}" + }, + { + "description": "[basx381] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}" + }, + { + "description": "[basx389] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}" + }, + { + "description": "[basx379] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}" + }, + { + "description": "[basx391] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}" + }, + { + "description": "[basx377] Engineering notation tests", + 
"canonical_bson": "180000001364000700000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}" + }, + { + "description": "[basx393] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}" + }, + { + "description": "[basx375] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}" + }, + { + "description": "[basx395] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}" + }, + { + "description": "[basx373] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+6\"}}" + }, + { + "description": "[basx397] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}" + }, + { + "description": "[basx371] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}" + }, + { + "description": "[basx399] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}" + }, + { + "description": "[basx369] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}" + }, + { + "description": "[basx401] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000303000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}" + }, + { + "description": "[basx367] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[basx403] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}" + }, + { + "description": "[basx007] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640064000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.0\"}}" + }, + { + "description": "[basx005] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx165] Numbers with E", + 
"canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx163] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx325] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx305] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000543000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}" + }, + { + "description": "[basx345] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}" + }, + { + "description": "[basx303] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000563000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}" + }, + { + "description": "[basx347] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}" + }, + { + "description": "[basx301] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}" + }, + { + "description": "[basx349] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000283000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}" + }, + { + "description": "[basx351] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000263000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}" + }, + { + "description": "[basx323] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}" + }, + { + "description": "[basx327] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" + }, + { + "description": "[basx321] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}" + }, + { + "description": 
"[basx329] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}" + }, + { + "description": "[basx319] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}" + }, + { + "description": "[basx331] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.010\"}}" + }, + { + "description": "[basx317] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}" + }, + { + "description": "[basx333] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}" + }, + { + "description": "[basx315] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}" + }, + { + "description": "[basx335] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}" + }, + { + "description": "[basx313] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}" + }, + { + "description": "[basx337] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}" + }, + { + "description": "[basx311] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}" + }, + { + "description": "[basx339] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}" + }, + { + "description": "[basx309] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+9\"}}" + }, + { + "description": "[basx341] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1.0E-7\"}}" + }, + { + "description": "[basx164] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}" + }, + { + "description": "[basx162] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx307] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx343] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}" + }, + { + "description": "[basx008] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640065000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}" + }, + { + "description": "[basx009] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640068000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.4\"}}" + }, + { + "description": "[basx010] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640069000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}" + }, + { + "description": "[basx011] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006A000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}" + }, + { + "description": "[basx012] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006D000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}" + }, + { + "description": "[basx013] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006E000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}" + }, + { + "description": "[basx040] strings without E cannot generate E in result", + "canonical_bson": "180000001364000C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[basx190] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx197] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx196] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx198] Numbers with E", + 
"canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx191] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000143000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}" + }, + { + "description": "[basx203] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000643000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}" + }, + { + "description": "[basx195] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx199] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx194] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx200] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx193] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx201] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx192] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}" + }, + { + "description": "[basx202] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}" + }, + { + "description": "[basx044] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx042] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx046] strings without E cannot generate E in result", + "canonical_bson": 
"180000001364001100000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}" + }, + { + "description": "[basx049] strings without E cannot generate E in result", + "canonical_bson": "180000001364002C00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx048] strings without E cannot generate E in result", + "canonical_bson": "180000001364002C00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx158] Numbers with E", + "canonical_bson": "180000001364002C00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}" + }, + { + "description": "[basx068] examples", + "canonical_bson": "180000001364003200000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx169] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx167] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx168] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}" + }, + { + "description": "[basx166] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx210] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx217] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx216] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx218] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx211] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000163000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"126.5E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}" + }, + { + "description": "[basx223] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000663000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}" + }, + { + "description": "[basx215] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx219] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx214] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx220] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx213] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx221] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx212] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}" + }, + { + "description": "[basx222] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}" + }, + { + "description": "[basx006] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400E803000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}" + }, + { + "description": "[basx230] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx237] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx236] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx238] Numbers with E", + "canonical_bson": 
"18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx231] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000183000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}" + }, + { + "description": "[basx243] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000683000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}" + }, + { + "description": "[basx235] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx239] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx234] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx240] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx233] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx241] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx232] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx242] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}" + }, + { + "description": "[basx060] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx059] strings without E cannot generate E in result", + "canonical_bson": "18000000136400F198670C08000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}" + }, + { + "description": "[basx058] strings without E cannot generate E in result", + "canonical_bson": 
"180000001364006AF90B7C50000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}" + }, + { + "description": "[basx057] strings without E cannot generate E in result", + "canonical_bson": "180000001364006A19562522020000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}" + }, + { + "description": "[basx056] strings without E cannot generate E in result", + "canonical_bson": "180000001364006AB9C8733A0B0000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}" + }, + { + "description": "[basx031] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640040AF0D8648700000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}" + }, + { + "description": "[basx030] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640080910F8648700000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}" + }, + { + "description": "[basx032] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640080910F8648700000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}" + } + ] +} diff --git a/test/bson_corpus/decimal128-4.json b/test/bson_corpus/decimal128-4.json new file mode 100644 index 0000000000..0957019351 --- /dev/null +++ b/test/bson_corpus/decimal128-4.json @@ -0,0 +1,165 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx023] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640001000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}" + }, + + { + "description": "[basx045] strings without E cannot generate E in result", + "canonical_bson": "1800000013640003000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}" + }, + { + "description": "[basx610] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx612] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx043] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx055] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-8\"}}" + }, + { + "description": "[basx054] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx052] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx051] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}" + }, + { + "description": "[basx050] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}" + }, + { + "description": "[basx047] strings without E cannot generate E in result", + "canonical_bson": "1800000013640005000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}" + }, + { + "description": "[dqbsr431] check rounding modes heeded (Rounded)", + "canonical_bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}" + }, + { + "description": "OK2", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FC2F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}" + } + ], + "parseErrors": [ + { + "description": "[basx564] Near-specials (Conversion_syntax)", + "string": "Infi" + }, + { + "description": "[basx565] Near-specials (Conversion_syntax)", + "string": "Infin" + }, + { + "description": "[basx566] Near-specials (Conversion_syntax)", + "string": "Infini" + }, + { + "description": "[basx567] Near-specials (Conversion_syntax)", + "string": "Infinit" + }, + { + "description": "[basx568] Near-specials (Conversion_syntax)", + "string": "-Infinit" + }, + { + "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".Infinity" + }, + { + "description": "[basx562] Near-specials (Conversion_syntax)", + "string": "NaNq" + }, + { + "description": "[basx563] Near-specials (Conversion_syntax)", + "string": "NaNs" + }, + { + "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "-7e10000" + }, + { + "description": "[dqbsr534] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234650" + }, + { + "description": "[dqbsr535] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr533] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr532] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr435] check rounding modes heeded (Rounded 
& Inexact)", + "string": "1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234650" + }, + { + "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "7e10000" + }, + { + "description": "Inexact rounding#1", + "string": "100000000000000000000000000000000000000000000000000000000001" + }, + { + "description": "Inexact rounding#2", + "string": "1E-6177" + } + ] +} diff --git a/test/bson_corpus/decimal128-5.json b/test/bson_corpus/decimal128-5.json new file mode 100644 index 0000000000..e976eae407 --- /dev/null +++ b/test/bson_corpus/decimal128-5.json @@ -0,0 +1,402 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq035] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq037] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq077] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq078] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq079] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq080] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq081] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000020000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq082] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000020000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq083] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq084] Nmin and below (Subnormal)", + "canonical_bson": 
"180000001364000100000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "canonical_bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq130] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq132] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq177] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq178] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq179] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq180] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq181] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000028000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq182] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000028000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq183] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq184] Nmin and below (Subnormal)", + "canonical_bson": 
"180000001364000100000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq190] underflow edge cases (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq200] underflow edge cases (Subnormal)", + "canonical_bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq400] zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq401] zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq414] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq416] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq418] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq420] negative zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq421] negative zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq434] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq436] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq438] clamped zeros... 
(Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq601] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq603] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq605] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq607] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq609] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq611] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + { + "description": "[decq613] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq615] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq617] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000004A48011416954508000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq619] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6135\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq621] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + 
"degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq623] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq625] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq627] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq629] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000E8890423C78A000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq631] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq633] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000008A5D78456301000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq635] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000C16FF2862300000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + { + "description": "[decq637] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq639] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq641] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000A0724E18090000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6124\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq643] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" + }, + { + "description": 
"[decq645] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E8764817000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq647] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E40B5402000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq649] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq651] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E1F50500000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq653] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364008096980000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": "[decq655] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640040420F0000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq657] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400A086010000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq659] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364001027000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq661] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400E803000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq663] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364006400000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq665] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000A00000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + } + ] +} + diff --git a/test/bson_corpus/decimal128-6.json b/test/bson_corpus/decimal128-6.json new file mode 100644 index 0000000000..eba6764e85 --- /dev/null +++ b/test/bson_corpus/decimal128-6.json @@ -0,0 +1,131 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": 
"Incomplete Exponent", + "string": "1e" + }, + { + "description": "Exponent at the beginning", + "string": "E01" + }, + { + "description": "Just a decimal place", + "string": "." + }, + { + "description": "2 decimal places", + "string": "..3" + }, + { + "description": "2 decimal places", + "string": ".13.3" + }, + { + "description": "2 decimal places", + "string": "1..3" + }, + { + "description": "2 decimal places", + "string": "1.3.4" + }, + { + "description": "2 decimal places", + "string": "1.34." + }, + { + "description": "Decimal with no digits", + "string": ".e" + }, + { + "description": "2 signs", + "string": "+-32.4" + }, + { + "description": "2 signs", + "string": "-+32.4" + }, + { + "description": "2 negative signs", + "string": "--32.4" + }, + { + "description": "2 negative signs", + "string": "-32.-4" + }, + { + "description": "End in negative sign", + "string": "32.0-" + }, + { + "description": "2 negative signs", + "string": "32.4E--21" + }, + { + "description": "2 negative signs", + "string": "32.4E-2-1" + }, + { + "description": "2 signs", + "string": "32.4E+-21" + }, + { + "description": "Empty string", + "string": "" + }, + { + "description": "leading white space positive number", + "string": " 1" + }, + { + "description": "leading white space negative number", + "string": " -1" + }, + { + "description": "trailing white space", + "string": "1 " + }, + { + "description": "Invalid", + "string": "E" + }, + { + "description": "Invalid", + "string": "invalid" + }, + { + "description": "Invalid", + "string": "i" + }, + { + "description": "Invalid", + "string": "in" + }, + { + "description": "Invalid", + "string": "-in" + }, + { + "description": "Invalid", + "string": "Na" + }, + { + "description": "Invalid", + "string": "-Na" + }, + { + "description": "Invalid", + "string": "1.23abc" + }, + { + "description": "Invalid", + "string": "1.23abcE+02" + }, + { + "description": "Invalid", + "string": "1.23E+0aabs2" + } + ] +} diff --git a/test/bson_corpus/decimal128-7.json b/test/bson_corpus/decimal128-7.json new file mode 100644 index 0000000000..0b78f1237b --- /dev/null +++ b/test/bson_corpus/decimal128-7.json @@ -0,0 +1,327 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": "[basx572] Near-specials (Conversion_syntax)", + "string": "-9Inf" + }, + { + "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-1-" + }, + { + "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "0000.." + }, + { + "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".0000." 
+ }, + { + "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "00..00" + }, + { + "description": "[basx569] Near-specials (Conversion_syntax)", + "string": "0Inf" + }, + { + "description": "[basx571] Near-specials (Conversion_syntax)", + "string": "-0Inf" + }, + { + "description": "[basx575] Near-specials (Conversion_syntax)", + "string": "0sNaN" + }, + { + "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "++1" + }, + { + "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "--1" + }, + { + "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-+1" + }, + { + "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "+-1" + }, + { + "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " +1" + }, + { + "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " + 1" + }, + { + "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " - 1" + }, + { + "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "." + }, + { + "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".." + }, + { + "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "" + }, + { + "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e100" + }, + { + "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e+1" + }, + { + "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".e+1" + }, + { + "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.e+1" + }, + { + "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "E+1" + }, + { + "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".E+1" + }, + { + "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.E+1" + }, + { + "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e+" + }, + { + "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e" + }, + { + "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E+" + }, + { + "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E" + }, + { + "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.Inf" + }, + { + "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".NaN" + }, + { + "description": "[basx587] some baddies with dots and 
Es and dots and specials (Conversion_syntax)", + "string": "-.NaN" + }, + { + "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ONE" + }, + { + "description": "[basx561] Near-specials (Conversion_syntax)", + "string": "qNaN" + }, + { + "description": "[basx573] Near-specials (Conversion_syntax)", + "string": "-sNa" + }, + { + "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.sNaN" + }, + { + "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ten" + }, + { + "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0b65" + }, + { + "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0e5a" + }, + { + "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "x" + }, + { + "description": "[basx574] Near-specials (Conversion_syntax)", + "string": "xNaN" + }, + { + "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".123.5" + }, + { + "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1..2" + }, + { + "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1.0" + }, + { + "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+1.2.3" + }, + { + "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e123e" + }, + { + "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+1.2" + }, + { + "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e.1" + }, + { + "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1." 
+ }, + { + "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E++1" + }, + { + "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E--1" + }, + { + "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+-1" + }, + { + "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E-+1" + }, + { + "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E'1" + }, + { + "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E\"1" + }, + { + "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e-" + }, + { + "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E" + }, + { + "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1ee" + }, + { + "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.2.1" + }, + { + "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.23.4" + }, + { + "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.34.5" + }, + { + "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35." 
+ }, + { + "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35-" + }, + { + "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "3+" + }, + { + "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e99999a" + }, + { + "description": "[basx570] Near-specials (Conversion_syntax)", + "string": "9Inf" + }, + { + "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12 " + }, + { + "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12-" + }, + { + "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e" + }, + { + "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e++" + }, + { + "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12f4" + }, + { + "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e*123" + }, + { + "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e123-" + }, + { + "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e1*23" + }, + { + "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e+12+" + }, + { + "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e1-3-" + }, + { + "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111E1e+3" + }, + { + "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "123,65" + }, + { + "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e12356789012x" + }, + { + "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e123567890x" + } + ] +} diff --git a/test/bson_corpus/document.json b/test/bson_corpus/document.json new file mode 100644 index 0000000000..698e7ae90a --- /dev/null +++ b/test/bson_corpus/document.json @@ -0,0 +1,60 @@ +{ + "description": "Document type (sub-documents)", + "bson_type": "0x03", + "test_key": "x", + "valid": [ + { + "description": "Empty subdoc", + "canonical_bson": "0D000000037800050000000000", + "canonical_extjson": "{\"x\" : {}}" + }, + { + "description": "Empty-string key subdoc", + "canonical_bson": "150000000378000D00000002000200000062000000", + "canonical_extjson": "{\"x\" : {\"\" : \"b\"}}" + }, + { + "description": "Single-character key subdoc", + "canonical_bson": "160000000378000E0000000261000200000062000000", + "canonical_extjson": "{\"x\" : {\"a\" : \"b\"}}" + }, + { + "description": "Dollar-prefixed key in sub-document", + "canonical_bson": "170000000378000F000000022461000200000062000000", + "canonical_extjson": "{\"x\" : {\"$a\" : \"b\"}}" + }, + { + "description": "Dollar as key in sub-document", + "canonical_bson": 
"160000000378000E0000000224000200000061000000", + "canonical_extjson": "{\"x\" : {\"$\" : \"a\"}}" + }, + { + "description": "Dotted key in sub-document", + "canonical_bson": "180000000378001000000002612E62000200000063000000", + "canonical_extjson": "{\"x\" : {\"a.b\" : \"c\"}}" + }, + { + "description": "Dot as key in sub-document", + "canonical_bson": "160000000378000E000000022E000200000061000000", + "canonical_extjson": "{\"x\" : {\".\" : \"a\"}}" + } + ], + "decodeErrors": [ + { + "description": "Subdocument length too long: eats outer terminator", + "bson": "1800000003666F6F000F0000001062617200FFFFFF7F0000" + }, + { + "description": "Subdocument length too short: leaks terminator", + "bson": "1500000003666F6F000A0000000862617200010000" + }, + { + "description": "Invalid subdocument: bad string length in field", + "bson": "1C00000003666F6F001200000002626172000500000062617A000000" + }, + { + "description": "Null byte in sub-document key", + "bson": "150000000378000D00000010610000010000000000" + } + ] +} diff --git a/test/bson_corpus/double.json b/test/bson_corpus/double.json new file mode 100644 index 0000000000..d5b8fb3d7e --- /dev/null +++ b/test/bson_corpus/double.json @@ -0,0 +1,87 @@ +{ + "description": "Double type", + "bson_type": "0x01", + "test_key": "d", + "valid": [ + { + "description": "+1.0", + "canonical_bson": "10000000016400000000000000F03F00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.0\"}}", + "relaxed_extjson": "{\"d\" : 1.0}" + }, + { + "description": "-1.0", + "canonical_bson": "10000000016400000000000000F0BF00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.0\"}}", + "relaxed_extjson": "{\"d\" : -1.0}" + }, + { + "description": "+1.0001220703125", + "canonical_bson": "10000000016400000000008000F03F00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.0001220703125\"}}", + "relaxed_extjson": "{\"d\" : 1.0001220703125}" + }, + { + "description": "-1.0001220703125", + "canonical_bson": "10000000016400000000008000F0BF00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.0001220703125\"}}", + "relaxed_extjson": "{\"d\" : -1.0001220703125}" + }, + { + "description": "1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b14300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232E+18\"}}", + "relaxed_extjson": "{\"d\" : 1.2345678921232E+18}" + }, + { + "description": "-1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b1c300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232E+18\"}}", + "relaxed_extjson": "{\"d\" : -1.2345678921232E+18}" + }, + { + "description": "0.0", + "canonical_bson": "10000000016400000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"0.0\"}}", + "relaxed_extjson": "{\"d\" : 0.0}" + }, + { + "description": "-0.0", + "canonical_bson": "10000000016400000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-0.0\"}}", + "relaxed_extjson": "{\"d\" : -0.0}" + }, + { + "description": "NaN", + "canonical_bson": "10000000016400000000000000F87F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "lossy": true + }, + { + "description": "NaN with payload", + "canonical_bson": "10000000016400120000000000F87F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "lossy": true + }, + { + "description": "Inf", + "canonical_bson": 
"10000000016400000000000000F07F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"Infinity\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"Infinity\"}}" + }, + { + "description": "-Inf", + "canonical_bson": "10000000016400000000000000F0FF00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"-Infinity\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"-Infinity\"}}" + } + ], + "decodeErrors": [ + { + "description": "double truncated", + "bson": "0B0000000164000000F03F00" + } + ] +} diff --git a/test/bson_corpus/int32.json b/test/bson_corpus/int32.json new file mode 100644 index 0000000000..1353fc3df8 --- /dev/null +++ b/test/bson_corpus/int32.json @@ -0,0 +1,43 @@ +{ + "description": "Int32 type", + "bson_type": "0x10", + "test_key": "i", + "valid": [ + { + "description": "MinValue", + "canonical_bson": "0C0000001069000000008000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"-2147483648\"}}", + "relaxed_extjson": "{\"i\" : -2147483648}" + }, + { + "description": "MaxValue", + "canonical_bson": "0C000000106900FFFFFF7F00", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"2147483647\"}}", + "relaxed_extjson": "{\"i\" : 2147483647}" + }, + { + "description": "-1", + "canonical_bson": "0C000000106900FFFFFFFF00", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"-1\"}}", + "relaxed_extjson": "{\"i\" : -1}" + }, + { + "description": "0", + "canonical_bson": "0C0000001069000000000000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"0\"}}", + "relaxed_extjson": "{\"i\" : 0}" + }, + { + "description": "1", + "canonical_bson": "0C0000001069000100000000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"1\"}}", + "relaxed_extjson": "{\"i\" : 1}" + } + ], + "decodeErrors": [ + { + "description": "Bad int32 field length", + "bson": "090000001061000500" + } + ] +} diff --git a/test/bson_corpus/int64.json b/test/bson_corpus/int64.json new file mode 100644 index 0000000000..91f4abff95 --- /dev/null +++ b/test/bson_corpus/int64.json @@ -0,0 +1,43 @@ +{ + "description": "Int64 type", + "bson_type": "0x12", + "test_key": "a", + "valid": [ + { + "description": "MinValue", + "canonical_bson": "10000000126100000000000000008000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"-9223372036854775808\"}}", + "relaxed_extjson": "{\"a\" : -9223372036854775808}" + }, + { + "description": "MaxValue", + "canonical_bson": "10000000126100FFFFFFFFFFFFFF7F00", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"9223372036854775807\"}}", + "relaxed_extjson": "{\"a\" : 9223372036854775807}" + }, + { + "description": "-1", + "canonical_bson": "10000000126100FFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"-1\"}}", + "relaxed_extjson": "{\"a\" : -1}" + }, + { + "description": "0", + "canonical_bson": "10000000126100000000000000000000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"0\"}}", + "relaxed_extjson": "{\"a\" : 0}" + }, + { + "description": "1", + "canonical_bson": "10000000126100010000000000000000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"1\"}}", + "relaxed_extjson": "{\"a\" : 1}" + } + ], + "decodeErrors": [ + { + "description": "int64 field truncated", + "bson": "0C0000001261001234567800" + } + ] +} diff --git a/test/bson_corpus/maxkey.json b/test/bson_corpus/maxkey.json new file mode 100644 index 0000000000..67cad6db57 --- /dev/null +++ b/test/bson_corpus/maxkey.json @@ -0,0 +1,12 @@ +{ + "description": "Maxkey type", + "bson_type": "0x7F", + "test_key": "a", + "valid": [ + { + "description": "Maxkey", + 
"canonical_bson": "080000007F610000", + "canonical_extjson": "{\"a\" : {\"$maxKey\" : 1}}" + } + ] +} diff --git a/test/bson_corpus/minkey.json b/test/bson_corpus/minkey.json new file mode 100644 index 0000000000..8adee4509a --- /dev/null +++ b/test/bson_corpus/minkey.json @@ -0,0 +1,12 @@ +{ + "description": "Minkey type", + "bson_type": "0xFF", + "test_key": "a", + "valid": [ + { + "description": "Minkey", + "canonical_bson": "08000000FF610000", + "canonical_extjson": "{\"a\" : {\"$minKey\" : 1}}" + } + ] +} diff --git a/test/bson_corpus/multi-type-deprecated.json b/test/bson_corpus/multi-type-deprecated.json new file mode 100644 index 0000000000..665f388cd4 --- /dev/null +++ b/test/bson_corpus/multi-type-deprecated.json @@ -0,0 +1,15 @@ +{ + "description": "Multiple types within the same document", + "bson_type": "0x00", + "deprecated": true, + "valid": [ + { + "description": "All BSON types", + "canonical_bson": "38020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000B000000636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", + "converted_bson": "48020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002b0000000224726566000b000000636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", + "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : 
{\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", + "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" + } + ] +} + diff --git a/test/bson_corpus/multi-type.json b/test/bson_corpus/multi-type.json new file mode 100644 index 0000000000..1e1d557c9b --- /dev/null +++ b/test/bson_corpus/multi-type.json @@ -0,0 +1,11 @@ +{ + "description": "Multiple types within the same document", + "bson_type": "0x00", + "valid": [ + { + "description": "All BSON types", + "canonical_bson": 
"F4010000075F69640057E193D7A9CC81B4027498B502537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C73650000034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0000", + "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null}" + } + ] +} diff --git a/test/bson_corpus/null.json b/test/bson_corpus/null.json new file mode 100644 index 0000000000..f9b269473e --- /dev/null +++ b/test/bson_corpus/null.json @@ -0,0 +1,12 @@ +{ + "description": "Null type", + "bson_type": "0x0A", + "test_key": "a", + "valid": [ + { + "description": "Null", + "canonical_bson": "080000000A610000", + "canonical_extjson": "{\"a\" : null}" + } + ] +} diff --git a/test/bson_corpus/oid.json b/test/bson_corpus/oid.json new file mode 100644 index 0000000000..14e9caf4b4 --- /dev/null +++ b/test/bson_corpus/oid.json @@ -0,0 +1,28 @@ +{ + "description": "ObjectId", + "bson_type": "0x07", + "test_key": "a", + "valid": [ + { + "description": "All zeroes", + "canonical_bson": "1400000007610000000000000000000000000000", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"000000000000000000000000\"}}" + }, + { + "description": "All ones", + "canonical_bson": "14000000076100FFFFFFFFFFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"ffffffffffffffffffffffff\"}}" + }, + { + "description": "Random", + "canonical_bson": "1400000007610056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"56e1fc72e0c917e9c4714161\"}}" + } + ], + "decodeErrors": [ + { + "description": "OID truncated", + "bson": 
"1200000007610056E1FC72E0C917E9C471" + } + ] +} diff --git a/test/bson_corpus/regex.json b/test/bson_corpus/regex.json new file mode 100644 index 0000000000..223802169d --- /dev/null +++ b/test/bson_corpus/regex.json @@ -0,0 +1,65 @@ +{ + "description": "Regular Expression type", + "bson_type": "0x0B", + "test_key": "a", + "valid": [ + { + "description": "empty regex with no options", + "canonical_bson": "0A0000000B6100000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"\", \"options\" : \"\"}}}" + }, + { + "description": "regex without options", + "canonical_bson": "0D0000000B6100616263000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"\"}}}" + }, + { + "description": "regex with options", + "canonical_bson": "0F0000000B610061626300696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"im\"}}}" + }, + { + "description": "regex with options (keys reversed)", + "canonical_bson": "0F0000000B610061626300696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"im\"}}}", + "degenerate_extjson": "{\"a\" : {\"$regularExpression\" : {\"options\" : \"im\", \"pattern\": \"abc\"}}}" + }, + { + "description": "regex with slash", + "canonical_bson": "110000000B610061622F636400696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"ab/cd\", \"options\" : \"im\"}}}" + }, + { + "description": "flags not alphabetized", + "degenerate_bson": "100000000B6100616263006D69780000", + "canonical_bson": "100000000B610061626300696D780000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"imx\"}}}", + "degenerate_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"mix\"}}}" + }, + { + "description" : "Required escapes", + "canonical_bson" : "100000000B610061625C226162000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"ab\\\\\\\"ab\", \"options\" : \"\"}}}" + }, + { + "description" : "Regular expression as value of $regex query operator", + "canonical_bson" : "180000000B247265676578007061747465726E0069780000", + "canonical_extjson": "{\"$regex\" : {\"$regularExpression\" : { \"pattern\": \"pattern\", \"options\" : \"ix\"}}}" + }, + { + "description" : "Regular expression as value of $regex query operator with $options", + "canonical_bson" : "270000000B247265676578007061747465726E000002246F7074696F6E73000300000069780000", + "canonical_extjson": "{\"$regex\" : {\"$regularExpression\" : { \"pattern\": \"pattern\", \"options\" : \"\"}}, \"$options\" : \"ix\"}" + } + ], + "decodeErrors": [ + { + "description": "Null byte in pattern string", + "bson": "0F0000000B610061006300696D0000" + }, + { + "description": "Null byte in flags string", + "bson": "100000000B61006162630069006D0000" + } + ] +} diff --git a/test/bson_corpus/string.json b/test/bson_corpus/string.json new file mode 100644 index 0000000000..148334d091 --- /dev/null +++ b/test/bson_corpus/string.json @@ -0,0 +1,72 @@ +{ + "description": "String", + "bson_type": "0x02", + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D000000026100010000000000", + "canonical_extjson": "{\"a\" : \"\"}" + }, + { + "description": "Single character", + "canonical_bson": "0E00000002610002000000620000", + "canonical_extjson": "{\"a\" : \"b\"}" + }, + { + "description": "Multi-character", + 
"canonical_bson": "190000000261000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\" : \"abababababab\"}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": "190000000261000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : \"\\u2606\\u2606\\u2606\\u2606\"}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000261000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : \"ab\\u0000bab\\u0000babab\"}" + }, + { + "description": "Required escapes", + "canonical_bson" : "320000000261002600000061625C220102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F61620000", + "canonical_extjson" : "{\"a\":\"ab\\\\\\\"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\t\\n\\u000b\\f\\r\\u000e\\u000f\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001fab\"}" + } + ], + "decodeErrors": [ + { + "description": "bad string length: 0 (but no 0x00 either)", + "bson": "0C0000000261000000000000" + }, + { + "description": "bad string length: -1", + "bson": "0C000000026100FFFFFFFF00" + }, + { + "description": "bad string length: eats terminator", + "bson": "10000000026100050000006200620000" + }, + { + "description": "bad string length: longer than rest of document", + "bson": "120000000200FFFFFF00666F6F6261720000" + }, + { + "description": "string is not null-terminated", + "bson": "1000000002610004000000616263FF00" + }, + { + "description": "empty string, but extra null", + "bson": "0E00000002610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E00000002610002000000E90000" + } + ] +} diff --git a/test/bson_corpus/symbol.json b/test/bson_corpus/symbol.json new file mode 100644 index 0000000000..3dd3577ebd --- /dev/null +++ b/test/bson_corpus/symbol.json @@ -0,0 +1,80 @@ +{ + "description": "Symbol", + "bson_type": "0x0E", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D0000000E6100010000000000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"\"}}", + "converted_bson": "0D000000026100010000000000", + "converted_extjson": "{\"a\": \"\"}" + }, + { + "description": "Single character", + "canonical_bson": "0E0000000E610002000000620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"b\"}}", + "converted_bson": "0E00000002610002000000620000", + "converted_extjson": "{\"a\": \"b\"}" + }, + { + "description": "Multi-character", + "canonical_bson": "190000000E61000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"abababababab\"}}", + "converted_bson": "190000000261000D0000006162616261626162616261620000", + "converted_extjson": "{\"a\": \"abababababab\"}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000E61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"éééééé\"}}", + "converted_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "converted_extjson": "{\"a\": \"éééééé\"}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": "190000000E61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"☆☆☆☆\"}}", + "converted_bson": "190000000261000D000000E29886E29886E29886E298860000", + 
"converted_extjson": "{\"a\": \"☆☆☆☆\"}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000E61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"ab\\u0000bab\\u0000babab\"}}", + "converted_bson": "190000000261000D0000006162006261620062616261620000", + "converted_extjson": "{\"a\": \"ab\\u0000bab\\u0000babab\"}" + } + ], + "decodeErrors": [ + { + "description": "bad symbol length: 0 (but no 0x00 either)", + "bson": "0C0000000E61000000000000" + }, + { + "description": "bad symbol length: -1", + "bson": "0C0000000E6100FFFFFFFF00" + }, + { + "description": "bad symbol length: eats terminator", + "bson": "100000000E6100050000006200620000" + }, + { + "description": "bad symbol length: longer than rest of document", + "bson": "120000000E00FFFFFF00666F6F6261720000" + }, + { + "description": "symbol is not null-terminated", + "bson": "100000000E610004000000616263FF00" + }, + { + "description": "empty symbol, but extra null", + "bson": "0E0000000E610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E0000000E610002000000E90000" + } + ] +} diff --git a/test/bson_corpus/timestamp.json b/test/bson_corpus/timestamp.json new file mode 100644 index 0000000000..6f46564a32 --- /dev/null +++ b/test/bson_corpus/timestamp.json @@ -0,0 +1,34 @@ +{ + "description": "Timestamp type", + "bson_type": "0x11", + "test_key": "a", + "valid": [ + { + "description": "Timestamp: (123456789, 42)", + "canonical_bson": "100000001161002A00000015CD5B0700", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : 42} } }" + }, + { + "description": "Timestamp: (123456789, 42) (keys reversed)", + "canonical_bson": "100000001161002A00000015CD5B0700", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : 42} } }", + "degenerate_extjson": "{\"a\" : {\"$timestamp\" : {\"i\" : 42, \"t\" : 123456789} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment", + "canonical_bson": "10000000116100FFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4294967295, \"i\" : 4294967295} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment (not UINT32_MAX)", + "canonical_bson": "1000000011610000286BEE00286BEE00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4000000000, \"i\" : 4000000000} } }" + } + ], + "decodeErrors": [ + { + "description": "Truncated timestamp field", + "bson": "0f0000001161002A00000015CD5B00" + } + ] +} diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json new file mode 100644 index 0000000000..9c649b5e3f --- /dev/null +++ b/test/bson_corpus/top.json @@ -0,0 +1,266 @@ +{ + "description": "Top-level document validity", + "bson_type": "0x00", + "valid": [ + { + "description": "Dollar-prefixed key in top-level document", + "canonical_bson": "0F00000010246B6579002A00000000", + "canonical_extjson": "{\"$key\": {\"$numberInt\": \"42\"}}" + }, + { + "description": "Dollar as key in top-level document", + "canonical_bson": "0E00000002240002000000610000", + "canonical_extjson": "{\"$\": \"a\"}" + }, + { + "description": "Dotted key in top-level document", + "canonical_bson": "1000000002612E620002000000630000", + "canonical_extjson": "{\"a.b\": \"c\"}" + }, + { + "description": "Dot as key in top-level document", + "canonical_bson": "0E000000022E0002000000610000", + "canonical_extjson": "{\".\": \"a\"}" + } + ], + "decodeErrors": [ + { + "description": "An object size 
that's too small to even include the object size, but is a well-formed, empty object", + "bson": "0100000000" + }, + { + "description": "An object size that's only enough for the object size, but is a well-formed, empty object", + "bson": "0400000000" + }, + { + "description": "One object, with length shorter than size (missing EOO)", + "bson": "05000000" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01", + "bson": "0500000001" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff", + "bson": "05000000FF" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70", + "bson": "0500000070" + }, + { + "description": "Byte count is zero (with non-zero input length)", + "bson": "00000000000000000000" + }, + { + "description": "Stated length exceeds byte count, with truncated document", + "bson": "1200000002666F6F0004000000626172" + }, + { + "description": "Stated length less than byte count, with garbage after envelope", + "bson": "1200000002666F6F00040000006261720000DEADBEEF" + }, + { + "description": "Stated length exceeds byte count, with valid envelope", + "bson": "1300000002666F6F00040000006261720000" + }, + { + "description": "Stated length less than byte count, with valid envelope", + "bson": "1100000002666F6F00040000006261720000" + }, + { + "description": "Invalid BSON type low range", + "bson": "07000000000000" + }, + { + "description": "Invalid BSON type high range", + "bson": "07000000800000" + }, + { + "description": "Document truncated mid-key", + "bson": "1200000002666F" + }, + { + "description": "Null byte in document key", + "bson": "0D000000107800000100000000" + } + ], + "parseErrors": [ + { + "description" : "Bad $regularExpression (extra field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" + }, + { + "description" : "Bad $regularExpression (missing options field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\"}}}" + }, + { + "description": "Bad $regularExpression (pattern is number, not string)", + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"options\" : \"\"}}}" + }, + { + "description": "Bad $regularExpression (options are number, not string)", + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"options\" : 0}}}" + }, + { + "description" : "Bad $regularExpression (missing pattern field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"options\":\"ix\"}}}" + }, + { + "description": "Bad $oid (number, not string)", + "string": "{\"a\" : {\"$oid\" : 42}}" + }, + { + "description": "Bad $oid (extra field)", + "string": "{\"a\" : {\"$oid\" : \"56e1fc72e0c917e9c4714161\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberInt (number, not string)", + "string": "{\"a\" : {\"$numberInt\" : 42}}" + }, + { + "description": "Bad $numberInt (extra field)", + "string": "{\"a\" : {\"$numberInt\" : \"42\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberLong (number, not string)", + "string": "{\"a\" : {\"$numberLong\" : 42}}" + }, + { + "description": "Bad $numberLong (extra field)", + "string": "{\"a\" : {\"$numberLong\" : \"42\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberDouble (number, not string)", + "string": "{\"a\" : {\"$numberDouble\" : 42}}" + }, + { + "description": "Bad $numberDouble (extra field)", + "string": "{\"a\" : {\"$numberDouble\" : \".1\", 
\"unrelated\": true}}" + }, + { + "description": "Bad $numberDecimal (number, not string)", + "string": "{\"a\" : {\"$numberDecimal\" : 42}}" + }, + { + "description": "Bad $numberDecimal (extra field)", + "string": "{\"a\" : {\"$numberDecimal\" : \".1\", \"unrelated\": true}}" + }, + { + "description": "Bad $binary (binary is number, not string)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : 0, \"subType\" : \"00\"}}}" + }, + { + "description": "Bad $binary (type is number, not string)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"\", \"subType\" : 0}}}" + }, + { + "description": "Bad $binary (missing $type)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"//8=\"}}}" + }, + { + "description": "Bad $binary (missing $binary)", + "string": "{\"x\" : {\"$binary\" : {\"subType\" : \"00\"}}}" + }, + { + "description": "Bad $binary (extra field)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"//8=\", \"subType\" : 0, \"unrelated\": true}}}" + }, + { + "description": "Bad $code (type is number, not string)", + "string": "{\"a\" : {\"$code\" : 42}}" + }, + { + "description": "Bad $code (type is number, not string) when $scope is also present", + "string": "{\"a\" : {\"$code\" : 42, \"$scope\" : {}}}" + }, + { + "description": "Bad $code (extra field)", + "string": "{\"a\" : {\"$code\" : \"\", \"unrelated\": true}}" + }, + { + "description": "Bad $code with $scope (scope is number, not doc)", + "string": "{\"x\" : {\"$code\" : \"\", \"$scope\" : 42}}" + }, + { + "description": "Bad $timestamp (type is number, not doc)", + "string": "{\"a\" : {\"$timestamp\" : 42} }" + }, + { + "description": "Bad $timestamp ('t' type is string, not number)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : 42} } }" + }, + { + "description": "Bad $timestamp ('i' type is string, not number)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : \"42\"} } }" + }, + { + "description": "Bad $timestamp (extra field at same level as $timestamp)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : \"42\"}, \"unrelated\": true } }" + }, + { + "description": "Bad $timestamp (extra field at same level as t and i)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : \"42\", \"unrelated\": true} } }" + }, + { + "description": "Bad $timestamp (missing t)", + "string": "{\"a\" : {\"$timestamp\" : {\"i\" : \"42\"} } }" + }, + { + "description": "Bad $timestamp (missing i)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\"} } }" + }, + { + "description": "Bad $date (number, not string or hash)", + "string": "{\"a\" : {\"$date\" : 42}}" + }, + { + "description": "Bad $date (extra field)", + "string": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}, \"unrelated\": true}}" + }, + { + "description": "Bad $minKey (boolean, not integer)", + "string": "{\"a\" : {\"$minKey\" : true}}" + }, + { + "description": "Bad $minKey (wrong integer)", + "string": "{\"a\" : {\"$minKey\" : 0}}" + }, + { + "description": "Bad $minKey (extra field)", + "string": "{\"a\" : {\"$minKey\" : 1, \"unrelated\": true}}" + }, + { + "description": "Bad $maxKey (boolean, not integer)", + "string": "{\"a\" : {\"$maxKey\" : true}}" + }, + { + "description": "Bad $maxKey (wrong integer)", + "string": "{\"a\" : {\"$maxKey\" : 0}}" + }, + { + "description": "Bad $maxKey (extra field)", + "string": "{\"a\" : {\"$maxKey\" : 1, \"unrelated\": true}}" + }, + { + "description": "Bad DBpointer (extra field)", + "string": "{\"a\": 
{\"$dbPointer\": {\"a\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"c\": {\"$numberInt\": \"2\"}, \"$ref\": \"b\"}}}" + }, + { + "description" : "Null byte in document key", + "string" : "{\"a\\u0000\": 1 }" + }, + { + "description" : "Null byte in sub-document key", + "string" : "{\"a\" : {\"b\\u0000\": 1 }}" + }, + { + "description": "Null byte in $regularExpression pattern", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\\u0000\", \"options\" : \"i\"}}}" + }, + { + "description": "Null byte in $regularExpression options", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\", \"options\" : \"i\\u0000\"}}}" + } + ] +} diff --git a/test/bson_corpus/undefined.json b/test/bson_corpus/undefined.json new file mode 100644 index 0000000000..285f068258 --- /dev/null +++ b/test/bson_corpus/undefined.json @@ -0,0 +1,15 @@ +{ + "description": "Undefined type (deprecated)", + "bson_type": "0x06", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "Undefined", + "canonical_bson": "0800000006610000", + "canonical_extjson": "{\"a\" : {\"$undefined\" : true}}", + "converted_bson": "080000000A610000", + "converted_extjson": "{\"a\" : null}" + } + ] +} diff --git a/test/certificates/ca.pem b/test/certificates/ca.pem index f739ef0627..24beea2d48 100644 --- a/test/certificates/ca.pem +++ b/test/certificates/ca.pem @@ -1,17 +1,21 @@ -----BEGIN CERTIFICATE----- -MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC -VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w -DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0 -IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz -MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI -DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH -ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx -GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB -jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27 -nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz -hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN -BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM -hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB -2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E -qQ== +MIIDfzCCAmegAwIBAgIDB1MGMA0GCSqGSIb3DQEBCwUAMHkxGzAZBgNVBAMTEkRy +aXZlcnMgVGVzdGluZyBDQTEQMA4GA1UECxMHRHJpdmVyczEQMA4GA1UEChMHTW9u +Z29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsx +CzAJBgNVBAYTAlVTMB4XDTE5MDUyMjIwMjMxMVoXDTM5MDUyMjIwMjMxMVoweTEb +MBkGA1UEAxMSRHJpdmVycyBUZXN0aW5nIENBMRAwDgYDVQQLEwdEcml2ZXJzMRAw +DgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQI +EwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCl7VN+WsQfHlwapcOpTLZVoeMAl1LTbWTFuXSAavIyy0W1Ytky1UP/ +bxCSW0mSWwCgqoJ5aXbAvrNRp6ArWu3LsTQIEcD3pEdrFIVQhYzWUs9fXqPyI9k+ +QNNQ+MRFKeGteTPYwF2eVEtPzUHU5ws3+OKp1m6MCLkwAG3RBFUAfddUnLvGoZiT +pd8/eNabhgHvdrCw+tYFCWvSjz7SluEVievpQehrSEPKe8DxJq/IM3tSl3tdylzT +zeiKNO7c7LuQrgjAfrZl7n2SriHIlNmqiDR/kdd8+TxBuxjFlcf2WyHCO3lIcIgH +KXTlhUCg50KfHaxHu05Qw0x8869yIzqbAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggEBAEHuhTL8KQZcKCTSJbYA9MgZj7U32arMGBbc1hiq +VBREwvdVz4+9tIyWMzN9R/YCKmUTnCq8z3wTlC8kBtxYn/l4Tj8nJYcgLJjQ0Fwe +gT564CmvkUat8uXPz6olOCdwkMpJ9Sj62i0mpgXJdBfxKQ6TZ9yGz6m3jannjZpN +LchB7xSAEWtqUgvNusq0dApJsf4n7jZ+oBZVaQw2+tzaMfaLqHgMwcu1FzA8UKCD 
+sxCgIsZUs8DdxaD418Ot6nPfheOTqe24n+TTa+Z6O0W0QtnofJBx7tmAo1aEc57i +77s89pfwIJetpIlhzNSMKurCAocFCJMJLAASJFuu6dyDvPo= -----END CERTIFICATE----- diff --git a/test/certificates/client.pem b/test/certificates/client.pem index 3157a6819e..5b07001092 100644 --- a/test/certificates/client.pem +++ b/test/certificates/client.pem @@ -1,101 +1,48 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 7 (0x7) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus - Validity - Not Before: Aug 23 14:55:32 2013 GMT - Not After : Jan 7 14:55:32 2041 GMT - Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a: - 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1: - 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51: - 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f: - 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79: - 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c: - 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae: - 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd: - c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53: - a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31: - 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35: - 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98: - be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18: - b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe: - a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15: - 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2: - 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4: - 6e:a7 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - OpenSSL Generated Certificate - X509v3 Subject Key Identifier: - 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C - X509v3 Authority Key Identifier: - keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16 - - Signature Algorithm: sha1WithRSAEncryption - 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d: - f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5: - f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db: - af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87: - 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35: - 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa: - 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85: - 24:18 +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsNS8UEuin7/K29jXfIOLpIoh1jEyWVqxiie2Onx7uJJKcoKo +khA3XeUnVN0k6X5MwYWcN52xcns7LYtyt06nRpTG2/emoV44w9uKTuHsvUbiOwSV +m/ToKQQ4FUFZoqorXH+ZmJuIpJNfoW+3CkE1vEDCIecIq6BNg5ySsPtvSuSJHGjp +mc7/5ZUDvFE2aJ8QbJU3Ws0HXiEb6ymi048LlzEL2VKX3w6mqqh+7dcZGAy7qYk2 +5FZ9ktKvCeQau7mTyU1hsPrKFiKtMN8Q2ZAItX13asw5/IeSTq2LgLFHlbj5Kpq4 +GmLdNCshzH5X7Ew3IYM8EHmsX8dmD6mhv7vpVwIDAQABAoIBABOdpb4qhcG+3twA +c/cGCKmaASLnljQ/UU6IFTjrsjXJVKTbRaPeVKX/05sgZQXZ0t3s2mV5AsQ2U1w8 +Cd+3w+qaemzQThW8hAOGCROzEDX29QWi/o2sX0ydgTMqaq0Wv3SlWv6I0mGfT45y +/BURIsrdTCvCmz2erLqa1dL4MWJXRFjT9UTs5twlecIOM2IHKoGGagFhymRK4kDe +wTRC9fpfoAgyfus3pCO/wi/F8yKGPDEwY+zgkhrJQ+kSeki7oKdGD1H540vB8gRt +EIqssE0Y6rEYf97WssQlxJgvoJBDSftOijS6mwvoasDUwfFqyyPiirawXWWhHXkc +DjIi/XECgYEA5xfjilw9YyM2UGQNESbNNunPcj7gDZbN347xJwmYmi9AUdPLt9xN +3XaMqqR22k1DUOxC/5hH0uiXir7mDfqmC+XS/ic/VOsa3CDWejkEnyGLiwSHY502 +wD/xWgHwUiGVAG9HY64vnDGm6L3KGXA2oqxanL4V0+0+Ht49pZ16i8sCgYEAw+Ox +CHGtpkzjCP/z8xr+1VTSdpc/4CP2HONnYopcn48KfQnf7Nale69/1kZpypJlvQSG +eeA3jMGigNJEkb8/kaVoRLCisXcwLc0XIfCTeiK6FS0Ka30D/84Qm8UsHxRdpGkM 
+kYITAa2r64tgRL8as4/ukeXBKE+oOhX43LeEfyUCgYBkf7IX2Ndlhsm3GlvIarxy +NipeP9PGdR/hKlPbq0OvQf9R1q7QrcE7H7Q6/b0mYNV2mtjkOQB7S2WkFDMOP0P5 +BqDEoKLdNkV/F9TOYH+PCNKbyYNrodJOt0Ap6Y/u1+Xpw3sjcXwJDFrO+sKqX2+T +PStG4S+y84jBedsLbDoAEwKBgQCTz7/KC11o2yOFqv09N+WKvBKDgeWlD/2qFr3w +UU9K5viXGVhqshz0k5z25vL09Drowf1nAZVpFMO2SPOMtq8VC6b+Dfr1xmYIaXVH +Gu1tf77CM9Zk/VSDNc66e7GrUgbHBK2DLo+A+Ld9aRIfTcSsMbNnS+LQtCrQibvb +cG7+MQKBgQCY11oMT2dUekoZEyW4no7W5D74lR8ztMjp/fWWTDo/AZGPBY6cZoZF +IICrzYtDT/5BzB0Jh1f4O9ZQkm5+OvlFbmoZoSbMzHL3oJCBOY5K0/kdGXL46WWh +IRJSYakNU6VIS7SjDpKgm9D8befQqZeoSggSjIIULIiAtYgS80vmGA== +-----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx -ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD -VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 -dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0 -NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l -dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET -MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b -qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM -zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V -rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad -STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B -MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ -BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 -aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw -FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54 -xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb -r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh -9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg= +MIIDgzCCAmugAwIBAgIDAxOUMA0GCSqGSIb3DQEBCwUAMHkxGzAZBgNVBAMTEkRy +aXZlcnMgVGVzdGluZyBDQTEQMA4GA1UECxMHRHJpdmVyczEQMA4GA1UEChMHTW9u +Z29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsx +CzAJBgNVBAYTAlVTMB4XDTE5MDUyMjIzNTU1NFoXDTM5MDUyMjIzNTU1NFowaTEP +MA0GA1UEAxMGY2xpZW50MRAwDgYDVQQLEwdEcml2ZXJzMQwwCgYDVQQKEwNNREIx +FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD +VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALDUvFBLop+/ +ytvY13yDi6SKIdYxMllasYontjp8e7iSSnKCqJIQN13lJ1TdJOl+TMGFnDedsXJ7 +Oy2LcrdOp0aUxtv3pqFeOMPbik7h7L1G4jsElZv06CkEOBVBWaKqK1x/mZibiKST +X6FvtwpBNbxAwiHnCKugTYOckrD7b0rkiRxo6ZnO/+WVA7xRNmifEGyVN1rNB14h +G+spotOPC5cxC9lSl98Opqqofu3XGRgMu6mJNuRWfZLSrwnkGru5k8lNYbD6yhYi +rTDfENmQCLV9d2rMOfyHkk6ti4CxR5W4+SqauBpi3TQrIcx+V+xMNyGDPBB5rF/H +Zg+pob+76VcCAwEAAaMkMCIwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF +BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQAqRcLAGvYMaGYOV4HJTzNotT2qE0I9THNQ +wOV1fBg69x6SrUQTQLjJEptpOA288Wue6Jt3H+p5qAGV5GbXjzN/yjCoItggSKxG +Xg7279nz6/C5faoIKRjpS9R+MsJGlttP9nUzdSxrHvvqm62OuSVFjjETxD39DupE +YPFQoHOxdFTtBQlc/zIKxVdd20rs1xJeeU2/L7jtRBSPuR/Sk8zot7G2/dQHX49y +kHrq8qz12kj1T6XDXf8KZawFywXaz0/Ur+fUYKmkVk1T0JZaNtF4sKqDeNE4zcns +p3xLVDSl1Q5Gwj7bgph9o4Hxs9izPwiqjmNaSjPimGYZ399zcurY -----END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee -p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y -LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j -mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW -WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9 
-jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+ -flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4 -H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m -2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4 -tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU -w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S -eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/ -vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC -yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn -LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s -9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo -czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS -q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop -59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4 -9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9 -SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn -X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU -0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52 -re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT -F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3 -57rGT6p0OuM8qbrTzpv3JMrm ------END PRIVATE KEY----- diff --git a/test/certificates/crl.pem b/test/certificates/crl.pem new file mode 100644 index 0000000000..733a0acdc0 --- /dev/null +++ b/test/certificates/crl.pem @@ -0,0 +1,13 @@ +-----BEGIN X509 CRL----- +MIIB6jCB0wIBATANBgkqhkiG9w0BAQsFADB5MRswGQYDVQQDExJEcml2ZXJzIFRl +c3RpbmcgQ0ExEDAOBgNVBAsTB0RyaXZlcnMxEDAOBgNVBAoTB01vbmdvREIxFjAU +BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG +EwJVUxcNMTkwNTIyMjI0NTUzWhcNMTkwNjIxMjI0NTUzWjAVMBMCAncVFw0xOTA1 +MjIyMjQ1MzJaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACwQ +W9OF6ExJSzzYbpCRroznkfdLG7ghNSxIpBQUGtcnYbkP4em6TdtAj5K3yBjcKn4a +hnUoa5EJGr2Xgg0QascV/1GuWEJC9rsYYB9boVi95l1CrkS0pseaunM086iItZ4a +hRVza8qEMBc3rdsracA7hElYMKdFTRLpIGciJehXzv40yT5XFBHGy/HIT0CD50O7 +BDOHzA+rCFCvxX8UY9myDfb1r1zUW7Gzjn241VT7bcIJmhFE9oV0popzDyqr6GvP +qB2t5VmFpbnSwkuc4ie8Jizip1P8Hg73lut3oVAHACFGPpfaNIAp4GcSH61zJmff +9UBe3CJ1INwqyiuqGeA= +-----END X509 CRL----- diff --git a/test/certificates/password_protected.pem b/test/certificates/password_protected.pem new file mode 100644 index 0000000000..cc9e124703 --- /dev/null +++ b/test/certificates/password_protected.pem @@ -0,0 +1,51 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQIC8as6PDVhwECAggA +MB0GCWCGSAFlAwQBAgQQTYOgCJcRqUI7dsgqNojv/ASCBNCG9fiu642V4AuFK34c +Q42lvy/cR0CIXLq/rDXN1L685kdeKex7AfDuRtnjY2+7CLJiJimgQNJXDJPHab/k +MBHbwbBs38fg6eSYX8V08/IyyTege5EJMhYxmieHDC3DXKt0gyHk6hA/r5+Mr49h +HeVGwqBLJEQ3gVIeHaOleZYspsXXWqOPHnFiqnk/biaJS0+LkDDEiQgTLEYSnOjP +lexxUc4BV/TN0Z920tZCMfwx7IXD/C+0AkV/Iqq4LALmT702EccB3indaIJ8biGR +radqDLR32Q+vT9uZHgT8EFiUsISMqhob2mnyTfFV/s9ghWwogjSz0HrRcq6fxdg7 +oeyT9K0ET53AGTGmV0206byPu6qCj1eNvtn+t1Ob+d5hecaTugRMVheWPlc5frsz +AcewDNa0pv4pZItjAGMqOPJHfzEDnzTJXpLqGYhg044H1+OCY8+1YK7U0u8dO+/3 +f5AoDMq18ipDVTFTooJURej4/Wjbrfad3ZFjp86nxfHPeWM1YjC9+IlLtK1wr0/U +V8TjGqCkw8yHayz01A86iA8X53YQBg+tyMGjxmivo6LgFGKa9mXGvDkN+B+0+OcA +PqldAuH/TJhnkqzja767e4n9kcr+TmV19Hn1hcJPTDrRU8+sSqQFsWN4pvHazAYB +UdWie+EXI0eU2Av9JFgrVcpRipXjB48BaPwuBw8hm+VStCH7ynF4lJy6/3esjYwk +Mx+NUf8+pp1DRzpzuJa2vAutzqia5r58+zloQMxkgTZtJkQU6OCRoUhHGVk7WNb1 +nxsibOSzyVSP9ZNbHIHAn43vICFGrPubRs200Kc4CdXsOSEWoP0XYebhiNJgGtQs 
+KoISsV4dFRLwhaJhIlayTBQz6w6Ph87WbtuiAqoLiuqdXhUGz/79j/6JZqCH8t/H +eZs4Dhu+HdD/wZKJDYAS+JBsiwYWnI3y/EowZYgLdOMI4u6xYDejhxwEw20LW445 +qjJ7pV/iX2uavazHgC91Bfd4zodfXIQ1IDyTmb51UFwx0ARzG6enntduO6xtcYU9 +MXwfrEpuZ/MkWTLkR0PHPbIPcR1MiVwPKdvrLk42Bzj/urtXYrAFUckMFMzEh+uv +0lix2hbq/Xwj4dXcY4w9hnC6QQDCJTf9S6MU6OisrZHKk0qZ2Vb4aU/eBcBsHBwo +X/QGcDHneHxlrrs2eLX26Vh8Odc5h8haeIxnfaa1t+Yv56OKHuAztPMnJOUL7KtQ +A556LxT0b5IGx0RcfUcbG8XbxEHseACptoDOoguh9923IBI0uXmpi8q0P815LPUu +0AsE47ATDMGPnXbopejRDicfgMGjykJn8vKO8r/Ia3Fpnomx4iJNCXGqomL+GMpZ +IhQbKNrRG6XZMlx5kVCT0Qr1nOWMiOTSDCQ5vrG3c1Viu+0bctvidEvs+LCm98tb +7ty8F0uOno0rYGNQz18OEE1Tj+E19Vauz1U35Z5SsgJJ/GfzhSJ79Srmdg2PsAzk +AUNTKXux1GLf1cMjTiiU5g+tCEtUL9Me7lsv3L6aFdrCyRbhXUQfJh4NAG8+3Pvh +EaprThBzKsVvbOfU81mOaH9YMmUgmxG86vxDiNtaWd4v6c1k+HGspJr/q49pcXZP +ltBMuS9AihstZ1sHJsyQCmNXkA== +-----END ENCRYPTED PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDgzCCAmugAwIBAgIDBXUHMA0GCSqGSIb3DQEBCwUAMHkxGzAZBgNVBAMTEkRy +aXZlcnMgVGVzdGluZyBDQTEQMA4GA1UECxMHRHJpdmVyczEQMA4GA1UEChMHTW9u +Z29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsx +CzAJBgNVBAYTAlVTMB4XDTE5MDUyMzAwMDEyOVoXDTM5MDUyMzAwMDEyOVowaTEP +MA0GA1UEAxMGY2xpZW50MRAwDgYDVQQLEwdEcml2ZXJzMQwwCgYDVQQKEwNNREIx +FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD +VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOqCb0Lo4XsV +W327Wlnqc5rwWa5Elw0rFuehSfViRIcYfuFWAPXoOj3fIDsYz6d41G8hp6tkF88p +swlbzDF8Fc7mXDhauwwl2F/NrWYUXwCT8fKju4DtGd2JlDMi1TRDeofkYCGVPp70 +vNqd0H8iDWWs8OmiNrdBLJwNiGaf9y15ena4ImQGitXLFn+qNSXYJ1Rs8p7Y2PTr +L+dff5gJCVbANwGII1rjMAsrMACPVmr8c1Lxoq4fSdJiLweosrv2Lk0WWGsO0Seg +ZY71dNHEyNjItE+VtFEtslJ5L261i3BfF/FqNnH2UmKXzShwfwxyHT8o84gSAltQ +5/lVJ4QQKosCAwEAAaMkMCIwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF +BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQBOAlKxIMFcTZ+4k8NJv97RSf+zOb5Wu2ct +uxSZxzgKTxLFUuEM8XQiEz1iHQ3XG+uV1fzA74YLQiKjjLrU0mx54eM1vaRtOXvF +sJlzZU8Z2+523FVPx4HBPyObQrfXmIoAiHoQ4VUeepkPRpXxpifgWd/OCWhLDr2/ +0Kgcb0ybaGVDpA0UD9uVIwgFjRu6id7wG+lVcdRxJYskTOOaN2o1hMdAKkrpFQbd +zNRfEoBPUYR3QAmAKP2HBjpgp4ktOHoOKMlfeAuuMCUocSnmPKc3xJaH/6O7rHcf +/Rm0X411RH8JfoXYsSiPsd601kZefhuWvJH0sJLibRDvT7zs8C1v +-----END CERTIFICATE----- diff --git a/test/certificates/server.pem b/test/certificates/server.pem new file mode 100644 index 0000000000..e745e037fc --- /dev/null +++ b/test/certificates/server.pem @@ -0,0 +1,49 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAhNrB0E6GY/kFSd8/vNpu/t952tbnOsD5drV0XPvmuy7SgKDY +a/S+xb/jPnlZKKehdBnH7qP/gYbv34ZykzcDFZscjPLiGc2cRGP+NQCSFK0d2/7d +y15zSD3zhj14G8+MkpAejTU+0/qFNZMc5neDvGanTe0+8aWa0DXssM0MuTxIv7j6 +CtsMWeqLLofN7a1Kw2UvmieCHfHMuA/08pJwRnV/+5T9WONBPJja2ZQRrG1BjpI4 +81zSPUZesIqi8yDlExdvgNaRZIEHi/njREqwVgJOZomUY57zmKypiMzbz48dDTsV +gUStxrEqbaP+BEjQYPX5+QQk4GdMjkLf52LR6QIDAQABAoIBAHSs+hHLJNOf2zkp +S3y8CUblVMsQeTpsR6otaehPgi9Zy50TpX4KD5D0GMrBH8BIl86y5Zd7h+VlcDzK +gs0vPxI2izhuBovKuzaE6rf5rFFkSBjxGDCG3o/PeJOoYFdsS3RcBbjVzju0hFCs +xnDQ/Wz0anJRrTnjyraY5SnQqx/xuhLXkj/lwWoWjP2bUqDprnuLOj16soNu60Um +JziWbmWx9ty0wohkI/8DPBl9FjSniEEUi9pnZXPElFN6kwPkgdfT5rY/TkMH4lsu +ozOUc5xgwlkT6kVjXHcs3fleuT/mOfVXLPgNms85JKLucfd6KiV7jYZkT/bXIjQ+ +7CZEn0ECgYEA5QiKZgsfJjWvZpt21V/i7dPje2xdwHtZ8F9NjX7ZUFA7mUPxUlwe +GiXxmy6RGzNdnLOto4SF0/7ebuF3koO77oLup5a2etL+y/AnNAufbu4S5D72sbiz +wdLzr3d5JQ12xeaEH6kQNk2SD5/ShctdS6GmTgQPiJIgH0MIdi9F3v0CgYEAlH84 +hMWcC+5b4hHUEexeNkT8kCXwHVcUjGRaYFdSHgovvWllApZDHSWZ+vRcMBdlhNPu +09Btxo99cjOZwGYJyt20QQLGc/ZyiOF4ximQzabTeFgLkTH3Ox6Mh2Rx9yIruYoX +nE3UfMDkYELanEJUv0zenKpZHw7tTt5yXXSlEF0CgYBSsEOvVcKYO/eoluZPYQAA 
+F2jgzZ4HeUFebDoGpM52lZD+463Dq2hezmYtPaG77U6V3bUJ/TWH9VN/Or290vvN +v83ECcC2FWlSXdD5lFyqYx/E8gqE3YdgqfW62uqM+xBvoKsA9zvYLydVpsEN9v8m +6CSvs/2btA4O21e5u5WBTQKBgGtAb6vFpe0gHRDs24SOeYUs0lWycPhf+qFjobrP +lqnHpa9iPeheat7UV6BfeW3qmBIVl/s4IPE2ld4z0qqZiB0Tf6ssu/TpXNPsNXS6 +dLFz+myC+ufFdNEoQUtQitd5wKbjTCZCOGRaVRgJcSdG6Tq55Fa22mOKPm+mTmed +ZdKpAoGAFsTYBAHPxs8nzkCJCl7KLa4/zgbgywO6EcQgA7tfelB8bc8vcAMG5o+8 +YqAfwxrzhVSVbJx0fibTARXROmbh2pn010l2wj3+qUajM8NiskCPFbSjGy7HSUze +P8Kt1uMDJdj55gATzn44au31QBioZY2zXleorxF21cr+BZCJgfA= +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDlTCCAn2gAwIBAgICdxUwDQYJKoZIhvcNAQELBQAweTEbMBkGA1UEAxMSRHJp +dmVycyBUZXN0aW5nIENBMRAwDgYDVQQLEwdEcml2ZXJzMRAwDgYDVQQKEwdNb25n +b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL +MAkGA1UEBhMCVVMwHhcNMTkwNTIyMjIzMjU2WhcNMzkwNTIyMjIzMjU2WjBwMRIw +EAYDVQQDEwlsb2NhbGhvc3QxEDAOBgNVBAsTB0RyaXZlcnMxEDAOBgNVBAoTB01v +bmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3Jr +MQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAITa +wdBOhmP5BUnfP7zabv7fedrW5zrA+Xa1dFz75rsu0oCg2Gv0vsW/4z55WSinoXQZ +x+6j/4GG79+GcpM3AxWbHIzy4hnNnERj/jUAkhStHdv+3ctec0g984Y9eBvPjJKQ +Ho01PtP6hTWTHOZ3g7xmp03tPvGlmtA17LDNDLk8SL+4+grbDFnqiy6Hze2tSsNl +L5ongh3xzLgP9PKScEZ1f/uU/VjjQTyY2tmUEaxtQY6SOPNc0j1GXrCKovMg5RMX +b4DWkWSBB4v540RKsFYCTmaJlGOe85isqYjM28+PHQ07FYFErcaxKm2j/gRI0GD1 ++fkEJOBnTI5C3+di0ekCAwEAAaMwMC4wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/ +AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUAA4IBAQBol8+YH7MA +HwnIh7KcJ8h87GkCWsjOJCDJWiYBJArQ0MmgDO0qdx+QEtvLMn3XNtP05ZfK0WyX +or4cWllAkMFYaFbyB2hYazlD1UAAG+22Rku0UP6pJMLbWe6pnqzx+RL68FYdbZhN +fCW2xiiKsdPoo2VEY7eeZKrNr/0RFE5EKXgzmobpTBQT1Dl3Ve4aWLoTy9INlQ/g +z40qS7oq1PjjPLgxINhf4ncJqfmRXugYTOnyFiVXLZTys5Pb9SMKdToGl3NTYWLL +2AZdjr6bKtT+WtXyHqO0cQ8CkAW0M6VOlMluACllcJxfrtdlQS2S4lUIj76QKBdZ +khBHXq/b8MFX +-----END CERTIFICATE----- diff --git a/test/certificates/trusted-ca.pem b/test/certificates/trusted-ca.pem new file mode 100644 index 0000000000..a6f6f312d0 --- /dev/null +++ b/test/certificates/trusted-ca.pem @@ -0,0 +1,82 @@ +# CA bundle file used to test tlsCAFile loading for OCSP. +# Copied from the server: +# https://github.com/mongodb/mongo/blob/r4.3.4/jstests/libs/trusted-ca.pem + +# Autogenerated file, do not edit. +# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml trusted-ca.pem +# +# CA for alternate client/server certificate chain. 
+-----BEGIN CERTIFICATE----- +MIIDojCCAooCBG585gswDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxETAP +BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK +DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxHzAdBgNVBAMMFlRydXN0ZWQgS2Vy +bmVsIFRlc3QgQ0EwHhcNMTkwOTI1MjMyNzQxWhcNMzkwOTI3MjMyNzQxWjB8MQsw +CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr +IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UE +AwwWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANlRxtpMeCGhkotkjHQqgqvO6O6hoRoAGGJlDaTVtqrjmC8nwySz +1nAFndqUHttxS3A5j4enOabvffdOcV7+Z6vDQmREF6QZmQAk81pmazSc3wOnRiRs +AhXjld7i+rhB50CW01oYzQB50rlBFu+ONKYj32nBjD+1YN4AZ2tuRlbxfx2uf8Bo +Zowfr4n9nHVcWXBLFmaQLn+88WFO/wuwYUOn6Di1Bvtkvqum0or5QeAF0qkJxfhg +3a4vBnomPdwEXCgAGLvHlB41CWG09EuAjrnE3HPPi5vII8pjY2dKKMomOEYmA+KJ +AC1NlTWdN0TtsoaKnyhMMhLWs3eTyXL7kbkCAwEAAaMxMC8wDAYDVR0TBAUwAwEB +/zAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTANBgkqhkiG9w0BAQsF +AAOCAQEAQk56MO9xAhtO077COCqIYe6pYv3uzOplqjXpJ7Cph7GXwQqdFWfKls7B +cLfF/fhIUZIu5itStEkY+AIwht4mBr1F5+hZUp9KZOed30/ewoBXAUgobLipJV66 +FKg8NRtmJbiZrrC00BSO+pKfQThU8k0zZjBmNmpjxnbKZZSFWUKtbhHV1vujver6 +SXZC7R6692vLwRBMoZxhgy/FkYRdiN0U9wpluKd63eo/O02Nt6OEMyeiyl+Z3JWi +8g5iHNrBYGBbGSnDOnqV6tjEY3eq600JDWiodpA1OQheLi78pkc/VQZwof9dyBCm +6BoCskTjip/UB+vIhdPFT9sgUdgDTg== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDZUcbaTHghoZKL +ZIx0KoKrzujuoaEaABhiZQ2k1baq45gvJ8Mks9ZwBZ3alB7bcUtwOY+Hpzmm7333 +TnFe/merw0JkRBekGZkAJPNaZms0nN8Dp0YkbAIV45Xe4vq4QedAltNaGM0AedK5 +QRbvjjSmI99pwYw/tWDeAGdrbkZW8X8drn/AaGaMH6+J/Zx1XFlwSxZmkC5/vPFh +Tv8LsGFDp+g4tQb7ZL6rptKK+UHgBdKpCcX4YN2uLwZ6Jj3cBFwoABi7x5QeNQlh +tPRLgI65xNxzz4ubyCPKY2NnSijKJjhGJgPiiQAtTZU1nTdE7bKGip8oTDIS1rN3 +k8ly+5G5AgMBAAECggEAS7GjLKgT88reSzUTgubHquYf1fZwMak01RjTnsVdoboy +aMJVwzPsjgo2yEptUQvuNcGmz54cg5vJaVlmPaspGveg6WGaRmswEo/MP4GK98Fo +IFKkKM2CEHO74O14XLN/w8yFA02+IdtM3X/haEFE71VxXNmwawRXIBxN6Wp4j5Fb +mPLKIspnWQ/Y/Fn799sCFAzX5mKkbCt1IEgKssgQQEm1UkvmCkcZE+mdO/ErYP8A +COO0LpM+TK6WQY2LKiteeCCiosTZFb1GO7MkXrRP5uOBZKaW5kq1R0b6PcopJPCM +OcYF0Zli6KB7oiQLdXgU2jCaxYOnuRb6RYh2l7NvAQKBgQD6CZ9TKOn/EUQtukyw +pvYTyt1hoLXqYGcbRtLc1gcC+Z2BD28hd3eD/mEUv+g/8bq/OP4wYV9X+VRvR8xN +MmfAG/sJeOCOClz1A1TyNeA+G0GZ25qWHyHQ2W4WlSG1CXQgxGzU6wo/t6wiVW5R +O4jplFVEOXznf4vmVfBJK50R2QKBgQDegGxm23jF2N5sIYDZ14oxms8bbjPz8zH6 +tiIRYNGbSzI7J4KFGY2HiBwtf1yxS22HBL69Y1WrEzGm1vm4aZG/GUwBzI79QZAO ++YFIGaIrdlv12Zm6lpJMmAWlOs9XFirC17oQEwOQFweOdQSt7F/+HMZOigdikRBV +pK+8Kfay4QKBgQDarDevHwUmkg8yftA7Xomv3aenjkoK5KzH6jTX9kbDj1L0YG8s +sbLQuVRmNUAFTH+qZUnJPh+IbQIvIHfIu+CI3u+55QFeuCl8DqHoAr5PEr9Ys/qK +eEe2w7HIBj0oe1AYqDEWNUkNWLEuhdCpMowW3CeGN1DJlX7gvyAang4MYQKBgHwM +aWNnFQxo/oiWnTnWm2tQfgszA7AMdF7s0E2UBwhnghfMzU3bkzZuwhbznQATp3rR +QG5iRU7dop7717ni0akTN3cBTu8PcHuIy3UhJXLJyDdnG/gVHnepgew+v340E58R +muB/WUsqK8JWp0c4M8R+0mjTN47ShaLZ8EgdtTbBAoGBAKOcpuDfFEMI+YJgn8zX +h0nFT60LX6Lx+zcSDY9+6J6a4n5NhC+weYCDFOGlsLka1SwHcg1xanfrLVjpH7Ok +HDJGLrSh1FP2Rq/oFxZ/OKCjonHLa8IulqD/AA+sqYRbysKNsT3Pi0554F2xFEqQ +z/C84nlT1R2uTCWIxvrnpU2h +-----END PRIVATE KEY----- +# Pre Oct 2019 trusted-ca.pem +# Transitional pending BUILD update. 
+-----BEGIN CERTIFICATE----- +MIIDpjCCAo6gAwIBAgIDAghHMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy +dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH +TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv +cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE0NTY1NVoXDTM2MDMzMTE0NTY1NVow +fDEfMB0GA1UEAxMWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2Vy +bmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREw +DwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCePFHZTydC96SlSHSyu73vw//ddaE33kPllBB9DP2L7yRF +6D/blFmno9fSM+Dfg64VfGV+0pCXPIZbpH29nzJu0DkvHzKiWK7P1zUj8rAHaX++ +d6k0yeTLFM9v+7YE9rHoANVn22aOyDvTgAyMmA0CLn+SmUy6WObwMIf9cZn97Znd +lww7IeFNyK8sWtfsVN4yRBnjr7kKN2Qo0QmWeFa7jxVQptMJQrY8k1PcyVUOgOjQ +ocJLbWLlm9k0/OMEQSwQHJ+d9weUbKjlZ9ExOrm4QuuA2tJhb38baTdAYw3Jui4f +yD6iBAGD0Jkpc+3YaWv6CBmK8NEFkYJD/gn+lJ75AgMBAAGjMTAvMAwGA1UdEwQF +MAMBAf8wHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcN +AQEFBQADggEBADYikjB6iwAUs6sglwkE4rOkeMkJdRCNwK/5LpFJTWrDjBvBQCdA +Y5hlAVq8PfIYeh+wEuSvsEHXmx7W29X2+p4VuJ95/xBA6NLapwtzuiijRj2RBAOG +1EGuyFQUPTL27DR3+tfayNykDclsVDNN8+l7nt56j8HojP74P5OMHtn+6HX5+mtF +FfZMTy0mWguCsMOkZvjAskm6s4U5gEC8pYEoC0ZRbfUdyYsxZe/nrXIFguVlVPCB +XnfB/0iG9t+VH5cUVj1LP9skXTW4kXfhQmljUuo+EVBNR6n2nfTnpoC65WeAgHV4 +V+s9mJsUv2x72KtKYypqEVT0gaJ1WIN9N1s= +-----END CERTIFICATE----- diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json new file mode 100644 index 0000000000..2b09e548f1 --- /dev/null +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -0,0 +1,81 @@ +{ + "description": "change-streams-clusterTime", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "clusterTime is present", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "ns": { + "db": "database0", + "coll": "collection0" + }, + "clusterTime": { + "$$exists": true + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json new file mode 100644 index 0000000000..a8667b5436 --- /dev/null +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -0,0 +1,187 @@ +{ + "description": "disambiguatedPaths", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], 
+ "runOnRequirements": [ + { + "minServerVersion": "6.1.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "disambiguatedPaths is present on updateDescription when an ambiguous path is present", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.1": [ + "a", + "1" + ] + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths returns array indices as integers", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": [ + { + "1": 1 + } + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.0.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.0.1": [ + "a", + { + "$$type": "int" + }, + "1" + ] + } + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json new file mode 100644 index 0000000000..65e99e541e --- /dev/null +++ b/test/change_streams/unified/change-streams-errors.json @@ -0,0 +1,246 @@ +{ + "description": "change-streams-errors", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + 
"databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "expectError": { + "errorCode": 40573 + } + } + ] + }, + { + "description": "Change Stream should error when an invalid aggregation stage is passed in", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$unsupported": "foo" + } + ] + }, + "expectError": { + "errorCode": 40324 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$unsupported": "foo" + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should error when _id is projected out", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280 + } + } + ] + }, + { + "description": "change stream errors on ElectionInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 216, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 216 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-nsType.json b/test/change_streams/unified/change-streams-nsType.json new file mode 100644 index 0000000000..1861c9a5e0 --- /dev/null +++ b/test/change_streams/unified/change-streams-nsType.json @@ -0,0 +1,145 @@ +{ + "description": "change-streams-nsType", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + 
"tests": [ + { + "description": "nsType is present when creating collections", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "collection" + } + } + ] + }, + { + "description": "nsType is present when creating timeseries", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "timeseries" + } + } + ] + }, + { + "description": "nsType is present when creating views", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "view" + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json new file mode 100644 index 0000000000..e62fc03459 --- /dev/null +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -0,0 +1,827 @@ +{ + "description": "change-streams-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "collMod", + "insert", + "update", + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + 
"object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": 
"collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": 
"required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", 
+ "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json new file mode 100644 index 0000000000..1ec72b432b --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -0,0 +1,2348 @@ +{ + "description": "change-streams-resume-allowlist", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after a network error", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + 
"command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostUnreachable", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + 
"commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": 
"database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + 
{ + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": 
"createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after 
StaleEpoch", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + 
"resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after CursorNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 43, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + 
"commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json new file mode 100644 index 0000000000..7fd70108f0 --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -0,0 +1,2130 @@ +{ + "description": "change-streams-resume-errorlabels", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "maxServerVersion": "6.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes if error contains ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": 
"change stream does not resume if error does not contain ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 6 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json new file mode 100644 index 0000000000..b9594e0c1e --- /dev/null +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -0,0 +1,516 @@ +{ + "description": "change-streams-showExpandedEvents", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "shardedDb", + "client": "client0", + "databaseName": "shardedDb" + } + }, + { + "database": { + "id": "adminDb", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "shardedCollection", + "database": "shardedDb", + "collectionName": "shardedCollection" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "when provided, showExpandedEvents is sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when omitted, showExpandedEvents is not sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ 
+ { + "$changeStream": { + "showExpandedEvents": { + "$$exists": false + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when showExpandedEvents is true, new fields on change stream events are handled appropriately", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "a": 1 + } + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "foo", + "dropTarget": true + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "collectionUUID": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "operationDescription": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "foo" + }, + "operationDescription": { + "dropTarget": { + "$$exists": true + }, + "to": { + "db": "database0", + "coll": "foo" + } + } + } + } + ] + }, + { + "description": "when showExpandedEvents is true, createIndex events are reported", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "operationType": { + "$ne": "create" + } + } + } + ], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, dropIndexes events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropIndex", + "object": "collection0", + "arguments": { + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "dropIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": 
"changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events on views are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, modify events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_2" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "command": { + "collMod": "collection0" + }, + "commandName": "collMod" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "modify" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, shardCollection events are reported", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createChangeStream", + "object": "shardedCollection", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "adminDb", + "arguments": { + "command": { + "shardCollection": "shardedDb.shardedCollection", + "key": { + "_id": 1 + } + }, + "commandName": "shardCollection" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "shardCollection" + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json new file mode 100644 index 0000000000..a155d85b6e --- /dev/null +++ b/test/change_streams/unified/change-streams.json @@ -0,0 +1,1805 @@ +{ + "description": "change-streams", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + 
"database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase1", + "client": "globalClient", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "globalCollection1", + "database": "globalDatabase1", + "collectionName": "collection1" + } + }, + { + "collection": { + "id": "globalDb1Collection0", + "database": "globalDatabase1", + "collectionName": "collection0" + } + }, + { + "collection": { + "id": "globalDb0Collection1", + "database": "globalDatabase0", + "collectionName": "collection1" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "Test array truncation", + "runOnRequirements": [ + { + "minServerVersion": "4.7" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1, + "array": [ + "foo", + { + "a": "bar" + }, + 1, + 2, + 3 + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "array": [ + "foo", + { + "a": "bar" + } + ] + } + } + ] + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": {}, + "removedFields": [], + "truncatedArrays": [ + { + "field": "array", + "newSize": 2 + } + ], + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + } + ] + }, + { + "description": "Test with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with string comment", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + 
"saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "Test that comment is set on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "key": "value" + } + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "key": "value" + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test that comment is not set on getMore - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "to field is set in a rename change event", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + } + ] + }, + { + "description": "Test unknown operationType MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + 
{ + "$project": { + "operationType": "addedInFutureMongoDBVersion", + "ns": 1 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "addedInFutureMongoDBVersion", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ] + }, + { + "description": "Test newField added in response MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": 1, + "ns": 1, + "newField": "newFieldValue" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "newFieldValue" + } + } + ] + }, + { + "description": "Test new structure in ns document MUST NOT err", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "5.2.99" + }, + { + "minServerVersion": "6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns.viewOn": "db.coll" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test modified structure in ns document MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns": { + "db": "$ns.db", + "coll": "$ns.coll", + "viewOn": "db.coll" + } + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0", + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test server error on projecting out _id", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280, + "errorCodeName": "ChangeStreamFatalError", + "errorLabelsContain": [ + "NonResumableChangeStreamError" + ] + } + } + ] + }, + { + "description": "Test projection in change stream returns expected fields", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + 
"optype": "$operationType", + "ns": 1, + "newField": "value" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "optype": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "value" + } + } + ] + }, + { + "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "The server returns change stream responses in the specified server response format", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ] + }, + { + "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": 
[ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should allow valid aggregate pipeline stages", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": 
"insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database1", + "coll": "collection0" + }, + "fullDocument": { + "y": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test insert, update, replace, and delete event types", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "updateOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 1 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 2 + }, + "replacement": { + "x": 3 + } + } + }, + { + "name": "deleteOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "x": 2 + }, + "removedFields": [], + "truncatedArrays": { + "$$unsetOrMatches": { + "$$exists": true + } + }, + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "replace", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "delete", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + 
"description": "Test rename and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "globalCollection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test drop and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection0" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "drop", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 1 + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + 
"operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test wallTime field is set in a change event", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "wallTime": { + "$$exists": true + } + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/corpus/corpus-encrypted.json b/test/client-side-encryption/corpus/corpus-encrypted.json new file mode 100644 index 0000000000..1b72aa8a39 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-encrypted.json @@ -0,0 +1,9515 @@ +{ + "_id": "client_side_encryption_corpus", + "altname_aws": "aws", + "altname_local": "local", + "aws_double_rand_auto_id": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAABchrWPF5OPeuFpk4tUV325TmoNpGW+L5iPSXcLQIr319WJFIp3EDy5QiAHBfz2rThI7imU4eLXndIUrsjM0S/vg==", + "subType": "06" + } + } + }, + "aws_double_rand_auto_altname": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAABga5hXFiFvH/wOr0wOHSHFWRZ4pEs/UCC1XJWf46Dod3GY9Ry5j1ZyzeHueJxc4Ym5M8UHKSmJuXmNo9m9ZnkiA==", + "subType": "06" + } + } + }, + "aws_double_rand_explicit_id": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAABjTYZbsro/YxLWBb88qPXEIDQdzY7UZyK4UaZZ8h62OTxp43Zp9j6WvOEzKhXt4oJPMxlAxyTdqO6MllX5bsDrw==", + "subType": "06" + } + } + }, + "aws_double_rand_explicit_altname": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAABqkyXdeS3aWH2tRFoKxsIIL3ZH05gkiAEbutrjrdfw0b110iPhuCCOb0gP/nX/NRNCg1kCFZ543Vu0xZ0BRXlvQ==", + "subType": "06" + } + } + }, + "aws_double_det_explicit_id": { + "kms": "aws", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$numberDouble": "1.234" } + }, + "aws_double_det_explicit_altname": { + "kms": "aws", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { 
"$numberDouble": "1.234" } + }, + "aws_string_rand_auto_id": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAACAsI5E0rVT8TpIONY3TnbRvIxUjKsiy9ynVd/fE7U1lndE7KR6dTzs8QWK13kdKxO+njKPeC2ObBX904QmJ65Sw==", + "subType": "06" + } + } + }, + "aws_string_rand_auto_altname": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAACgBE6J6MRxPSDe+gfJPL8nBvuEIRBYxNS/73LqBTDJYyN/lsHQ6UlFDT5B4EkIPmHPTe+UBMOhZQ1bsP+DK8Aog==", + "subType": "06" + } + } + }, + "aws_string_rand_explicit_id": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAACbdTVDBWn35M5caKZgLFoiSVeFGKRj5K/QtupKNc8/dPIyCE+/a4PU51G/YIzFpYmp91nLpyq7lD/eJ/V0q66Zw==", + "subType": "06" + } + } + }, + "aws_string_rand_explicit_altname": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAACa4O+kE2BaqM0E+yiBrbCuE0YEGTrZ7L/+SuWm9gN3UupxwAQpRfxXAuUCTc9u1CXnvL+ga+VJMcWD2bawnn/Rg==", + "subType": "06" + } + } + }, + "aws_string_det_auto_id": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAACyvOW8NcqRkZYzujivwVmYptJkic27PWr3Nq3Yv5Njz8cJdoyesVaQan6mn+U3wdfGEH8zbUUISdCx5qgvXEpvw==", + "subType": "06" + } + } + }, + "aws_string_det_explicit_id": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAACyvOW8NcqRkZYzujivwVmYptJkic27PWr3Nq3Yv5Njz8cJdoyesVaQan6mn+U3wdfGEH8zbUUISdCx5qgvXEpvw==", + "subType": "06" + } + } + }, + "aws_string_det_explicit_altname": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAACyvOW8NcqRkZYzujivwVmYptJkic27PWr3Nq3Yv5Njz8cJdoyesVaQan6mn+U3wdfGEH8zbUUISdCx5qgvXEpvw==", + "subType": "06" + } + } + }, + "aws_object_rand_auto_id": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAADI+/afY6Eka8j1VNThWIeGkDZ7vo4/l66a01Z+lVUFFnVLeUV/nz9kM6uTTplNRUa+RXmNmwkoR/BHRnGc7wRNA==", + "subType": "06" + } + } + }, + "aws_object_rand_auto_altname": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAADzN4hVXWXKerhggRRtwWnDu2W2wQ5KIWb/X1WCZJKTjQSQ5LNHVasabBCa4U1q46PQ5pDDM1PkVjW6o+zzl/4xw==", + "subType": "06" + } + } + }, + "aws_object_rand_explicit_id": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAADhSs5zKFMuuux3fqFFuPito3N+bp5TgmkUtJtFXjmA/EnLuexGARvEeGUsMJ/n0VzKbbsiE8+AsUNY3o9YXutqQ==", + "subType": "06" + } + } + }, + "aws_object_rand_explicit_altname": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "explicit", 
+ "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAADpj8MSov16h26bFDrHepsNkW+tOLOjRP7oj1Tnj75qZ+uqxxVkQ5B/t/Ihk5fikHTJGAcRBR5Vv6kJ/ulMaDnvQ==", + "subType": "06" + } + } + }, + "aws_object_det_explicit_id": { + "kms": "aws", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "x": { "$numberInt": "1" } } + }, + "aws_object_det_explicit_altname": { + "kms": "aws", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "x": { "$numberInt": "1" } } + }, + "aws_array_rand_auto_id": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAETWDOZ6zV39H2+W+BkwZIoxI3BNF6phKoiBZ9+i4T9uEoyU3TmoTPjuI0YNwR1v/p5/9rlVCG0KLZd16eeMb3zxZXjqh6IAJqfhsBQ7bzBYI=", + "subType": "06" + } + } + }, + "aws_array_rand_auto_altname": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAE1xeHbld2JjUiPB1k+xMZuIzNSai7mv1iusCswxKEfYCZ7YtR0GDQTxN4676CwhcodSDiysjgOxSFIGlptKCvl0k46LNq0EGypP9yWBLvdjQ=", + "subType": "06" + } + } + }, + "aws_array_rand_explicit_id": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAEFVa4U2uW65MGihhdOmpZFgnwGTs3VeN5TXXbXJ5cfm0CwXF3EPlzAVjy5WO/+lbvFufpQnIiLH59/kVygmwn+2P9zPNJnSGIJW9gaV8Vye8=", + "subType": "06" + } + } + }, + "aws_array_rand_explicit_altname": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAE11VXbfg7DJQ5/CB9XdBO0hCrxOkK3RrEjPGJ0FXlUo76IMna1uo+NVmDnM63CRlGE3/TEbZPpp0w0jn4vZLKvBmGr7o7WQusRY4jnRf5oH4=", + "subType": "06" + } + } + }, + "aws_array_det_explicit_id": { + "kms": "aws", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { "$numberInt": "1" }, + { "$numberInt": "2" }, + { "$numberInt": "3" } + ] + }, + "aws_array_det_explicit_altname": { + "kms": "aws", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { "$numberInt": "1" }, + { "$numberInt": "2" }, + { "$numberInt": "3" } + ] + }, + "aws_binData=00_rand_auto_id": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFpZYSktIHzGLZ6mcBFxywICqxdurqLVJcQR34ngix5YIOOulCYEhBSDzzSEyixEPCuU6cEzeuafpZRHX4qgcr9Q==", + "subType": "06" + } + } + }, + "aws_binData=00_rand_auto_altname": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFshzESR9SyR++9r2yeaEjJYScMDez414s8pZkB3C8ihDa+rsyaxNy4yrF7qNEWjFrdFaH7zD2LdlPx+TKZgROlg==", + "subType": "06" + } + } + }, + "aws_binData=00_rand_explicit_id": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AgFkgAAAAAAAAAAAAAAAAAAFpYwZRPDom7qyAe5WW/QNSq97/OYgRT8xUEaaR5pkbQEFd/Cwtl8Aib/3Bs1CT3MVaHVWna2u5Gcc4s/v18zLhg==", + "subType": "06" + } + } + }, + "aws_binData=00_rand_explicit_altname": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFBq1RIU1YGHKAS1SAtS42fKtQBHQ/BCQzRutirNdvWlrXxF81LSaS7QgQyycZ2ePiOLsSm2vZS4xaQETeCgRC4g==", + "subType": "06" + } + } + }, + "aws_binData=00_det_auto_id": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAF6SJGmfD3hLVc4tLPm4v2zFuHoRxUDLumBR8Q0AlKK2nQPyvuHEPVBD3vQdDi+Q7PwFxmovJsHccr59VnzvpJeg==", + "subType": "06" + } + } + }, + "aws_binData=00_det_explicit_id": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAF6SJGmfD3hLVc4tLPm4v2zFuHoRxUDLumBR8Q0AlKK2nQPyvuHEPVBD3vQdDi+Q7PwFxmovJsHccr59VnzvpJeg==", + "subType": "06" + } + } + }, + "aws_binData=00_det_explicit_altname": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAF6SJGmfD3hLVc4tLPm4v2zFuHoRxUDLumBR8Q0AlKK2nQPyvuHEPVBD3vQdDi+Q7PwFxmovJsHccr59VnzvpJeg==", + "subType": "06" + } + } + }, + "aws_binData=04_rand_auto_id": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFM5685zqlM8pc3xubtCFuf724g/bWXsebpNzw5E5HrxUqSBBVOvjs3IJH74+Supe169qejY358nOG41mLZvO2wJByvT14qmgUGpgBaLaxPR0=", + "subType": "06" + } + } + }, + "aws_binData=04_rand_auto_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFfLqOzpfjz/XYHDLnliUAA5ehi6s+OIjvrLa59ubqEf8DuoCEWlO13Dl8X42IBB4hoSsO2RUeWtc9MeH4SdIUh/xJN3qS7qzjh/H+GvZRdAM=", + "subType": "06" + } + } + }, + "aws_binData=04_rand_explicit_id": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFkmKfKAbz9tqVaiM9MRhYttiY3vgDwXpdYLQ4uUgWX89KRayLADWortYL+Oq+roFhO3oiwB9vjeWGIdgbj5wSh/50JT/2Gs85TXFe1GFjfWs=", + "subType": "06" + } + } + }, + "aws_binData=04_rand_explicit_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAFKbufv83ddN+07Q5Ocq0VxUEV+BesSrVM7Bol3cMlWjHi7P+MrdwhNEa94xlxlDwU3b+RD6kW+AuNEQ2byA3CX2JjZE1gHwN7l0ukXuqpD0A=", + "subType": "06" + } + } + }, + "aws_binData=04_det_auto_id": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAFlg7ceq9w/JMhHcNzQks6UrKYAffpUyeWuBIpcuLoB7YbFO61Dphseh77pzZbk3OvmveUq6EtCP2pmsq7hA+QV4hkv6BTn4m6wnXw6ss/qfE=", + "subType": "06" + } + } + }, + "aws_binData=04_det_explicit_id": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + 
"value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAFlg7ceq9w/JMhHcNzQks6UrKYAffpUyeWuBIpcuLoB7YbFO61Dphseh77pzZbk3OvmveUq6EtCP2pmsq7hA+QV4hkv6BTn4m6wnXw6ss/qfE=", + "subType": "06" + } + } + }, + "aws_binData=04_det_explicit_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAFlg7ceq9w/JMhHcNzQks6UrKYAffpUyeWuBIpcuLoB7YbFO61Dphseh77pzZbk3OvmveUq6EtCP2pmsq7hA+QV4hkv6BTn4m6wnXw6ss/qfE=", + "subType": "06" + } + } + }, + "aws_undefined_rand_explicit_id": { + "kms": "aws", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$undefined": true } + }, + "aws_undefined_rand_explicit_altname": { + "kms": "aws", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$undefined": true } + }, + "aws_undefined_det_explicit_id": { + "kms": "aws", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$undefined": true } + }, + "aws_undefined_det_explicit_altname": { + "kms": "aws", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$undefined": true } + }, + "aws_objectId_rand_auto_id": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAHASE+V+LlkmwgF9QNjBK8QBvC973NaTMk6wbd57VB2EpQzrgxMtR5gYzVeqq4xaaHqrncyZCOIxDJkFlaim2NqA==", + "subType": "06" + } + } + }, + "aws_objectId_rand_auto_altname": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAHf/+9Qj/ozcDoUb8RNBnajU1d9hJ/6fE17IEZnw+ma6v5yH8LqZk9w3dtm6Sfw1unMhcMKrmIgs6kxqRWhNREJg==", + "subType": "06" + } + } + }, + "aws_objectId_rand_explicit_id": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAHzX8ejVLhoarQ5xgWsJitU/9eBm/Hlt2IIbZtS0SBc80qzkkWTaP9Zl9wrILH/Hwwx8RFnts855eKII3NJFa3BA==", + "subType": "06" + } + } + }, + "aws_objectId_rand_explicit_altname": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAHG5l6nUCY8f/6xO6TsPDrZHcdPRyMe3muMlY2DxHwv9GJNDR5Ne5VEAzUjnbgoy+B29SX4oY8cXJ6XhVz8mt3Eg==", + "subType": "06" + } + } + }, + "aws_objectId_det_auto_id": { + "kms": "aws", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAHTMY2l+gY8glm4HeSsGfCSfOsTVTzYU8qnQV8iqEFHrO5SBJac59gv3N/jukMwAnt0j6vIIQrROkVetU24YY7sQ==", + "subType": "06" + } + } + }, + "aws_objectId_det_explicit_id": { + "kms": "aws", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAHTMY2l+gY8glm4HeSsGfCSfOsTVTzYU8qnQV8iqEFHrO5SBJac59gv3N/jukMwAnt0j6vIIQrROkVetU24YY7sQ==", + "subType": "06" + } + } + }, + "aws_objectId_det_explicit_altname": { + "kms": "aws", + "type": "objectId", + "algo": 
"det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAHTMY2l+gY8glm4HeSsGfCSfOsTVTzYU8qnQV8iqEFHrO5SBJac59gv3N/jukMwAnt0j6vIIQrROkVetU24YY7sQ==", + "subType": "06" + } + } + }, + "aws_bool_rand_auto_id": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAISm4UFt1HC2j0ObpTBg7SvF2Dq31i9To2ED4F3JcTihhq0fVzaSCsUz9VTJ0ziHmeNPNdfPPZO6qA/CDEZBO4jg==", + "subType": "06" + } + } + }, + "aws_bool_rand_auto_altname": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAIj93KeAa96DmZXdB8boFvW19jhJSMmtSs5ag5FDSkH8MdKG2d2VoBOdUlBrL+LHYELqeDHCszY7qCirvb5mIgZg==", + "subType": "06" + } + } + }, + "aws_bool_rand_explicit_id": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAIMbDFEuHIl5MNEsWnYLIand1vpK6EMv7Mso6qxrN4wHSVVwmxK+GCPgrKoUQsNuTssFWNCu0IhwrXOagDEfmlxw==", + "subType": "06" + } + } + }, + "aws_bool_rand_explicit_altname": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAIkIaWfmPdxgAV5Rtb6on6T0NGt9GPFDScQD5I/Ch0ngiTCCKceJOjU0ljd3YTgfWRA1p/MlMIV0I5YAWZXKTHlg==", + "subType": "06" + } + } + }, + "aws_bool_det_explicit_id": { + "kms": "aws", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "aws_bool_det_explicit_altname": { + "kms": "aws", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "aws_date_rand_auto_id": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAJz1VG4+QnQXEE+TGu/pzfPugGMVTiC1xnenG1ByRdPvsERVw9WComWl1tb9tt9oblD7H/q0y1+y8HevkDqohB2Q==", + "subType": "06" + } + } + }, + "aws_date_rand_auto_altname": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAJa1kI2mIIYWjf7zjf5dD9+psvAQpjZ3nnsoXA5upcIwEtZaC8bxKKHVpOLOP3rTbvT5EV6vLhXkferGoyaqd/8w==", + "subType": "06" + } + } + }, + "aws_date_rand_explicit_id": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAJ9Q5Xe4UuOLQTUwosk47A6xx40XJcNoICCNtKrHqsUYy0QLCFRc5v4nA0160BVghURizbUtX8iuIp11pnsDyRtA==", + "subType": "06" + } + } + }, + "aws_date_rand_explicit_altname": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAJkHOdUc/4U82wxWJZ0SYABkJjQqNApkH2Iy/5S+PoatPgynoeSFTU9FmAbuWV/gbtIfBiaCOIjlsdonl/gf9+5w==", + "subType": "06" + } + } + }, + "aws_date_det_auto_id": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AQFkgAAAAAAAAAAAAAAAAAAJEEpQNsiqMWPqD4lhMkiOJHGE8FxOeYrKPiiAp/bZTrLKyCSS0ZL1WT9H3cGzxWPm5veihCjKqWhjatC/pjtzbQ==", + "subType": "06" + } + } + }, + "aws_date_det_explicit_id": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAJEEpQNsiqMWPqD4lhMkiOJHGE8FxOeYrKPiiAp/bZTrLKyCSS0ZL1WT9H3cGzxWPm5veihCjKqWhjatC/pjtzbQ==", + "subType": "06" + } + } + }, + "aws_date_det_explicit_altname": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAJEEpQNsiqMWPqD4lhMkiOJHGE8FxOeYrKPiiAp/bZTrLKyCSS0ZL1WT9H3cGzxWPm5veihCjKqWhjatC/pjtzbQ==", + "subType": "06" + } + } + }, + "aws_null_rand_explicit_id": { + "kms": "aws", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "aws_null_rand_explicit_altname": { + "kms": "aws", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "aws_null_det_explicit_id": { + "kms": "aws", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "aws_null_det_explicit_altname": { + "kms": "aws", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "aws_regex_rand_auto_id": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAALnhViSt3HqTDzyLN4mWO9srBU8TjRvPWsAJYfj/5sgI/yFuWdrggMs3Aq6G+K3tRrX3Yb+osy5CLiFCxq9WIvAA==", + "subType": "06" + } + } + }, + "aws_regex_rand_auto_altname": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAALbL2RS2tGQLBZ+6LtXLKAWFKcoKui+u4+gMIlFemLgpdO2eLqrMJB53ccqZImX8ons9UgAwDkiD68hWy8e7KHfg==", + "subType": "06" + } + } + }, + "aws_regex_rand_explicit_id": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAALa0+ftF6W/0Ul4J9VT/3chXFktE1o+OK4S14h2kyOqDVNA8yMKuyCK5nWl1yZvjJ76TuhEABte23oxcBP5QwalQ==", + "subType": "06" + } + } + }, + "aws_regex_rand_explicit_altname": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAALS4Yo9Fwk6OTx2CWdnObFT2L4rHngeIbdCyT4/YMJYd+jLU3mph14M1ptZZg+TBIgSPHq+BkvpRDifbMmOVr/Hg==", + "subType": "06" + } + } + }, + "aws_regex_det_auto_id": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAALpwNlokiTCUtTa2Kx9NVGvXR/aKPGhR5iaCT7nHEk4BOiZ9Kr4cRHdPCeZ7A+gjG4cKoT62sm3Fj1FwSOl8J8aQ==", + "subType": "06" + } + } + }, + "aws_regex_det_explicit_id": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAALpwNlokiTCUtTa2Kx9NVGvXR/aKPGhR5iaCT7nHEk4BOiZ9Kr4cRHdPCeZ7A+gjG4cKoT62sm3Fj1FwSOl8J8aQ==", + "subType": "06" + } + } + 
}, + "aws_regex_det_explicit_altname": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAALpwNlokiTCUtTa2Kx9NVGvXR/aKPGhR5iaCT7nHEk4BOiZ9Kr4cRHdPCeZ7A+gjG4cKoT62sm3Fj1FwSOl8J8aQ==", + "subType": "06" + } + } + }, + "aws_dbPointer_rand_auto_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAMfCVAnMNbRGsThnoVGb2KDsCIU2ehcPtebk/TFG4GZvEmculscLLih813lEz5NHS2sAXBn721EzUS7d0TKAPbmEYFwUBnijIQIPvUoUO8AQM=", + "subType": "06" + } + } + }, + "aws_dbPointer_rand_auto_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAMvYJ5BtaMLVXV+qj85q5WqKRlzlHOBIIxZfUE/BBXUwqSTpJLdQQD++DDh6F2dtorBeYa3oUv2ef3ImASk5j23joU35Pm3Zt9Ci1pMNGodWs=", + "subType": "06" + } + } + }, + "aws_dbPointer_rand_explicit_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAMdsmYtPDw8kKjfB2kWfx5W1oNEkWWct1lRpesN303pUWsawDJpfBx40lg18So2X/g4yGIwpY3qfEKQZA4vCJeT+MTjhRXFjXA7eS/mxv8f3E=", + "subType": "06" + } + } + }, + "aws_dbPointer_rand_explicit_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAM0hcvS5zmY3mlTp0SfME/rINlflF/sx2KvP0eJTdH+Uk0WHuTkFIJAza+bXvV/gB7iNC350qyzUX3M6NHx/9s/5yBpY8MawTZTZ7WCQIA+ZI=", + "subType": "06" + } + } + }, + "aws_dbPointer_det_auto_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAMp4QxbaEOij66L+RtaMekrDSm6QbfJBTQ8lQFhxfq9n7SVuQ9Zwdy14Ja8tyI3cGgQzQ/73rHUJ3CKA4+OYr63skYUkkkdlHxUrIMd5j5woc=", + "subType": "06" + } + } + }, + "aws_dbPointer_det_explicit_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAMp4QxbaEOij66L+RtaMekrDSm6QbfJBTQ8lQFhxfq9n7SVuQ9Zwdy14Ja8tyI3cGgQzQ/73rHUJ3CKA4+OYr63skYUkkkdlHxUrIMd5j5woc=", + "subType": "06" + } + } + }, + "aws_dbPointer_det_explicit_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAMp4QxbaEOij66L+RtaMekrDSm6QbfJBTQ8lQFhxfq9n7SVuQ9Zwdy14Ja8tyI3cGgQzQ/73rHUJ3CKA4+OYr63skYUkkkdlHxUrIMd5j5woc=", + "subType": "06" + } + } + }, + "aws_javascript_rand_auto_id": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAN3HzAC9BTD7Jgi0PR4RS/Z6L6QtAQ7VhbKRbX+1smmnYniH6jVBM6zyxMDM8h9YjMPNs8EJrGDnisuf33w5KI/A==", + "subType": "06" + } + } + }, + "aws_javascript_rand_auto_altname": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AgFkgAAAAAAAAAAAAAAAAAANJpw+znlu3ecSiNyZ0EerVsow4aDRF2auI3Wy69EVexJkQlHO753PjRn8hG/x2kY8ROy5IUU43jaugP5AN1bwNQ==", + "subType": "06" + } + } + }, + "aws_javascript_rand_explicit_id": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAANzoDiq8uI0+l8COY8YdM9S3rpLvPOHOWmJqJNtOyS0ZXUx1SB5paRJ4W3Eg8KuXEeoFwvBDe9cW9YT66CzkjlBw==", + "subType": "06" + } + } + }, + "aws_javascript_rand_explicit_altname": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAN/JhtRongJweLC5SdrXHhsFz3p82q3cwXf8Sru21DK6S39S997y3uhVLn0xlX5d94PxK1XVYSjz1oVuMxZouZ7Q==", + "subType": "06" + } + } + }, + "aws_javascript_det_auto_id": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAANE39aEGiuUZ1WyakVEBgkGzLp5whkIjJ4uiaFLXniRszJL70FRkcf+aFXlA5Y4So9/ODKF76qbSsH4Jk6L+3mog==", + "subType": "06" + } + } + }, + "aws_javascript_det_explicit_id": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAANE39aEGiuUZ1WyakVEBgkGzLp5whkIjJ4uiaFLXniRszJL70FRkcf+aFXlA5Y4So9/ODKF76qbSsH4Jk6L+3mog==", + "subType": "06" + } + } + }, + "aws_javascript_det_explicit_altname": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAANE39aEGiuUZ1WyakVEBgkGzLp5whkIjJ4uiaFLXniRszJL70FRkcf+aFXlA5Y4So9/ODKF76qbSsH4Jk6L+3mog==", + "subType": "06" + } + } + }, + "aws_symbol_rand_auto_id": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAOBv1T9tleM0xwNe7efg/MlShyzvXe3Pmg1GzPl3gjFRHZGWXR578KqX+8oiz65eXGzNuyOFvcpnR2gYCs3NeKeQfctO5plEiIva6nzCI5SK8=", + "subType": "06" + } + } + }, + "aws_symbol_rand_auto_altname": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAOwLgGws8CMh+GgkEJFAx8tDIflyjsgG+/1FmZZobKAg8NOKqfXjtbnNCbvR28OCk6g/8SqBm8m53G6JciwvthJ0DirdfEexiUqu7IPtaeeyw=", + "subType": "06" + } + } + }, + "aws_symbol_rand_explicit_id": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAORQi3dNkXzZeruWu19kEhDu6fFD/h47ILzk+OVKQMoriAQC5YFyVRp1yAkIaWsrsPcyCHlfZ99FySSQeqSYbZZNj5FqyonWvDuPTduHDy3CI=", + "subType": "06" + } + } + }, + "aws_symbol_rand_explicit_altname": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAOj+Yl1pQPiJ6mESOISOyUYsKN/VIvC8f0derhxIPakXkwn57U0sxv+geUkrl3JZDxY3+cX5M1JZmY+PfjaYQhbTorf9RZaVC2Wwo2lMftWi0=", + "subType": "06" + } + } + }, + "aws_symbol_det_auto_id": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AQFkgAAAAAAAAAAAAAAAAAAO5IHripygBGEsVK8RFWZ1rIIVUap8KVDuqOspZpERaj+5ZEfqIcyrP/WK9KdvwOfdOWXfP/mOwuImYgNdbaQe+ejkYe4W0Y0uneCuw88k95Q=", + "subType": "06" + } + } + }, + "aws_symbol_det_explicit_id": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAO5IHripygBGEsVK8RFWZ1rIIVUap8KVDuqOspZpERaj+5ZEfqIcyrP/WK9KdvwOfdOWXfP/mOwuImYgNdbaQe+ejkYe4W0Y0uneCuw88k95Q=", + "subType": "06" + } + } + }, + "aws_symbol_det_explicit_altname": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAO5IHripygBGEsVK8RFWZ1rIIVUap8KVDuqOspZpERaj+5ZEfqIcyrP/WK9KdvwOfdOWXfP/mOwuImYgNdbaQe+ejkYe4W0Y0uneCuw88k95Q=", + "subType": "06" + } + } + }, + "aws_javascriptWithScope_rand_auto_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAPT31GSNkY1RM43miv1XPYtDX1vU/xORiM3U0pumjqA+JLU/HMhH++75OcMhcAQqMjm2nZtZScxdGJsJJPEEzqjbFNMJgYc9sqR5uLnzk+2dg=", + "subType": "06" + } + } + }, + "aws_javascriptWithScope_rand_auto_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAPUxgaKAxSQ1uzOZtzsbtrxtDT2P/zWY6lYsbChXuRUooqvyjXSkNDqKBBA7Gp5BdGiVB/JLR47Tihpbcw1s1yGhwQRvnqeDvPrf91nvElXRY=", + "subType": "06" + } + } + }, + "aws_javascriptWithScope_rand_explicit_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAPv8W0ZtquFCLTG0TqvRjdzKa/4mvqT2FuEGQ0mXG2k2BZh2LY5APr/kgW0tP4eLjHzVld6OLiM9ZKAvENCZ6/fKOvqSwpIfkdLWUIeB4REQg=", + "subType": "06" + } + } + }, + "aws_javascriptWithScope_rand_explicit_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAPMVhWjaxLffdAOkVgIJpjgNIldMS451NQs3C1jb+pzopHp3DlfZ+AHQpK9reMVVKjaqanhWBpL25q+feA60XVgZPCUDroiRYqMFqU//y0amw=", + "subType": "06" + } + } + }, + "aws_javascriptWithScope_det_explicit_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$code": "x=1", "$scope": {} } + }, + "aws_javascriptWithScope_det_explicit_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$code": "x=1", "$scope": {} } + }, + "aws_int_rand_auto_id": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAQFV5b3vsoZe+MT4z8soetpmrWJpm7be41FNu/rdEqHWTG32jCym6762PCNYH5+vA7ldCWQkdt+ncneHsxzPrm9w==", + "subType": "06" + } + } + }, + "aws_int_rand_auto_altname": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAQY9+QenvU1Tk/dEGZP11uOZJLHAJ9hWHbEhxbtxItt1LsdU/8gOZfypilIO5BUkLT/15PUuXV28GISNh6yIuWhw==", + 
"subType": "06" + } + } + }, + "aws_int_rand_explicit_id": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAQruCugbneumhcinuXm89WW1PXVuSOewttp9cpsPPsCRVqe/uAkZOdJnZ2KaEZ9zki2GeqaJTs1qDmaJofc6GMEA==", + "subType": "06" + } + } + }, + "aws_int_rand_explicit_altname": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAQb15qXl/tejk4pmgkc4pUxzt4eJrv/cetgzgcPVaROAQSzd8ptbgCjaV8vP46uqozRoaDFZbQ06t65c3f0x/Ucw==", + "subType": "06" + } + } + }, + "aws_int_det_auto_id": { + "kms": "aws", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAQCXo6ieWvfoqkG+rP7J2BV013AVf/oNMmmGWe44VEHahF+qZHzW5I/F2qIA+xgKkk172pFq0iTSOpe+K2WHMKFw==", + "subType": "06" + } + } + }, + "aws_int_det_explicit_id": { + "kms": "aws", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAQCXo6ieWvfoqkG+rP7J2BV013AVf/oNMmmGWe44VEHahF+qZHzW5I/F2qIA+xgKkk172pFq0iTSOpe+K2WHMKFw==", + "subType": "06" + } + } + }, + "aws_int_det_explicit_altname": { + "kms": "aws", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAAQCXo6ieWvfoqkG+rP7J2BV013AVf/oNMmmGWe44VEHahF+qZHzW5I/F2qIA+xgKkk172pFq0iTSOpe+K2WHMKFw==", + "subType": "06" + } + } + }, + "aws_timestamp_rand_auto_id": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAR63xXG8mrlixkQzD5VBIPE6NHicaWcS5CBhiIJDcZ0x8D9c5TgRJUfCeWhKvWFD4o0DoxcBQ2opPormFDpvmq/g==", + "subType": "06" + } + } + }, + "aws_timestamp_rand_auto_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAARAgY9LsUxP6gP4gYRvvzZ4iaHVQRNbycATiVag1YNSiDmEr4LYserYuBscdrIy4v3zgGaulFM9KV86bx0ItycZA==", + "subType": "06" + } + } + }, + "aws_timestamp_rand_explicit_id": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAARLneAZqPcHdzGGnXz2Ne5E7HP9cDC1+yoIwcA8OSF/IlzEjrrMAi3z6Izol6gWDlD7VOh7QYL3sASJOXyzF1hPQ==", + "subType": "06" + } + } + }, + "aws_timestamp_rand_explicit_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAARH2bU7KNo5SHxiO8JFEcT9wryuHNXyM7ADop1oPcESyay1Nc0WHPD3nr0yMAK481NxOkE3qXyaslu7bcP/744WA==", + "subType": "06" + } + } + }, + "aws_timestamp_det_auto_id": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAARG7kGfx0ky+d4Hl/fRBu8oUR1Mph26Dkv3J7fxGYanpzOFMiHIfVO0uwYMvsfzG54y0DDNlS3FmmS13DzepbzGQ==", + "subType": "06" + } + } + }, + "aws_timestamp_det_explicit_id": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": 
"explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAARG7kGfx0ky+d4Hl/fRBu8oUR1Mph26Dkv3J7fxGYanpzOFMiHIfVO0uwYMvsfzG54y0DDNlS3FmmS13DzepbzGQ==", + "subType": "06" + } + } + }, + "aws_timestamp_det_explicit_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAARG7kGfx0ky+d4Hl/fRBu8oUR1Mph26Dkv3J7fxGYanpzOFMiHIfVO0uwYMvsfzG54y0DDNlS3FmmS13DzepbzGQ==", + "subType": "06" + } + } + }, + "aws_long_rand_auto_id": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAASZbes2EdR78crt2pXVElW2YwAQh8HEBapYYeav2VQeg2syXaV/qZuD8ofnAVn4v/DydTTMVMmK+sVU/TlnAu2eA==", + "subType": "06" + } + } + }, + "aws_long_rand_auto_altname": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAASt+7fmMYH+fLHgybc+sng8/UmKP3YPUEPCz1SXVQljQp6orsCILSgtgGPsdeGnN5NSxh3XzerHs6zlR92fWpZCw==", + "subType": "06" + } + } + }, + "aws_long_rand_explicit_id": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAS01fF1uo6zYDToJnOT/EbDipzk7YZ6I+IspZF+avjU3XYfpRxT9NdAgKr0euWJwyAsdpWqqCwFummfrPeZOy04A==", + "subType": "06" + } + } + }, + "aws_long_rand_explicit_altname": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAS6tpH796bqy58mXf38rJvVtA1uBcxBE5yIGQ4RN44oypc/pvw0ouhFI1dkoneKMtAFU/5RygZV+RvQhRtgKn76A==", + "subType": "06" + } + } + }, + "aws_long_det_auto_id": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAASC7O/8JeB4WTqQFPuMpFRsAuonPS3yu7IAPZeRPIr03CmM6HNndYIKMoFM13eELNZTdJSgg9u9ItGqRw+/XMHzQ==", + "subType": "06" + } + } + }, + "aws_long_det_explicit_id": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAASC7O/8JeB4WTqQFPuMpFRsAuonPS3yu7IAPZeRPIr03CmM6HNndYIKMoFM13eELNZTdJSgg9u9ItGqRw+/XMHzQ==", + "subType": "06" + } + } + }, + "aws_long_det_explicit_altname": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQFkgAAAAAAAAAAAAAAAAAASC7O/8JeB4WTqQFPuMpFRsAuonPS3yu7IAPZeRPIr03CmM6HNndYIKMoFM13eELNZTdJSgg9u9ItGqRw+/XMHzQ==", + "subType": "06" + } + } + }, + "aws_decimal_rand_auto_id": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAATgf5zW9EgnWHPxj4HAGt472eN9UXP41TaF8V2J7S2zqSpiBZGKDuOIjw2FBSqaNp53vvfl9HpwAuQBJZhrwkBCKRkKV/AAR3/pTpuoqhSKaM=", + "subType": "06" + } + } + }, + "aws_decimal_rand_auto_altname": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AgFkgAAAAAAAAAAAAAAAAAATPRfvZWdupE2N0W1DXUx7X8Zz7g43jawJL7PbQtTYetI78xRETkMdygwSEHgs+cvnUBBtYIeKRVkOGZQkwf568OclhDiPxUeD38cR5blBq/U=", + "subType": "06" + } + } + }, + "aws_decimal_rand_explicit_id": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAAT+ZnCg2lSMIohZ9RJ4CNs3LZ0g+nV04cYAmrxTSrTSBPGlZ7Ywh5A2rCss7AUijYZiKiYyZbuAzukbOuVRhdCtm+xo9+DyLAwTezF18okk6Y=", + "subType": "06" + } + } + }, + "aws_decimal_rand_explicit_altname": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgFkgAAAAAAAAAAAAAAAAAATlnQYASsTZRRHzFjcbCClXartcXBVRrYv7JImMkDmAj6EAjf/ZqpjeykkS/wohMhXaNwyZBdREr+n+GDV7imYoL4WRBOLnqB6hrYidlWqNzE=", + "subType": "06" + } + } + }, + "aws_decimal_det_explicit_id": { + "kms": "aws", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$numberDecimal": "1.234" } + }, + "aws_decimal_det_explicit_altname": { + "kms": "aws", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$numberDecimal": "1.234" } + }, + "aws_minKey_rand_explicit_id": { + "kms": "aws", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$minKey": 1 } + }, + "aws_minKey_rand_explicit_altname": { + "kms": "aws", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$minKey": 1 } + }, + "aws_minKey_det_explicit_id": { + "kms": "aws", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$minKey": 1 } + }, + "aws_minKey_det_explicit_altname": { + "kms": "aws", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$minKey": 1 } + }, + "aws_maxKey_rand_explicit_id": { + "kms": "aws", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "aws_maxKey_rand_explicit_altname": { + "kms": "aws", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "aws_maxKey_det_explicit_id": { + "kms": "aws", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "aws_maxKey_det_explicit_altname": { + "kms": "aws", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "local_double_rand_auto_id": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAABGF195CB8nRmK9+KxYO7T96MeXucC/ILQtEEQAS4zrwj3Qz7YEQrf/apvbKTCkn3siN2XSDLQ/7dmddZa9xa9yQ==", + "subType": "06" + } + } + }, + "local_double_rand_auto_altname": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAABY8g18z6ZOjGtfNxaAmU95tXMdoM6qbtDMpB72paqiHZTW1UGB22HPXiEnVz05JTBzzX4fc6tOldX6aJel812Zg==", + "subType": "06" + } + } + }, + 
"local_double_rand_explicit_id": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAABDlHwN8hYyScEhhx64TdJ2Qp2rmKRg8983zdqIL1914tyPwRQq7ySCOhmFif2S7v4KT+r0uOfimYvKD1n9rKHlg==", + "subType": "06" + } + } + }, + "local_double_rand_explicit_altname": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAB2VnTFlaCRzAZZTQiMWQORFNgXIuAJlHJXIHiYow2eO6JbVghWTpH+MsdafBNPVnc0zKuZBL0Qs2Nuk1xiQaqhA==", + "subType": "06" + } + } + }, + "local_double_det_explicit_id": { + "kms": "local", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$numberDouble": "1.234" } + }, + "local_double_det_explicit_altname": { + "kms": "local", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$numberDouble": "1.234" } + }, + "local_string_rand_auto_id": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAC5NBAPM8q2n9fnkwQfE9so/XcO51plPBNs5VlBRbDw68k9T6/uZ2TWsAvTYtVooY59zHHr2QS3usKbGQB6J61rA==", + "subType": "06" + } + } + }, + "local_string_rand_auto_altname": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACM/EjGMrkYHvSZra26m74upuvLkfKXTs+tTWquGzrgWYLnLt8I6XBIwx1VymS9EybrCU/ewmtgjLUNUFQacIeXA==", + "subType": "06" + } + } + }, + "local_string_rand_explicit_id": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACn4tD26UG8lO9gTZaxen6yXzHo/a2lokeY1ClxHMtJODoJr2JZzIDHP3A9aZ8L4+Vu+nyqphaWyGaGONKu8gpcQ==", + "subType": "06" + } + } + }, + "local_string_rand_explicit_altname": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACZfoO2LjY+IB31FZ1Tq7pHr0DCFKGJqWcXcOrnZ7bV9Euc9f101motJc31sp8nF5CTCfd83VQE0319eQrxDDaSw==", + "subType": "06" + } + } + }, + "local_string_det_auto_id": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACW0cZMYWOY3eoqQQkSdBtS9iHC4CSQA27dy6XJGcmTV8EDuhGNnPmbx0EKFTDb0PCSyCjMyuE4nsgmNYgjTaSuw==", + "subType": "06" + } + } + }, + "local_string_det_explicit_id": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACW0cZMYWOY3eoqQQkSdBtS9iHC4CSQA27dy6XJGcmTV8EDuhGNnPmbx0EKFTDb0PCSyCjMyuE4nsgmNYgjTaSuw==", + "subType": "06" + } + } + }, + "local_string_det_explicit_altname": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACW0cZMYWOY3eoqQQkSdBtS9iHC4CSQA27dy6XJGcmTV8EDuhGNnPmbx0EKFTDb0PCSyCjMyuE4nsgmNYgjTaSuw==", + "subType": "06" + } + } + }, + 
"local_object_rand_auto_id": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAADlekcUsETAkkKTjCVx5EISJN+sftrQax/VhaWXLyRgRz97adXXmwZkMyt+035SHZsF91i2LaXziMA4RHoP+nKFw==", + "subType": "06" + } + } + }, + "local_object_rand_auto_altname": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAADpaQmy5r6q9gLqEm+FIi/OyQgcuUnrICCP9rC4S3wR6qUHd82IW/3dFQUzwTkaXxgStjopamQMuZ4ESRj0xx0bA==", + "subType": "06" + } + } + }, + "local_object_rand_explicit_id": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAADCHRJCINzWY0u4gZPWEmHg/JoQ8IW4yMfUyzYJCQrEMp4rUeupIuxqSuq2QyLBYZBBv0r7t3lNH49I5qDeav2vA==", + "subType": "06" + } + } + }, + "local_object_rand_explicit_altname": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAADrHQQUnLF1jdNmFY/V266cS28XAB4nOKetHAcSbwkeUxNzgZT1g+XMQaYfcNMMv/ywypKU1KpgLMsEOpm4qcPkQ==", + "subType": "06" + } + } + }, + "local_object_det_explicit_id": { + "kms": "local", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "x": { "$numberInt": "1" } } + }, + "local_object_det_explicit_altname": { + "kms": "local", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "x": { "$numberInt": "1" } } + }, + "local_array_rand_auto_id": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAEXa7bQ5vGPNsLdklM/H+sop8aCL4vlDiVUoVjTAGjTngn2WLcdKLWxaNSyMdJpsI/NsxQJ58YrcwP+yHzi9rZVtRdbg7m8p+CYcq1vUm6UoQ=", + "subType": "06" + } + } + }, + "local_array_rand_auto_altname": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAEVlZlOvtRmGIhcYi/qPl3HKi/qf0yRQrkbVo9rScYkxDCBN9wA55pAWHDQ/5Sjy4d0DwL57k+M1G9e7xSIrv8xXKwoIuuabhSWaIX2eJHroY=", + "subType": "06" + } + } + }, + "local_array_rand_explicit_id": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAEYBLSYHHt2rohezMF4lMjNdqy9CY33EHf+pgRbJwVXZScLDgn9CcqeRsdU8bW5h2qgNpQvoSMBB7pW+Dgp1RauTHZSOd4PcZpAGjwoFDWSSM=", + "subType": "06" + } + } + }, + "local_array_rand_explicit_altname": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAES1IJ8S2NxWekolQockxLJvzFSGfKQ9Xbi55vO8LyWo0sIG9ZgPQXtVQkZ301CsdFduvx9A0vDqQ0MGYc4plxNnpUTizJPRUDyez5dOgZ9tI=", + "subType": "06" + } + } + }, + "local_array_det_explicit_id": { + "kms": "local", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { "$numberInt": "1" }, + { "$numberInt": "2" }, + { "$numberInt": "3" } + ] + }, + "local_array_det_explicit_altname": { + 
"kms": "local", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { "$numberInt": "1" }, + { "$numberInt": "2" }, + { "$numberInt": "3" } + ] + }, + "local_binData=00_rand_auto_id": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAF+hgWs4ZCo9GnmhSM9SDSWzWX4E7Tlp4TwlEy3zfO/rrMREECGB4u8LD8Ju9b8YP+xcZhMI1tcz/vrQS87NffUg==", + "subType": "06" + } + } + }, + "local_binData=00_rand_auto_altname": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAFtEvaXWpGfXC1GlKu0AeRDaeBKHryGoS0tAUr48vfYk7umCr+fJKyXCY9vSv7wCiQxWLe8V/EZWkHsu0zqhJw9w==", + "subType": "06" + } + } + }, + "local_binData=00_rand_explicit_id": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAF/1L5bvmMX3Bk2nAw8KvvRd/7nZ82XHVasT0jrlPhSiJU7ehJMeUCOb7HCHU6KgCzZB9C2W3NoVhLKIhE9ZnYdg==", + "subType": "06" + } + } + }, + "local_binData=00_rand_explicit_altname": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAFK0W5IWKzggR4UU+fhwA2p8YCHLfmx5y1OEtHc/9be9eEYTORACDmWY6207Vd4LhBJCedd+Q5qMm7NRZjjhyLEQ==", + "subType": "06" + } + } + }, + "local_binData=00_det_auto_id": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAF1ofBnK9+ERP29P/i14GQ/y3muic6tNKY532zCkzQkJSktYCOeXS8DdY1DdaOP/asZWzPTdgwby6/iZcAxJU+xQ==", + "subType": "06" + } + } + }, + "local_binData=00_det_explicit_id": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAF1ofBnK9+ERP29P/i14GQ/y3muic6tNKY532zCkzQkJSktYCOeXS8DdY1DdaOP/asZWzPTdgwby6/iZcAxJU+xQ==", + "subType": "06" + } + } + }, + "local_binData=00_det_explicit_altname": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAF1ofBnK9+ERP29P/i14GQ/y3muic6tNKY532zCkzQkJSktYCOeXS8DdY1DdaOP/asZWzPTdgwby6/iZcAxJU+xQ==", + "subType": "06" + } + } + }, + "local_binData=04_rand_auto_id": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAFxq38aA4k/tYHPwJFRK0pahlo/3zjCe3VHJRqURRA+04lbJCvdkQTawxWlf8o+3Pcetl1UcPTQigdYp5KbIkstuPstLbT+TZXHVD1os9LTRw=", + "subType": "06" + } + } + }, + "local_binData=04_rand_auto_altname": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAFTXNWchCPmCSY0+AL22/kCBmAoDJDX5T18jpJHLdvZtHs0zwD64b9hLvfRK268BlNu4P37KDFE6LT0QzjG7brqzFJf3ZaadDCKeIw1q7DWQs=", + "subType": "06" + } + } + }, + "local_binData=04_rand_explicit_id": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "explicit", 
+ "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAF7XgMgKjQmWYWmobrYWKiGYCKsy5kTgVweFBuzvFISaZjFsq2hrZB2DwUaOeT6XUPH/Onrdjc3fNElf3FdQDHif4rt+1lh9jEX+nMbRw9i3s=", + "subType": "06" + } + } + }, + "local_binData=04_rand_explicit_altname": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAFGoA/1H0waFLor6LbkUCLC2Wm9j/ZT7yifPbf0G7WvO0+gBLlffr3aJIQ9ik5vxPbmDDMCoYlbEYgb8i9I5tKC17WPhjVH2N2+4l9y7aEmS4=", + "subType": "06" + } + } + }, + "local_binData=04_det_auto_id": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAFwO3hsD8ee/uwgUiHWem8fGe54LsTJWqgbRCacIe6sxrsyLT6EsVIqg4Sn7Ou+FC3WJbFld5kx8euLe/MHa8FGYjxD97z5j+rUx5tt3T6YbA=", + "subType": "06" + } + } + }, + "local_binData=04_det_explicit_id": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAFwO3hsD8ee/uwgUiHWem8fGe54LsTJWqgbRCacIe6sxrsyLT6EsVIqg4Sn7Ou+FC3WJbFld5kx8euLe/MHa8FGYjxD97z5j+rUx5tt3T6YbA=", + "subType": "06" + } + } + }, + "local_binData=04_det_explicit_altname": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAFwO3hsD8ee/uwgUiHWem8fGe54LsTJWqgbRCacIe6sxrsyLT6EsVIqg4Sn7Ou+FC3WJbFld5kx8euLe/MHa8FGYjxD97z5j+rUx5tt3T6YbA=", + "subType": "06" + } + } + }, + "local_undefined_rand_explicit_id": { + "kms": "local", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$undefined": true } + }, + "local_undefined_rand_explicit_altname": { + "kms": "local", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$undefined": true } + }, + "local_undefined_det_explicit_id": { + "kms": "local", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$undefined": true } + }, + "local_undefined_det_explicit_altname": { + "kms": "local", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$undefined": true } + }, + "local_objectId_rand_auto_id": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAHfvxWRZOzfao3faE3RglL0IcDpBcNwqiGL5KgSokmRxWjjWeiel88Mbo5Plo0SswwNQ2H7C5GVG21L+UbvcW63g==", + "subType": "06" + } + } + }, + "local_objectId_rand_auto_altname": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAHhd9lSOO7bHE7PM+Uxa2v3X1FF66IwyEr0wqnyTaOM+cHQLmec/RlEaRIQ1x2AiW7LwmmVgZ0xBMK9CMh0Lhbyw==", + "subType": "06" + } + } + }, + "local_objectId_rand_explicit_id": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AizggCwAAAAAAAAAAAAAAAAHETwT9bo+JtboBVW/8GzzMQCpn22iiNJnlxYfyO45jvYJQRs29RRIouCsnFkmC7cfAO3GlVxv113euYjIO7AlAg==", + "subType": "06" + } + } + }, + "local_objectId_rand_explicit_altname": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAHhsguAMBzQUFBAitpJDzKEaMDGUGfvCzmUUhf4rnp8xeall/p91TUudaSMcU11XEgJ0Mym4IbYRd8+TfUai0nvw==", + "subType": "06" + } + } + }, + "local_objectId_det_auto_id": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAH4ElF4AvQ+kkGfhadgKNy3GcYrDZPN6RpzaMYIhcCGDvC9W+cIS9dH1aJbPU7vTPmEZnnynPTDWjw3rAj2+9mOA==", + "subType": "06" + } + } + }, + "local_objectId_det_explicit_id": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAH4ElF4AvQ+kkGfhadgKNy3GcYrDZPN6RpzaMYIhcCGDvC9W+cIS9dH1aJbPU7vTPmEZnnynPTDWjw3rAj2+9mOA==", + "subType": "06" + } + } + }, + "local_objectId_det_explicit_altname": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAH4ElF4AvQ+kkGfhadgKNy3GcYrDZPN6RpzaMYIhcCGDvC9W+cIS9dH1aJbPU7vTPmEZnnynPTDWjw3rAj2+9mOA==", + "subType": "06" + } + } + }, + "local_bool_rand_auto_id": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAIxGld4J/2vSWg5tjQulpkm9C6WeUcLbv2yfKRXPAbmLpv3u4Yrmr5qisJtqmDPTcb993WosvCYAh0UGW+zpsdEg==", + "subType": "06" + } + } + }, + "local_bool_rand_auto_altname": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAIpUFPiS2uoW1Aqs0WQkBa201OBmsuJ8WUKcv5aBPASkcwfaw9qSWs3QrbEDR2GyoU4SeYOByCAQMzXCPoIYAFdQ==", + "subType": "06" + } + } + }, + "local_bool_rand_explicit_id": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAIJuzu1a60meYlU3LMjw/7G4Vh/lqKopxdpGWoLXEmY/NoHgX6Fkv9iTwxv/Nv8rZwtawpFV+mQUG/6A1IHMBASQ==", + "subType": "06" + } + } + }, + "local_bool_rand_explicit_altname": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAIn9VjxL5TdGgJLckNHRrIaL32L31q5OERRZG2M5OYKk66TnrlfEs+ykcDvGwMGKpr/PYjY5kBHDc/oELGJJbWRQ==", + "subType": "06" + } + } + }, + "local_bool_det_explicit_id": { + "kms": "local", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "local_bool_det_explicit_altname": { + "kms": "local", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "local_date_rand_auto_id": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AizggCwAAAAAAAAAAAAAAAAJPPv4MC5xzt2uxPGBHH9g2z03o9SQjjmuxt97Ub1UcKCCHsGED3bx6YSrocuEMiFFI4d5Fqgl8HNeS4j0PR0tYA==", + "subType": "06" + } + } + }, + "local_date_rand_auto_altname": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAJ6i2A9Hi4xWlOMjFMGpwaRctR1VFnb4El166n18RvjKic46V+WoadvLHS32RhPOvkLVYwIeU4C+vrO5isBNoUdw==", + "subType": "06" + } + } + }, + "local_date_rand_explicit_id": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAJHcniV7Q0C8ZTWrE0hp5i5bUPlrrRdNLZckfODw8XNVtVPDjbznglccQmI7w1t8kOVp65eKzVzUOXN0YkqA+1QA==", + "subType": "06" + } + } + }, + "local_date_rand_explicit_altname": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAJKCUCjC3hsmEKKYwGP3ceh3zR+ArE8LYFOQfN87aEsTr60VrzHXmsE8PvizRhhMnrp07ljzQkuat39L+0QSR2qQ==", + "subType": "06" + } + } + }, + "local_date_det_auto_id": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAJ1GMYQTruoKr6fv9XCbcVkx/3yivymPSMEkPCRDYxQv45w4TqBKMDfpRd1TOLOv1qvcb+gjH+z5IfVBMp2IpG/Q==", + "subType": "06" + } + } + }, + "local_date_det_explicit_id": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAJ1GMYQTruoKr6fv9XCbcVkx/3yivymPSMEkPCRDYxQv45w4TqBKMDfpRd1TOLOv1qvcb+gjH+z5IfVBMp2IpG/Q==", + "subType": "06" + } + } + }, + "local_date_det_explicit_altname": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAJ1GMYQTruoKr6fv9XCbcVkx/3yivymPSMEkPCRDYxQv45w4TqBKMDfpRd1TOLOv1qvcb+gjH+z5IfVBMp2IpG/Q==", + "subType": "06" + } + } + }, + "local_null_rand_explicit_id": { + "kms": "local", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "local_null_rand_explicit_altname": { + "kms": "local", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "local_null_det_explicit_id": { + "kms": "local", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "local_null_det_explicit_altname": { + "kms": "local", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "local_regex_rand_auto_id": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAALXKw7zSgqQj1AKoWO0MoMxsBuu0cMB6KdJQCRKdupoLV/Y22owwsVpDDMv5sgUpkG5YIV+Fz7taHodXE07qHopw==", + "subType": "06" + } + } + }, + "local_regex_rand_auto_altname": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AizggCwAAAAAAAAAAAAAAAALntOLXq7VW1+jwba/dSbidMo2bewNo7AtK9A1CPwk9XrjUQaEOQxfRpho3BYQEo2U67fQdsY/tyhaj4jduHn9JQ==", + "subType": "06" + } + } + }, + "local_regex_rand_explicit_id": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAALlMMG2iS/gEOEsVKR7sxBJP2IUzZ+aRbozDSkqADncresBvaPBSE17lng5NG7H1JRCAcP1rH/Te+0CrMd7JpRAQ==", + "subType": "06" + } + } + }, + "local_regex_rand_explicit_altname": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAL1YNnlVu5+njDLxh1LMhIPOH19RykAXhxrUbCy6TI5MLQsAOSgAJbXOTXeKr0D8/Ff0phToWOKl193gOOIp8yZQ==", + "subType": "06" + } + } + }, + "local_regex_det_auto_id": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAALiZbL5nFIZl7cSLH5E3wK3jJeAeFc7hLHNITtLAu+o10raEs5i/UCihMHmkf8KHZxghs056pfm5BjPzlL9x7IHQ==", + "subType": "06" + } + } + }, + "local_regex_det_explicit_id": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAALiZbL5nFIZl7cSLH5E3wK3jJeAeFc7hLHNITtLAu+o10raEs5i/UCihMHmkf8KHZxghs056pfm5BjPzlL9x7IHQ==", + "subType": "06" + } + } + }, + "local_regex_det_explicit_altname": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAALiZbL5nFIZl7cSLH5E3wK3jJeAeFc7hLHNITtLAu+o10raEs5i/UCihMHmkf8KHZxghs056pfm5BjPzlL9x7IHQ==", + "subType": "06" + } + } + }, + "local_dbPointer_rand_auto_id": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAMUdAA9uOSk1tXJVe/CG3Ps6avYTEF1eHj1wSlCHkFxqlMtTO+rIQpikpjH0MrcXvEEdAO8g5hFZ01I7DWyK5AAxTxDqVF+kOaQ2VfKs6hyuo=", + "subType": "06" + } + } + }, + "local_dbPointer_rand_auto_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAMiNqvqLwZrPnsF235z+Obl1K9iEXdJ5GucMGpJdRG4lRvRE0Oy1vh6ztNTpYPY/tXyUFTBWlzl/lITalSEm/dT1Bnlh0iPAFrAiNySf662og=", + "subType": "06" + } + } + }, + "local_dbPointer_rand_explicit_id": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAM+Tn31YcKiowBTJWRYCYAEO7UARDE2/jTVGEKXCpiwEqqP3JSAS0b80zYt8dxo5mVhUo2a02ClKrB8vs+B6sU1kXrahSaVSEHZlRSGN9fWgo=", + "subType": "06" + } + } + }, + "local_dbPointer_rand_explicit_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAMdOZZUvpJIqG9qiOLy5x4BdftyHipPDZn/eeLEc7ir3v4jJsY3dsv6fQERo5U9lMynNGA9PJePVzq5tWsIMX0EcCQcMfGmosfkYDzN1OX99A=", + "subType": "06" + } + } + }, + "local_dbPointer_det_auto_id": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASzggCwAAAAAAAAAAAAAAAAMQWace2C1w3yqtmo/rgz3YtIDnx1Ia/oDsoHnnMZlEy5RoK3uosi1hvNAZCSg3Sen0H7MH3XVhGGMCL4cS69uJ0ENSvh+K6fiZzAXCKUPfvM=", + "subType": "06" + } + } + }, + "local_dbPointer_det_explicit_id": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAMQWace2C1w3yqtmo/rgz3YtIDnx1Ia/oDsoHnnMZlEy5RoK3uosi1hvNAZCSg3Sen0H7MH3XVhGGMCL4cS69uJ0ENSvh+K6fiZzAXCKUPfvM=", + "subType": "06" + } + } + }, + "local_dbPointer_det_explicit_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAMQWace2C1w3yqtmo/rgz3YtIDnx1Ia/oDsoHnnMZlEy5RoK3uosi1hvNAZCSg3Sen0H7MH3XVhGGMCL4cS69uJ0ENSvh+K6fiZzAXCKUPfvM=", + "subType": "06" + } + } + }, + "local_javascript_rand_auto_id": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAANNL2AMKwTDyMIvxLKhBxZKx50C0tBdkLwuXmuMcrUqZeH8bsvjtttoM9LWkkileMyeTWgxblJ1b+uQ+V+4VT6fA==", + "subType": "06" + } + } + }, + "local_javascript_rand_auto_altname": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAANBjBlHGw3K3TWQHpvfa1z0bKhNnVFC/lZArIexo3wjdGq3MdkGA5cuBIp87HHmOIv6o/pvQ9K74v48RQl+JH44A==", + "subType": "06" + } + } + }, + "local_javascript_rand_explicit_id": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAANjvM7u3vNVyKpyI7g5kbzBpHPzXzOQToDSng5/c9yjMG+qi4TPtOyassobJOnMmDYBLyqRXCl/GsDLprbg5jxuA==", + "subType": "06" + } + } + }, + "local_javascript_rand_explicit_altname": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAANMtO7KneuVx4gSOjX4MQjKL80zJhnt+efDBylkpNsqKyxBXB60nkiredGzwaK3/4QhIfGJrC1fQpwUwu/v1L17g==", + "subType": "06" + } + } + }, + "local_javascript_det_auto_id": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAANmQsg9E/BzGJVNVhSNyunS/TH0332oVFdPS6gjX0Cp/JC0YhB97DLz3N4e/q8ECaz7tTdQt9JacNUgxo+YCULUA==", + "subType": "06" + } + } + }, + "local_javascript_det_explicit_id": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAANmQsg9E/BzGJVNVhSNyunS/TH0332oVFdPS6gjX0Cp/JC0YhB97DLz3N4e/q8ECaz7tTdQt9JacNUgxo+YCULUA==", + "subType": "06" + } + } + }, + "local_javascript_det_explicit_altname": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAANmQsg9E/BzGJVNVhSNyunS/TH0332oVFdPS6gjX0Cp/JC0YhB97DLz3N4e/q8ECaz7tTdQt9JacNUgxo+YCULUA==", + "subType": "06" + } + } + }, + "local_symbol_rand_auto_id": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + 
"$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAOOuO2b23mekwI8b6gWeEgRy1lLOCsNyBKvdmizK7/oOVKCvd+3kwUn9a6TxygooiVAN/Aohr1cjb8jRlMPWpkP0iO0+Tt6+vkizgFsQW4iio=", + "subType": "06" + } + } + }, + "local_symbol_rand_auto_altname": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAOhN4QPOcmGnFKGvTfhz6TQleDA02X6oWULLHTnOUJYfE3OUSyf2ULEQh1yhdKdwXMuYVgGl28pMosiwkBShrXYe5ZlMjiZCIMZWSdUMV0tXk=", + "subType": "06" + } + } + }, + "local_symbol_rand_explicit_id": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAO9aWi9RliwQHdXHoJME9VyN6XgyGd95Eclx+ZFYfLxBGAuUnPNjSfVuNZwYdyKC8JX79+mYhk7IXmcGV4z+4486sxyLk3idi4Kmpz2ESqV5g=", + "subType": "06" + } + } + }, + "local_symbol_rand_explicit_altname": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAO/qev3DPfpkQoSW9aHOyalwfI/VYDQVN5VMINx4kw2vEqHiI1HRdZRPOz3q74TlQEy3TMNMTYdCvh5bpN/PptRZCTQbzP6ugz9dTp79w5/Ok=", + "subType": "06" + } + } + }, + "local_symbol_det_auto_id": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAOsg5cs6VpZWoTOFg4ztZmpj8kSTeCArVcI1Zz2pOnmMqNv/vcKQGhKSBbfniMripr7iuiYtlgkHGsdO2FqUp6Jb8NEWm5uWqdNU21zR9SRkE=", + "subType": "06" + } + } + }, + "local_symbol_det_explicit_id": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAOsg5cs6VpZWoTOFg4ztZmpj8kSTeCArVcI1Zz2pOnmMqNv/vcKQGhKSBbfniMripr7iuiYtlgkHGsdO2FqUp6Jb8NEWm5uWqdNU21zR9SRkE=", + "subType": "06" + } + } + }, + "local_symbol_det_explicit_altname": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAOsg5cs6VpZWoTOFg4ztZmpj8kSTeCArVcI1Zz2pOnmMqNv/vcKQGhKSBbfniMripr7iuiYtlgkHGsdO2FqUp6Jb8NEWm5uWqdNU21zR9SRkE=", + "subType": "06" + } + } + }, + "local_javascriptWithScope_rand_auto_id": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAP5gLMvLOAc6vGAvC7bGmEC4eweptAiX3A7L0iCoHps/wm0FBLkfpF6F4pCjVYiY1lTID38wliRLPyhntCj+cfvlMfKSjouNgXMIWyQ8GKZ2c=", + "subType": "06" + } + } + }, + "local_javascriptWithScope_rand_auto_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAPVsw9Opn/P5SAdJhX4MTxIcsmaG8isIN4NKPi9k1u/Vj7AVkcxYqwurAghaJpmfoAgMruvzi1hcKvd05yHd9Nk0vkvODwDgnjJB6QO+qUce8=", + "subType": "06" + } + } + }, + "local_javascriptWithScope_rand_explicit_id": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAPLUa+nsrqiHkVdE5K1xl/ZsiZqQznG2yVXyA3b3loBylbcL2NEBp1JUeGnPZ0y5ZK4AmoL6NMH2Io313rW3V8FTArs/OOQWPRJSe6h0M3wXk=", + "subType": "06" + } + } + 
}, + "local_javascriptWithScope_rand_explicit_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAPzUKaXCH0JImSlY73HVop9g9c0YssNEiA7Dy7Vji61avxvnuJJfghDchdwwaY7Vc8+0bymoanUWcErRctLzjm+1uKeMnFQokR8wFtnS3PgpQ=", + "subType": "06" + } + } + }, + "local_javascriptWithScope_det_explicit_id": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$code": "x=1", "$scope": {} } + }, + "local_javascriptWithScope_det_explicit_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$code": "x=1", "$scope": {} } + }, + "local_int_rand_auto_id": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAQHXpXb3KlHA2KFTBgl0VoLCu0CUf1ae4DckkwDorbredVSqxvA5e+NvVudY5yuea6bC9F57JlbjI8NWYAUw4q0Q==", + "subType": "06" + } + } + }, + "local_int_rand_auto_altname": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAQSxXMF4+TKV+a3lcxXky8VepEqdg5wI/jg+C4CAUgNurq2XhgrxyqiMjkU8z07tfyoLYyX6P+dTrwj6nzvvchCw==", + "subType": "06" + } + } + }, + "local_int_rand_explicit_id": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAQmzteYnshCI8HBGd7UYUKvcg4xl6M8PRyi1xX/WHbjyQkAJXxczS8hO91wuqStE3tBNSmulUejz9S691ufTd6ZA==", + "subType": "06" + } + } + }, + "local_int_rand_explicit_altname": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAQLCHLru//++QSoWVEyw2v6TUfCnlrPJXrpLLezWf16vK85jTfm8vJbb2X2UzX04wGzVL9tCFFsWX6Z5gHXhgSBg==", + "subType": "06" + } + } + }, + "local_int_det_auto_id": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAQIxWjLBromNUgiOoeoZ4RUJUYIfhfOmab0sa4qYlS9bgYI41FU6BtzaOevR16O9i+uACbiHL0X6FMXKjOmiRAug==", + "subType": "06" + } + } + }, + "local_int_det_explicit_id": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAQIxWjLBromNUgiOoeoZ4RUJUYIfhfOmab0sa4qYlS9bgYI41FU6BtzaOevR16O9i+uACbiHL0X6FMXKjOmiRAug==", + "subType": "06" + } + } + }, + "local_int_det_explicit_altname": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAQIxWjLBromNUgiOoeoZ4RUJUYIfhfOmab0sa4qYlS9bgYI41FU6BtzaOevR16O9i+uACbiHL0X6FMXKjOmiRAug==", + "subType": "06" + } + } + }, + "local_timestamp_rand_auto_id": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AizggCwAAAAAAAAAAAAAAAARntIycg0Xkd16GEa//VSJI4Rkl7dT6MpRa+D3MiTEeio5Yy8zGK0u2BtEP/9MCRQw2hJDYj5znVqwhdduM0OTiA==", + "subType": "06" + } + } + }, + "local_timestamp_rand_auto_altname": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAARWA9Ox5ejDPeWxfjbRgcGCtF/G5yrPMbBJD9ESDFc0NaVe0sdNNTisEVxsSkn7M/S4FCibKh+C8femr7xhu1iTw==", + "subType": "06" + } + } + }, + "local_timestamp_rand_explicit_id": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAARrEfOL4+4Qh7IkhHnHcBEANGfMF8n2wUDnsZ0lXEb0fACKzaN5OKaxMIQBs/3pFBw721qRfCHY+ByKeaQuABbzg==", + "subType": "06" + } + } + }, + "local_timestamp_rand_explicit_altname": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAARW8nwmnBt+LFIAcFWvOzX8llrGcveQKFhyYUIth9d7wtpTyc9myFp8GBQCnjDpKzA6lPmbqVYeLU0L9q0h6SHGQ==", + "subType": "06" + } + } + }, + "local_timestamp_det_auto_id": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAR6uMylGytMq8QDr5Yz3w9HlW2MkGt6yIgUKcXYSaXru8eer+EkLv66/vy5rHqTfV0+8ryoi+d+PWO5U6b3Ng5Gg==", + "subType": "06" + } + } + }, + "local_timestamp_det_explicit_id": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAR6uMylGytMq8QDr5Yz3w9HlW2MkGt6yIgUKcXYSaXru8eer+EkLv66/vy5rHqTfV0+8ryoi+d+PWO5U6b3Ng5Gg==", + "subType": "06" + } + } + }, + "local_timestamp_det_explicit_altname": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAR6uMylGytMq8QDr5Yz3w9HlW2MkGt6yIgUKcXYSaXru8eer+EkLv66/vy5rHqTfV0+8ryoi+d+PWO5U6b3Ng5Gg==", + "subType": "06" + } + } + }, + "local_long_rand_auto_id": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAASrinKUOpHIB7MNRmCAPWcP4CjZwfr5JaRT3G/GqY9B/6csj3+N9jmo1fYvM8uHcnmf5hzDDOamaE2FF1jDKkrHw==", + "subType": "06" + } + } + }, + "local_long_rand_auto_altname": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAShWMPYDkCpTC2XLYyykPJMihASLKn6HHcB2Eh7jFwQb/8D1HCQoPmOHMyXaN4AtIKm1oqEfma6FSnEPENQoledQ==", + "subType": "06" + } + } + }, + "local_long_rand_explicit_id": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAASd2h34ZLib+GiYayrm/FIZ/weg8wF41T0PfF8NCLTJCoT7gIkdpNRz2zkkQgZMR31efNKtsM8Bs4wgZbkrXsXWg==", + "subType": "06" + } + } + }, + "local_long_rand_explicit_altname": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AizggCwAAAAAAAAAAAAAAAASPAvdjz+a3FvXqDSjazaGqwZxrfXlfFB5/VjQFXQB0gpodCEaz1qaLSKfCWBg83ftrYKa/1sa44gU5NBthDfDwQ==", + "subType": "06" + } + } + }, + "local_long_det_auto_id": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAASQk372m/hW3WX82/GH+ikPv3QUwK7Hh/RBpAguiNxMdNhkgA/y2gznVNm17t6djyub7+d5zN4P5PLS/EOm2kjtw==", + "subType": "06" + } + } + }, + "local_long_det_explicit_id": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAASQk372m/hW3WX82/GH+ikPv3QUwK7Hh/RBpAguiNxMdNhkgA/y2gznVNm17t6djyub7+d5zN4P5PLS/EOm2kjtw==", + "subType": "06" + } + } + }, + "local_long_det_explicit_altname": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAASQk372m/hW3WX82/GH+ikPv3QUwK7Hh/RBpAguiNxMdNhkgA/y2gznVNm17t6djyub7+d5zN4P5PLS/EOm2kjtw==", + "subType": "06" + } + } + }, + "local_decimal_rand_auto_id": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAATLnMMDZhnGSn5F5xHhsJXxiTGXd61Eq6fgppOlxUNVlsZNYyr5tZ3owfTTqRuD9yRg97x65WiHewBBnJJSeirCTAy9zZxWPVlJSiC0gO7rbM=", + "subType": "06" + } + } + }, + "local_decimal_rand_auto_altname": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAATenMh7NKQioGjpuEojIrYKFaJhbuGxUgu2yTTbe3TndhgHryhW9GXiUqo8WTpnXqpC5E/z03ZYLWfCbe7qGdL6T7bbrTpaTaWZnnAm3XaCqY=", + "subType": "06" + } + } + }, + "local_decimal_rand_explicit_id": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAT9vqXuKRh+2HxeCMr+pQYdhYNw7xrTdU4dySWz0X6tCK7LZO5AV72utmRJxID7Bqv1ZlXAk00V92oDLyKG9kHeG5+S34QE/aLCPsAWcppfxY=", + "subType": "06" + } + } + }, + "local_decimal_rand_explicit_altname": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAATtqOCFMbOkls3LikQNXlnlkRr5gJns1+5Kvbt7P7texMa/QlXkYSHhtwESyfOcCQ2sw1T0eZ9DDuNaznpdK2KIqZBkVEC9iMoxqIqXF7Nab0=", + "subType": "06" + } + } + }, + "local_decimal_det_explicit_id": { + "kms": "local", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$numberDecimal": "1.234" } + }, + "local_decimal_det_explicit_altname": { + "kms": "local", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$numberDecimal": "1.234" } + }, + "local_minKey_rand_explicit_id": { + "kms": "local", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$minKey": 1 } + }, + "local_minKey_rand_explicit_altname": { + "kms": "local", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$minKey": 1 } + }, + "local_minKey_det_explicit_id": { + "kms": "local", + "type": "minKey", + "algo": "det", + "method": 
"explicit", + "identifier": "id", + "allowed": false, + "value": { "$minKey": 1 } + }, + "local_minKey_det_explicit_altname": { + "kms": "local", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$minKey": 1 } + }, + "local_maxKey_rand_explicit_id": { + "kms": "local", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "local_maxKey_rand_explicit_altname": { + "kms": "local", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "local_maxKey_det_explicit_id": { + "kms": "local", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "local_maxKey_det_explicit_altname": { + "kms": "local", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { "$maxKey": 1 } + }, + "payload=0,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACcsBdT93ivCyvtkfQz9qb1A9Ll+I6hnGE0kFy3rmVG6xAvipmRJSoVq3iv7iUEDvaqmPXfjeH8h8cPYT86v3XSg==", + "subType": "06" + } + } + }, + "payload=1,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACQOzpNBEGSrANr3Wl8uYpqeIc7pjc8e2LS2FaSrb8tM9F3mR1FqGgfJtn3eD+HZf3Y3WEDGK8975a/1BufkMqIQ==", + "subType": "06" + } + } + }, + "payload=2,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACyGJEcuN1pG5oSEyxuKFwqddGHVU5Untbib7LkmtoJe9HngTofkOpeHZH/hV6Z3CFxLu6WFliJoySsFFbnFy9ag==", + "subType": "06" + } + } + }, + "payload=3,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACLbp4w6mx45lR1vvgmeRja/y8U+WnR2oH4IpfrDi4lKM+JPVnJweiN3/1wAy+sXSy0S1Yh9yxmhh9ISoTkAuVxw==", + "subType": "06" + } + } + }, + "payload=4,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACG0qMY/GPZ/2CR61cxbuizywefyMZVdeTCn5KFjqwejgxeBwX0JmGNHKKWbQIDQykRFv0q0WHUgsRmRhaotNCyQ==", + "subType": "06" + } + } + }, + "payload=5,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACJI1onNpQfZhaYWrPEzHvNaJRqUDZK2xoyonB5E473BPgp3zvn0Jmz1deL8GzS+HlkjCrx39OvHyVt3+3S0kYYw==", + "subType": "06" + } + } + }, + "payload=6,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAClyKY9tZBjl7SewSXr3MdoWRDUNgLaXDUjENpjyYvi/54EQ9a+J/LAAh1892i+mLpYxEUAmcftPyfX3VhbCgUQw==", + "subType": "06" + } + } + }, + "payload=7,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + 
"allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACAMbEA+kNvnVV7B//ds2/QoVot061kbazoMwB/psB5eFdLVB5qApAXEWgQEMwkNnsTUYbtSduQz6uGwdagtNBRw==", + "subType": "06" + } + } + }, + "payload=8,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACzdSK/d7Ni6D8qUgNopnEU5ia1K5llhBGk3O1Tf71t4ThnQjYW9eI/rIohWmev5CGWLHhwuvvKUtFcTAe+NMQww==", + "subType": "06" + } + } + }, + "payload=9,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACzQcEa+ktF2EZf35TtyatnSGGaIVvFhZNuo5P3VwQvoONJrK2cSad7PBDAv3xDAB+VPZAigXAGQvd051sHooOHg==", + "subType": "06" + } + } + }, + "payload=10,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACpfoDmApsR5xOD3TDhcHeD7Jco3kPFuuWjDpHtMepMOJ3S0c+ngGGhzPGZtEz2xuD/E7AQn1ryp/WAQ+WwkaJkQ==", + "subType": "06" + } + } + }, + "payload=11,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACICMRXmx3oKqYv0IpmzkSMBIGT4Li3MPBF4Lw1s5F69WvZApD58glIKB6b7koIrF5qc2Wrb1/Nw+stRv0zvQ8Y9CcFV4OHm6WoEw+XDlWXJ4=", + "subType": "06" + } + } + }, + "payload=12,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACTArUn0WUTojQC4fSvq3TwJVTsZNhWAK2WB057u2EnkUzMC0xsbU6611W6Okx6idZ7pMudXpBC34fRDrJPXOu3BxK+ZLCOWS2FqsvWq3HeTY=", + "subType": "06" + } + } + }, + "payload=13,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACU1Ojn7EM2i+5KK2Beh1gPLhryK3Y7PtaZ/v4JvstxuAV4OHOR9yROP7pwenHXxczkWXvcyMY9OCdmHO8pkQkXO21798IPkDDN/ejJUFI0Uw=", + "subType": "06" + } + } + }, + "payload=14,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAAC0ZLwSliCbcr/e1uiYWk6gRuD/5qiyulQ7IUNWjhpBR6SLUfX2+yExLzps9hoOp53j9zRSKIzyleZ8yGLTLeN+Lz9BUe2ZT+sV8NiqZz3pkA=", + "subType": "06" + } + } + }, + "payload=15,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACQ9pmlQeFDr+jEhFwjL/eGVxdv70JdnkLaKdJ3/jkvCX1VPU5HmQIi+JWY3Rrw844E/6sBR6zIODn5aM0WfyP8a2zKRAWaVQZ7n+QE9hDN/8=", + "subType": "06" + } + } + }, + "payload=16,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AizggCwAAAAAAAAAAAAAAAACiOcItInDGHqvkH0I3udp5nnX32XzDeqya/3KDjgZPT5GHek1vFTZ4924JVxFqFQz+No9rOVmyxm8O2fxjTK2vsjtADzKGnMTtFYZqghYCuc=", + "subType": "06" + } + } + }, + "payload=0,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASzggCwAAAAAAAAAAAAAAAACijFptWQy7a1Y0rpXEvamXWI9v9dnx0Qj84/mKUsVpc3agkQ0B04uPYeROdt2MeEeiZoEKVWV0NjBocAQCEz7dw==", + "subType": "06" + } + } + }, + "payload=1,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAChR90taVWsZk+++sgibX6CnFeQQHNoB8V+n2gmDe3CIT/t+WvhMf9D+mQipbAlrUyHgGihKMHcvAZ5RZ/spaH4Q==", + "subType": "06" + } + } + }, + "payload=2,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAC67wemDv1Xdu7+EMR9LMBTOxfyAqsGaxQibwamZItzplslL/Dp3t9g9vPuNzq0dWwhnfxQ9GBe8OA3dtRaifYCA==", + "subType": "06" + } + } + }, + "payload=3,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACVLxch+uC7weXrbtylCo1m4HYZmh0sd9JCrlTECO2M56JK1X9a30i2BDUdhPuoTvvODv74CGXkZKdist3o0mGAQ==", + "subType": "06" + } + } + }, + "payload=4,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACexfIZGkOYaCGktOUc6cgAYg7Bd/C5ZYmdb7b8+rd5BKWbthW6N6CxhDIyh/DHvkPAeIzfTYA2/9w6tsjfD/TPQ==", + "subType": "06" + } + } + }, + "payload=5,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACjUH/dPW4egOvFMJJnpWK8v27MeLkbXC4GFl1j+wPqTsIEeIWkzEmcXjHLTQGE2GplHHc/zxwRwD2dXdbzvsCDw==", + "subType": "06" + } + } + }, + "payload=6,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACzvS+QkGlvb05pNn+vBMml09yKmE8yM6lwccNIST5uZSsUxXf2hrxPtO7Ylc4lmBAJt/9bcM59JIeT9fpYMc75w==", + "subType": "06" + } + } + }, + "payload=7,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACSf2RxHJpRuh4j8nS1dfonUtsJEwgqfWrwOsfuT/tAGXgDN0ObUpzL2K7G2vmePjP4dwycCSIL3+2j34bqBJK1Q==", + "subType": "06" + } + } + }, + "payload=8,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACu96YYeLXXoYdEZYNU9UAZjSd6G4fOE1edrA6/RjZKVGWKxftmvj5g1VAOiom0XuTZUe1ihbnwhvKexeoa3Vc8Q==", + "subType": "06" + } + } + }, + "payload=9,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACX+UjBKo9+N0Z+mbyqZqkQv2ETMSn6aPTONWgJtw5nWklcxKjUSSLI+8LW/6M6Xf9a7177GsqmV2f/yCRF58Xtw==", + "subType": "06" + } + } + }, + "payload=10,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACL6TVscFzIJ9+Zj6LsCZ9xhaZuTZdvz1nJe4l69nKyj9hCjnyuiV6Ve4AXwQ5W1wiPfkJ0fCZS33NwiHw7QQ/vg==", + "subType": "06" + } + } + }, + "payload=11,algo=det": { + "kms": "local", + "type": "string", + "algo": 
"det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACPLq7IcWhTVwkKmy0flN7opoQzx7tTe1eD9JIc25FC9B6KGQkdcRDglDDR7/m6+kBtTnq88y63vBgomTxA8ZxQE+3pB7zCiBhX0QznuXvP44=", + "subType": "06" + } + } + }, + "payload=12,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACxv7v4pKtom5z1g9FUuyjEWAbdzJ3ytPNZlOfVr6KZnUPhIH7PfCz3/lTdYYWBTj01+SUZiC/7ruof9QDhsSiNWP7nUyHpQ/C3joI/BBjtDA=", + "subType": "06" + } + } + }, + "payload=13,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACZhiElQ/MvyVMwMkZPu8pT54Ap6TlpVSEbE4nIQzzeU3XKVuspMdI5IXvvgfULXKXc+AOu6oQXZ+wAJ1tErVOsb48HF1g0wbXbBA31C5qLEM=", + "subType": "06" + } + } + }, + "payload=14,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACdp8mDOeDuDLhE0LzTOT2p0CMaUsAQrGCzmiK6Ab9xvaIcPPcejUcpdO3XXAS/pPab4+TUwO5GbI5pDJ29zwaOiOz2H3OJ2m2p5BHQp9mCys=", + "subType": "06" + } + } + }, + "payload=15,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAACmtLohoP/gotuon2IvnGeLEfCWHRMhG9Wp4tPu/vbJJkJkbQTP35HRG9VrMV7KKrEQbOsJ2Y6UDBra4tyjn0fIkwwc/0X9i+xaP+TrwpNabE=", + "subType": "06" + } + } + }, + "payload=16,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASzggCwAAAAAAAAAAAAAAAAC6s9eUtSneKWj3/A7S+bPZLj3t1WtUh7ltW80b8jCRzA+kOI26j1MEb1tt68HgcnH1IJ3YQ/+UHlV95OgwSnIxlib/HJn3U0s8mpuCWe1Auo=", + "subType": "06" + } + } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAB0S2kOZe54q6iZqeTLndkX+kehTKtb30jTP7FS+Zx+cxhFs626OrGY+jrH41cLfroCccacyNHUZFRinfqZPNOyw==", + "subType": "06" + } + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABYViH7PLjCIdmTibW9dGCJADwXx2dRSMYxEmulPu89clAoeLDa8pwJ7YxLFQCcTGmZRfmp58dDDAzV8tyyE8QMg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABeRahSj4pniBp0rLIEZE8MdeyiIKcYuTZiuGzGiXbFbntEPow88DFHIBSxbMGR7p/8jCpPL+GqBwFkPkafXbMzg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABdaa3vKtO4cAEUjYJfOPl1KbbgeWtphfUuJd6MxR9VReNSf1jc+kONwmkPVQs2WyZ1n+TSQMGRoBp1nHRttDdTg==", + "subType": "06" + } + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + 
"identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACeoztcDg9oZ7ixHinReWQTrAumpsfyb0E1s3BGOFHgBCi1tW79CEXfqN8riFRc1YeRTlN4k5ShgHaBWBlax+XoQ==", + "subType": "06" + } + } + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACov9cXQvDHeKOS5Gxcxa8vdAcTsTXDYgUucGzsCyh4TnTWKGQEVk3DHndUXX569TKCjq5QsC//oWEwweCn1nZ4g==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACKU5qTdMdO0buQ/37ZRANUAAafcsoNMOTxJsDOfkqUb+/kRgM1ePlwVvk4EJiAGhJ/4SEmEOpwv05TT3PxGur2Q==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACX/ODKGHUyAKxoJ/c/3lEDBTc+eP/VS8OHrLhYoP96McpnFSgYi5jfUwvrFYa715fkass4N0nAHE6TzoGTYyk6Q==", + "subType": "06" + } + } + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADWkZMsfCo4dOPMH1RXC7GkZFt1RCjJf0vaLDA09ih1Jl47SOetZELQ7B1TQjRQitktzrfD43jk8Fn4J5ZYZu1qQ==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADJFMymfstltZP1oAqj4bgbCk8uLGtCd12eLqvSq0ZO+JDvls7PAovwmoWwigHunP8BBXT8sLydK+jn1sHfnhrlw==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + 
"identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADCen+XrLYKg7gIVubVfdbQwuJ0mFHxhSUUyyBWj4RCeLeLUYXckboPGixXWB9XdwcOnInfF9u6qvktY67GtYASQ==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADnUyp/7eLmxxxOdsP+mNuJABK4PQoKFWDAY7lDrH6MYa03ryASOihPZWYZWXZLrbAf7cQQhElEkKqKwY8+NXgqg==", + "subType": "06" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEtk14WyoatZcNPlg3y/XJNsBt6neFJeQwR06B9rMGV58oIsmeE5zMtUOBYTgzlnwyKpqI/XVAg8s1VxvsrvGCyLVPwGVyDztwtMgVSW6QM3s=", + "subType": "06" + } + } + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAERTO63J4Nj1BpFlqVduA2IrAiGoV4jEOH3FnFgx7ZP7da/YBmLX/bc1EqdpC8v4faHxp74iU0xAB0yW4WgySDX7rriL5cw9sMpqgLRaBxGug=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEs09qQdNVwh+KFqKPREQkw0XFdRNHAvjYJzs5MDE9+QxvtKlmVKSK3wkxDdCrcH4r7ePV2nCy2h1IHYqaDnnt4s5dSawI2l88iTT+bBcCSrU=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEaQ/YL50up4YIMJuVJSiAP06IQ+YjdKLIfkN/prbOZMiXErcD1Vq1hwGhfGdpEsLVu8E7IhJb4wakVC/2dLZoRP95az6HqRRauNNZAIQMKfY=", + "subType": "06" + } + } + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFl/leuLAHf1p6aRKHdFyN9FM6MW2XzBemql2xQgqkwJ6YOQXW6Pu/aI1scXVOrvrSu3+wBvByjHu++1AqFgzZRQ==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + 
"base64": "AgGVERAAAAAAAAAAAAAAAAAF4Nq/LwyufT/mx0LtFSkupNHTuyjbr4yUy1N5/37XhkpqZ1e4sWCHGNaTDEm5+cvdnbqZ/MMkBv855dc8N7vnGA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFv1Kbv54uXJ76Ih63vtmszQtzkXqDlv8LDCFO3sjzu70+tgRXOhLm3J8uZpwoiNkgM6oNLn0en7tnEekYB9++CA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFgcYC1n7cGGXpv0qf1Kb8t9y/6kbhscGt2QJkQpAiqadFPPYDU/wwaKdDz94NpAHMZizUbhf9tvZ3UXl1bozhDA==", + "subType": "06" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFMzMC3BLn/zWE9dxpcD8G0h4aifSY0zSHS9xTVJXgq21s2WU++Ov2UvHatVozmtZltsUN9JvSWqOBQRkFsrXvI7bc4lYfOoOmfpTHFcRDA/c=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFDlBN5hUTcjamOg/sgyeG0S52kphsjUgvlpuqHYz6VVdLtZ69cGHOVqqyml3x2rVqWUZJjd4ZodOhlwWq9p+i5IYNot2QaBvi8NZSaiThTc0=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFjvS2ozJuAL3rCvyBpraVtgL91OMdiskmgYnyfKlzd8EhYLd1cL4yxnTUjRXx+W+p8uN0/QZo+mynhcWnwcq83raY+I1HftSTx+S6rZ0qyDM=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFqUMd/I0yOdy5W4THvFc6yrgSzB6arkRs/06b0M9Ii+QtAY6vbz+/aJ0Iy3Jm8TahC1wOZVmTj5luQpr+PHZMCEAFadv+0K/Nsx6xVhAh9gg=", + "subType": "06" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": 
"id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAH3sYVJpCKi310YxndMwm5ltEbbiRO1RwZxxeEkzI8tptbNXC8t7RkrT8VSJZ43wbGYCiqH5RZy9v8pYwtUm4STw==", + "subType": "06" + } + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHD7agzVEc0JwesHHhkpGYIDAHQ+3Hc691kqic6YmVvK2N45fD5aRKftaZNs5OxSj3tNHSo7lQ+DVtPj8uSSpsVg==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHEgKgy2mpMLpfeEWqbvQOaRZAy+cEGXGon3e53/JoH6dZneEyyt4ZrcrK6uRqyUPWX0q104JbCYxfbtHtdzWgPQ==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHqSv6Nruw3TIi7y0FPRjSfnJmWSdv5XMhAtnHNkT8MVuHeM32ayo0yc8dTA1wlkRtAI5JrGxTfERCXYuCojvvXg==", + "subType": "06" + } + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": 
"06" + } + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIYVWPvzSmiCs9LwRlv/AoQWhaS5mzoKX4W26M5eg/gPjOZbEVYOV80pWMxCcZWRAyV/NDWDUmKtRQDMU9b8lCJw==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIsAB01Ugqtw4T9SkuJBQN1y/ewpRAyz0vjFPdKI+jmPMmaXpMlXDJU8ZbTKm/nh6sjJCFcY5oZJ83ylbp2gHc6w==", + "subType": "06" + } + } + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIr8/qFd564X1mqHEhB0y7bzGFdrHuw+Gk45nXla3VvGHzeIJy6j2Wdl0uziWslMmBvNp8WweW+jQ6E2Fu7SiojQ==", + "subType": "06" + } + } + }, + "azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIWsca5FAnS2zhHnmKmexvvXMTgsZZ7uAFHnjQassUcay6mvIWH4hOnGiRxt5Zm0wO4S6cZq+PZrmEH5/n9rJcJQ==", + "subType": "06" + } + } + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJwKo7XW5daIFlwY1mDAnJdHlcUgF+74oViL28hQGhde63pkPyyS6lPkYrc1gcCK5DL7PwsSX4Vb9SsNAG9860xw==", + "subType": "06" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJYZdWIqvqTztGKJkSASMEOjyrUFKnYql8fMIEzfEZWx2BYsIkxxOUUUCASg/Jsn09fTLVQ7yLD+LwycuI2uaXsw==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJuWzKqi3KV8GbGGnT7i9N4BACUuNjt5AgKsjWIfrWRXK1+jRQFq0bYlVWaliT9CNIygL2aTF0H4eHl55PAI84MQ==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": 
"rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJ5JTtTuP4zTnEbaVlS/W59SrZ08LOC4ZIl+h+H4RnfHUfBXDwUou+APolVaYko+VZMKecrikdPeewgzWaqazJ1g==", + "subType": "06" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALsMm3W2ogEiI6m0l8dS5Xhqnw+vMBvN1EesOTqAZOk4tQleX6fWARwUUnjFxbuejU7ISb50fc/Ul+ntL9z/2nHQ==", + "subType": "06" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALITQNQI0hfCeMTxH0Hce1Cf5tinQG+Bq8EolUACvxUUQcDqIXfFXn19tV/Qyj4lIdnnwh/18hiswgEpJRK7uLGw==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALw/1QI/bKeiGUrrtC+yXOTvxZ2mJjSelPPGOm1mge0ws8DsX0DPHmo6MjhnRO4u0c/LWiE3hwHG2rYjAFlFXZ5A==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAL6Sl58UfFCHCZzWIB4r19/ZjeSRAoWeTFCFedKiwyR8/xnL+8jzXK/9+vTIspP6j35lFapr+f4iBNB9WjdpYNKA==", + "subType": "06" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { 
+ "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMaAd1v/XCYM2Kzi/f4utR6aHOFORmzZ17EepEjkn5IeKshktUpPWjI/dBwSunn5Qxx2zI3nm06c3SDvp6tw8qb7u4qXjLQYhlsQ0bHvvm+vE=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM6VNjkN9bMIzfC7AX0ZhOEXPpyPE0nzYq3c5TNHrgeGWdZDR9GVdbO9t55zQrQJJ2Mmevh8c0WaAUV+YODv7ty6TDBsPbaKWWqMzu/v9RXHo=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM66tywuMhwdyUjxfl7EOdKHNCLeIPnct3PgKrAKlOQFjiNQUIA2ShVy0qYpJcvvFsuQ5e8Bjr0IqeBc8mC7n4euRSM1UXpLqI5XHgXMMaYpI=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMtPQEbZ4gWoSYjVZLd5X6j0XxutWY1Ecrys2ErKRgZaxP0uGe8uw0cnr2Z5PYylaYmsSicLwD1PwWY42PKmaGBDraHmdfqDOPvrNxhBrfU/E=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + 
"method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANWXPb5z3a0S7F26vkmBF3fV+oXYUj15OEtnSlXlUrc+gbhbPDxSvCPnTBEy5sNu4ndkvEZZxYgZInkF2q4rhlfQ==", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANN4mcwLz/J4eOUknhVsy6kdF1ThDP8cx6dNpOwJWAiyPHEsn+i6JmMTlfQMBrUp9HB/u3R+jLO5yz4XgLUKE8Tw==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANJ+t5Z8hSQaoNzszzkWndAo4A0avDf9bKFa7euznz8ZYInnl9RUVqWMyxjSuIotAvTyYSJzxh+w2hKCgVf+MjEA==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANRLOQFpmkEg/KdWMmaurkNtUhy45rgtoipc9kQz6olgDWiMim81XC0AW5cOvjbHXL3w7Du28Kwdsp4j0PTTXHUQ==", + "subType": "06" + } + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAORMcgtQSU+/2Qlq57neRrVuAFSeSwkqdo+z1fh6IKjyEzhCy+u5bTzSzTopyKJQTCUZA2mSpRezWkM87oiGfhMFkBRVreMcE62eH+BLlgUaM=", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOIKlAw/A3nwHn0tO2cYtJx0azB8MGmXtt+bRptzn8yHlUSpMpYaiU0ssBBiLkmMLAITYebLqDk3NHESyP7PvbSfX1E2XVn2Nf694ZqPWMec8=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAO8SXW76AEr/6D6zyP1RYwmwdVM2AINaXZn3Ipy+fynWTUV6XIPIRR7xMTttNo2zlh7fgXDZ28PmjooGlQzn0q0JVQmXPCIPM3aqAmMcgyuqg=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": 
"rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOtoJWm2Ucre0foHIiOutsX1WIyub7t3Lby3/F8zRXn+l6ixlTjAPgWFwpRnYg96Lt2ACDDQ9CO51ejr9qk0b8LDBwG3qU5Cuibsp7vo1VsdI=", + "subType": "06" + } + } + }, + "azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPCw9NnvJyuTYIgZxr1w1UiG85PGZ4rO62DWWDF98HwVM/Y6u7hNdNjkaWjYFsPMl38ioHw/pS8GFR62QmH2RAw/BV0wI7pNy2evANr3i3gKg=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPXQzqnQ2UWkIYof8/OfadNMa7iVKAbOaiu7YGm8iVrx+W6uxKLPFugVqHtQ29hYXXf33xr8rqGNxDlAe7/x1OeYEif71f7LUkmKF9WxJV9Ko=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAP0nxlppgPyjLx0eBempbOlL21G6KbABSrE6+YuNDcsjJjxCQuLR9+aoAwa+yCDEC7GZ1E3oP489edKUuNpE4Ts26jy4aRegu4DmyECUeBwAg=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPO89afu9Sb+cK9wwM1cO1DPjvu5UNyObjjTScy1hy9PzllJGfj7b84f0Ah74jPYsMPwI0Eslu/IYF3+5jmquq5Qp/VUQESlxqRqRK0xIeMfs=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", 
+ "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQUyy4uWmWdzypsK81q9egREg4s80X3L2hzxJzC+fL08Xzy1z9grpPPCfJrluUVKMMGmmZR8gJPJ70igN3unJbzg==", + "subType": "06" + } + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQr4gyoHKpGsSJo8CMsYSJk/KilFMJhsDCmxrha7yfNW1uR5sjyZj4B4s6uTXGw76x7aR/AvecDlY3QFJb8L1mjg==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQ0zgXYPV1MuEFksmDpVDoWkoZQelm3+rYrMiT64KYywO//75799W8TbR3a7O6Q/ErjKQOin2OCp8EWwZqTDdz5w==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQG+qz00yizREbP3tla1elMiwf8TKLbUU2XWUP+E0vey/wvbjTTIzqwUlz/b9St77CHJhavypP3hMrngXR9GapbQ==", + "subType": "06" + } + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARwcXYtx+A7g/zGkjGdkyVxZGCO9Nzj3D70NIpl2TeH2j9qYGP4DenwL1xSgrL2Ez+X58d2BvNhKrjA9y2w1Z8kA==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARQ0Pjx3l92Aqhn2e1hot2M9rQ6aLPE2Iw8AVhm5AD8FWywWih12Fn2p9+kiE33yKPOCyrTWQHKPtB4yYhqnJgGg==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARvFMlIzh2IjpHkTJ8buqTOqBA0+CxVDsZacUhSHVMgJLN+0DJsJy8OfkmKMu9Lk5hULY00Udoja87x+79mYfmeQ==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AgGVERAAAAAAAAAAAAAAAAAR+2SCd7V5ukAkh7CYpNPIatzTL8osNoA4Mb5jjjbos8eMamImw0fbH8YA+Rdm4CgGdQQ9VDX7MtMWlArkj0Jpew==", + "subType": "06" + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASSSgX7k8iw0xFe0AiIzOu0e0P7Ujyfsk/Cdl0fR5X8V3QLVER+1Qa47Qpb8iWL2VLBSh+55HvIEtvhWn8SwXaog==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASUhKr5K7ulGTeFbhIvJ2DDE10gRAFn5+2zqnsIFSY8lYV2PBYcENdeNBXZs6kyIAYhJdQyuOChVCerTI5jmQWDw==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASHxawpjTHdXYRWQSZ7Qi7gFC+o4dW2mPH8s5nQkPFY/EubcJbdAZ5HFp66NfPaDJ/NSH6Vy+TkpX3683RC+bjSQ==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASVaMAv6UjuBOUZMJ9qz+58TQWmgaMpS9xrJziJY80ml9aRlDTtRubP7U40CgbDvrtY1QgHbkF/di1XDCB6iXMMg==", + "subType": "06" + } + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + 
"subType": "06" + } + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATJ6LZgPu9F+rPtYsMuvwOx62+g1dAk858BUtE9FjC/300DnbDiolhkHNcyoFs07NYUNgLthW2rISb/ejmsDCt/oqnf8zWYf9vrJEfHaS/Ocw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATX8eD6qFYWKwIGvXtQG79fXKuPW9hkIV0OwrmNNIqRltw6gPHl+/1X8Q6rgmjCxqvhB05AxTj7xz64gP+ILkPQY8e8VGuCOvOdwDo2IPwy18=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATBjQ9E5wDdTS/iI1XDqGmDBC5aLbPB4nSyrjRLfv1zEoPRjmcHlQmMRJA0mori2VQv6EBFNHeczFCenJaSAkuh77czeXM2vH3T6qwEIDs4dw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATtkjbhdve7MNuLaTm6qvaewuVUxeC1DMz1fd4RC4jeiBFMd5uZUVJTiOIerwQ6P5G5lkMlezKDWgKl2FUvZH6c7V3JknhsaWcV5iLWGUL6Zc=", + "subType": "06" + } + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + 
"gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABFoHQxnh1XSC0k1B01uFFg7rE9sZVBn4PXo26JX8gx9tuxu+4l9Avb23H9BfOzuWiEc43iw87K/W2y0VfKp5CCg==", + "subType": "06" + } + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABRkZkEtQEFB/r268cNfYRQbN4u5Cxjl9Uh+8wq9TFWLQH2E/9wj2vTLlxQ2cQsM7Qd+XxR5idjfBf9CKAfvUa/A==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABDSUZ+0BbDDEZxCXA+J2T6Js8Uor2dfXSf7s/hpLrg6dxcW2chpht9XLiLOXG5w83TzCAI5pF8cQgBpBpYjR8RQ==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABCYxugs7L+4S+1rr0VILSbtBm79JPTLuzluQAv0+8hbu5Z6zReOL6Ta1vQH1oA+pSPGYA4euye3zNl1X6ZewbPw==", + "subType": "06" + } + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_double_det_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACx3wSslJEiD80YLTH0n4Bbs4yWVPQl15AU8pZMLLQePqEtI+BJy3t2bqNP1098jS0CGSf+LQmQvXhJn1aNFeMTw==", + "subType": "06" + } + } + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAC5BTe5KP5UxSIk6dJlkz8aaZ/9fg44XPWHafiiL/48lcv3AWbu2gcBo1EDuc1sJQu6XMrtDCRQ7PCHsL7sEQMGQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACyJN55OcyXXJ71x8VphTaIuIg6kQtGgVKPhWx0LSdYc6JOjB6LTdA7SEWiSlSWWFZE26UmKcPbkbLDAYf4IVrzQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACoa0d9gqfPP5s3+GoruwzxoQFgli8SmjpTVRLAOcFxqGdfrwSbpYffSw/OR45sZPxXCL6T2MtUvZsl7ukv0jBnw==", + "subType": "06" + } + } + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_id": { + "kms": 
"gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADy+8fkyeNYdIK001YogXfKc25zRXS1VGIFVWR6jRfrexy9C8LBBfX3iDwGNPbP2pkC3Tq16OoziQB6iNGf7s7yg==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADixoDdvm57gH8ooOaKI57WyZD5uaPmuYgmrgAFuV8I+oaalqYctnNSYlzQKCMQX/mIcTxvW3oOWY7+IzAz7npvw==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADvq0OAoijgHaVMhsoNMdfWFLyISDo6Y13sYM0CoBXS/oXJNIJJvhgKPbFSV/h4IgiDLy4qNYOTJQvpqt094RPgQ==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADuTZF7/uqGjFbjzBYspPkxGWvvVAEN/ib8bfPOQrEobtTWuU+ju9H3TlT9DMuFy7RdUZnPB0D3HkM8+zky5xeBw==", + "subType": "06" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAE085kJIBX6S93D94bcRjkOegEKsksi2R1cxoVDoOpSdHh3S6bZAOh50W405wvnOKf3KTP9SICDUehQKQZSC026Y5dwVQ2GiM7PtpSedthKJs=", + "subType": "06" + } + } + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEk/FAXsaqyVr6I+MY5L0axeLhskcEfLZeB8whLMKbjLDLa8Iep+IdrFVSfKo03Zr/7Ah8Js01aT6+Vt4EDMJK0mGKZJOjsrAf3b6RS+Mzebg=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEDY7J9JGiurctYr7ytakNjcryVm42fkubcVpQpUYEkpK/G9NLGjrJuFgNW5ZVjYiPKEBbDB7vEtJqGux0BU++hrvVHNJ3wUT2mbDE18NE4KE=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_altname": { + 
"kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAErFFlw8W9J2y+751RnYLw0TSK9ThD6sP3i4zPbZtiuhc90RFoJhScvqM9i4sDKuYePZZRLBxdX4EZhZClOmswCGDLCIWsQlSvCwgDcIsRR/w=", + "subType": "06" + } + } + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF0R5BNkQKfm6wx/tob8nVGDEYV/pvy9UeCqc9gFNuB5d9KxCkgyxryV65rbB90OriqvWFO2jcxzchRYgRI3fQ+A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF4wcT8XGc3xNdKYDX5/cbUwPDdnkIXlWWCCYeSXSk2oWPxMZnPsVQ44nXKJJsKitoE3r/hL1sSG5239WzCWyx9g==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF07OFs5mlx0AB6QBanaybLuhuFbG+19KxSqHlSgELcz6TQKI6equX97OZdaWSWf2SSeiYm5E6+Y3lgA5l4KxC2A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFZ74Q7JMm7y2i3wRmjIRKefhmdnrhP1NXJgploi+44eQ2eRraZsW7peGPYyIfsXEbhgV5+aLmiYgvemBywfdogQ==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AhgjwAAAAAAAAAAAAAAAAAAFmDO47RTVXzm8D4hfhLICILrQJg3yOwG3HYfCdz7yaanPow2Y6bMxvXxk+kDS29aS8pJKDqJQQoMGc1ZFD3yYKsLQHRi/8rW6TNDQd4sCQ00=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFpiu9Q3LTuPmgdWBqo5Kw0vGF9xU1rMyE4xwR8GccZ7ZMrUcR4AnZnAP7ah5Oz8e7qonNYX4d09obesYSLlIjyK7J7qg+GWiEURgbvmOngaA=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFHRy8dveGuMng9WMmadIp39jD7iEfl3bEjKmzyNoAc0wIcSJZo9kdGbNEwZ4p+A1gz273fmAt/AJwAxwvqdlanLWBr4wiSKz1Mu9VaBcTlyY=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFiqO+sKodqXuVox0zTbKuY4Ng0QE1If2hDLWXljAEZdYABPk20UJyL/CHR49WP2Cwvi4evJCf8sEfKpR+ugPiyxWzP3iVe6qqTzP93BBjqoc=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAH8Kt6coc8bPI4QIwS1tIdk6pPA05xlZvrOyAQgvoqaozMtWzG15OunQLDdS3yJ5WRiV7kO6CIKqRrvL2RykB5sw==", + "subType": "06" + } + } + 
}, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHU5Yzmz2mbgNQrGSvglgVuv14nQWzipBkZUVSO4eYZ7wLrj/9t0fnizsu7Isgg5oA9fV0Snh/A9pDnHZWoccXUw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHsdq5/FLqbjMDiNzf+6k9yxUtFVjS/xSqErqaboOl21934pAzgkOzBGodpKKFuK0Ta4f3h21XS+84wlIYPMlTtw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHokIdXxNQ/NBMdMAVNxyVuz/J5pMMdtfxxJxr7PbsRJ3FoD2QNjTgE1Wsz0G4o09Wv9UWD+/mIqPVlLgx1sRtPw==", + "subType": "06" + } + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIf7vUYS5XFrEU4g03lzj9dk8a2MkaQdlH8nE/507D2Gm5XKQLi2jCENZ9UaQm3MQtVr4Uqrgz2GZiQHt9mXcG3w==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIdOC4Tx/TaVLRtOL/Qh8RUFIzHFB6nSegZoITwZeDethd8V3+R+aIAgzfN3pvmZzagHyVCm2nbNYJNdjOJhuDrg==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIzB14mX2vaZdiW9kGc+wYEgTCXA0FB5AVEyuERD00+K7U5Otlc6ZUwMtb9nGUu+M7PnnfxiDFHCrUWrTkAZzSUw==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIhRLg79ACCMfeERBgG1wirirrZXZzbK11RxHkAbf14Fji2L3sdMBdLBU5I028+rmtDdC7khcNMt11V6XGKpAjnA==", + "subType": "06" + } + } + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + 
"identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJL+mjI8xBmSahOOi3XkGRGxjhGNdJb445KZtRAaUdCV0vMKbrefuiDHJDPCYo7mLYNhRSIhQfs63IFYMrlKP26A==", + "subType": "06" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJbeyqO5FRmqvPYyOb0tdKtK6JOg8QKbCl37/iFeEm7N0T0Pjb8Io4U0ndB3O6fjokc3kDQrZcQkV+OFWIMuKFjw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJVz3rSYIcoYtM0tZ8pB2Ytgh8RvYPeZvW7aUVJfZkZlIhfUHOHEf5kHqxzt8E1l2n3lmK/7ZVCFUuCCmr8cZyWw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJAiQqNyUcpuDEpFt7skp2NSHFCux2XObrIIFgXReYgtWoapL/n4zksJXl89PGavzNPBZbzgEa8uwwAe+S+Y6TLg==", + "subType": "06" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AhgjwAAAAAAAAAAAAAAAAAALiebb3hWwJRqlgVEhLYKKvo6cnlU7BFnZnvlZ8GuIr11fUvcnS9Tg2m7vPmfL7WVyuNrXlR48x28Es49YuaxuIg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALouDFNLVgBXqhJvBRj9DKacuD1AQ2NAVDW93P9NpZDFFwGOFxmKUcklbPj8KkHqvma8ovVUBTLLUDR+tKFRvC2Q==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALtdcT9+3R1he4eniT+1opqs/YtujFlqzBXssv+hCKhJQVY/IXde32nNpQ1WTgUc7jfIJl/v9HvuA9cDHPtDWWTg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALAwlRAlj4Zpn+wu9eOcs5CsNgrkVwrgmu1tc4wyQp0Lt+3UcplYsXQMrMPcTx3yB0JcI4Kh65n/DrAaA+G/a6iw==", + "subType": "06" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMG8P+Y2YNIgknxE0/yPDCHASBvCU1IJwsEyaJPuOjn03enxEN7z/wbjVMN0lGUptDP3SVL+OIZtQ35VRP84MtnbdhcfZWqMhLjzrCjmtHUEg=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMKCLFUN6ApB5fSVEWazRddhKTEwgqI/mxfe0BBxht69pZQYhTjhOJP0YcIrtr+RCeHOa4FIJgQod1CFOellIzO5YH5CuV4wPxCAlOdbJcBK8=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAM7ULEA6uKKv4Pu4Sa3aAt7dXtEwfQC98aJoLBapHT+xXtn5GWPynOZQNtV3lGaYExQjiGdYbzOcav3SVy/sYTe3ktgkQnuZfe0tk0zyvKIMM=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AhgjwAAAAAAAAAAAAAAAAAAMoMveHO1MadAKuT498xiKWWBUKRbH7k7P2YETDg/BufVw0swos07rk6WJa1vqyF61QEmACjy4pmlK/5P0VfKJBAIvif51YqHPQkobJVS3nVA=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANqBD0ITMn4BaFnDp7BX7vXbRBkFwmjQRVUeBbwsQtv5WVlJMAd/2+w7tyH8Wc44x0/9U/DA5GVhpTrtdDyPBI3w==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANtA0q4mbkAaKX4x1xk0/094Mln0wnh2bYnI6s6dh+l2WLDH7A9JMZxCl6kc4uOsEfbOvjP/PLIYtdMGs14EjM5A==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANfrW3pmeiFdBFt5tJS6Auq9Wo/J4r/vMRiueLWxig5S1zYuf9kFPJMK/nN9HqQPIcBIJIC2i/uEPgeepaNXACCw==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANL7UZNzpwfwhRn/HflWIE9CSxGYNwLSo9d86HsOJ42rrZKq6HQqm/hiEAg0lyqCxVIVFxYEc2BUWSaq4/+SSyZw==", + "subType": "06" + } + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + 
"base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOsGdnr6EKcBdOAvYrP0o1pWbhhJbYsqfVwwwS1zq6ZkBayOss2J3TuYwBGXhJFlq3iIiWLdxGQ883XIvuAECnqUNuvpK2rOLwtDg8xJLiH24=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOpfa6CUSnJBvnWdd7pSZ2pXAbYm68Yka6xa/fuyhVx/Tc926/JpqmOmQtXqbOj8dZra0rQ3/yxHySwgD7s9Qr+xvyL7LvAguGkGmEV5H4Xz4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAO085iqYGFdtjiFWHcNqE0HuKMNHmk49DVh+pX8Pb4p3ehB57JL1nRqaXqHPqhFenxSEInT/te9HQRr+ADcHADvUGsScfm/n85v85nq6X+5y4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOiidb+2TsbAb2wc7MtDzb/UYsjgVNSw410Sz9pm+Uy7aZROE5SURKXdLjrCH2ZM2a+XCAl3o9yAoNgmAjEvYVxjmyzLK00EVjT42MBOrdA+k=", + "subType": "06" + } + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPUsQHeXWhdmyfQ2Sq1ev1HMuMhBTc/FZFKO9tMMcI9qzjr+z4IdCOFCcx24/T/6NCsDpMiOGNnCdaBCCNRwNM0CTIkpHNLO+RSZORDgAsm9Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPRZawtuu0gErebyFqiQw0LxniWhdeujGzaqfAXriGo/2fU7PalzTlWQa8wsv0y7Q/i1K4JbQwCEFpJWLppmtZshCGbVWjpPljB2BH4NNrLPE=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + 
"algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP0qkQjuKmKIqdrsrR9djxt+1jFlEL7K9bP1oz7QWuY38dZJOoGwa6G1bP4wDzjsucJLCEgU2IY+t7BHraBFXvR/Aar8ID5eXcvJ7iOPIyqUw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP6L41iuBWGLg3hQZuhXp4MupTQvIT07+/+CRY292sC02mehk5BkuSOEVrehlvyvBJFKia4Bqd/UWvY8PnUPLqFKTLnokONWbAuh36y3gjStw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQ+6oRKWMSvC+3UGrHSyGeVlR9bFnZtFTmYlUoGn04k6ndtCl8rsmBVUV6dMMYd7znnZtTSIGPI8q6jwf/NJjdIw==", + "subType": "06" + } + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQnz5jAbrrdutTPFA4m3MvlVJr3bpurTKY5xjwO5k8DZpeWTJzr+kVEJjG6M8/RgC/0UFNgBBrDbDhYa8PZHRijw==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQfRFoxUgjrv8up/eZ/fLlr/z++d/jFm30nYvKqsnQT7vkmmujJWc8yAtthR9OI6W5biBgAkounqRHhvatLZC6gA==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQY/ePk59RY6vLejx9a5ITwkT9000KAubVSqMoQwv7lNXO+GKZfZoLHG6k1MA/IxTvl1Zbz1Tw1bTctmj0HPEGNA==", + "subType": "06" + } + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": 
"gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLnk1LpJIriKr6iiY1yBDGnfkRaHNwWcQyL+mORtYC4+AQ6oMv0qpGrJxS2QCbYY1tGmAISqZHCIExCG+TIv4bw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARaqYXh9AVZI6gvRZrBwbprE5P3K5Qf4PIK1ca+mLRNOof0EExyAhtku7mYXusLeq0ww/tV6Zt1cA36KsT8a0Nog==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLXzBjkCN8BpfXDIrb94kuZCD07Uo/DMBfMIWQtAb1++tTheUoY2ClQz33Luh4g8NXwuMJ7h8ufE70N2+b1yrUg==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARe44QH9ZvTAuHsWhEMoue8eHod+cJpBm+Kl/Xtw7NI/6UTOOHC5Kkg20EvX3+GwXdAGk0bUSCFiTZb/yPox1OlA==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASuGZs48eEyVBJ9vvM6cvRySfuR0WM4kL7lx52rSGXBKtkZywyP5rJwNtRn9WTBMDqc1O/4jUgYXpqHx39SLhUPA==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAS/62F71oKTX1GlvOP89uNhXpIyLZ5OdnuLeM/hvL5HWyOudSb06cG3+xnPg3QgppAYFK5X2PGgrEcrA87AykLPg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASSgx+p4YzTvjZ+GCZCFHEKHNXJUSloPnLRHE4iJ515Epb8Tox7h8/aIAkB3ulnDS9BiT5UKdye2TWf8OBEwkXzg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + 
"value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAStqszyEfltpgd3aYeoyqaJX27OX861o06VhNX/N2fdSfKx0NQq/hWlWTkX6hK3hjCijiTtHmhFQR6QLkHD/6THw==", + "subType": "06" + } + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATg4U3nbHBX/Az3ie2yurEIJO6cFryQWKiCpBbx1z0NF7RXd7kFC1XzaY6zcBjfl2AfRO8FFmgjTmFXb6gTRSSF0iAZJZTslfe3n6YFtwSKDI=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATdSSyp0ewboV5zI3T3TV/FOrdx0UQbFHhqcH+yqpotoWPSw5dxE+BEoihYLeaPKuVU/rUIY4TUv05Egj7Ovg62Kpk3cPscxsGtE/T2Ppbt6o=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATl7k20T22pf5Y9knVwIDyOIlbHyZBJqyi3Mai8APEZIYjpSKDKs8QNAH69CIjupyge8Izw4Cuch0bRrvMbp6YFfrUgk1JIQ4iLKkqqzHpBTY=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATF7YLkhkuLhXdxrQk2fJTs128tRNYHeodkqw7ha/TxW3Czr5gE272gnkdzfNoS7uu9XwOr1yjrC6y/8gHALAWn77WvGrAlBktLQbIIinsuds=", + "subType": "06" + } + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + 
"method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAB1hL/nPkpQtqxQUANbIJr30PQ98vPvaoy4JWUoElOL+cCnrSra3o7W+12dydy0rCS2EKrVm7Fw0C8L9nf1hpWjw==", + "subType": "06" + } + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABxlcphy2SxXlkRBvO1Z3nNUqchmeOhIhkdYBbbW7CwYeLVRDciXFsZN73Nb9Bm+W4IpUNpo6mqFEtfjevIjtFyg==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABx5AfRSiblFc1DGwxRIaUSP2kaM76ryzPUKL9KnEgnX1kjIlFz5B15uMht2cxdrntHFe1qZZk8V9PxTBpWZhJ8Q==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABXUC9v9HPrmU9tINzFmr2sQM9f7GHDus+y5T4pWX28PRtfnTysN/ANCfB9RosoR/wuKsbznwwD2JfSzOvlKo3PQ==", + "subType": "06" + } + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACGHmqW1qbfqVlfB0x0CkXCk9smhs3yXsxJ/8eypSgbDQqVLSW2nf5bbHpnoCHHNtQ7I7ZBXzPzDLH2GgMJpopeQ==", + "subType": "06" + } + } + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAAC9BJTD1pEMbslAjbJYt7yx/jzKkcZF3axu96+NYwp8afUCjXG5TOUZzODOwkbJuWgr7DBxa2GkZTvaAEk86h+Ow==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACQlG28ECy8KHXC7GEPdC8+raBo2RMJwl5pofcPaTGkPUEbkreguMd1mYctNb90vXxby1nNeJY4o5zJJCMiNhNXg==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACbWuK+3nzeKSNVjmgHb0Ii7rA+CsAd+gYubPiMiHXZwE/o6i9FYWN+t/VK3p4K0CwIi6q3cycrMb2IgcvM27Q7Q==", + "subType": "06" + } + } + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADh2nGqaAUwHDRVjqYpj8JAPH7scmiHp1Z9SGBZQ6Fapxm+zWDdTBHyitM9U69BctJ5DaaafyqFOj5yr6sJ+ebJQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAD1YhOKyNle4y0Qbeio1HlCULLeTCALCLgKSITd50bilD+oDyqQawixJAwphcdjhLdFzbFwst5RWqpsiWMPHx4hQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADveILoWFgX7AhUWCv8UL52TUa75qHuoNadnTQydJlqd6PVmtRKj+8vS7VwxNWPaH4wB1Tk7emMyFEbZpvvzjxqQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADB/LN9V/4SROJn+ESHRLM7wwcUltQUx3+LbbYXjPDXiiV14HK76Iyy6ZxJ+M5qC9bRj3afhTKuWLBblB8WwksOg==", + "subType": "06" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + 
"method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEasWXQam8XtOkSO0nEttMCQ0iZ4V8DDmhMKyQDFDsiNHyF2h98Ya/xFv4ZSlbpGWXPBvBATEGgov/PDg2vhVi53y4Pk33RHfY60hABuksp3o=", + "subType": "06" + } + } + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEj3A1DYSEHm/3SlEmusA+pewxRPUoZ2NAjs60ioEBlCw9n6yiiB+X8d/w40TKsjZcOSfh05NC0z3gnpqQvrNolkxkvi9dmFiZeiiv5vBZUPI=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEqeJW+L6lP0bn5QcD0FMI0C8vv2n5kV7SKgqKi1o5mxaxmp3Cjlspf7yumfSiQ5js6G9yJVAvHuxlqv14UFyR9RgXS0PIA8WzsAqkL0sJSw0=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEnPlPwy0B1VKuNum1GzkZwQjZia5jNYL5bf/k+PbfhnToTRWGxx8+E3R7XXp6YT/rFkjPlzU8ww9+iZNo2oqNpYuHdrIC8ybhO6HZAlvcERo=", + "subType": "06" + } + } + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFliNDZ6DmjoVcYQBCKDI9njpBsDELg+TD6XLF7xbZnMaJCCHLHr7w3x2/xFfrFSN44CtGAKOniYPCMAspaxHqOA==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF/P8LPmHKGgG0l5/Xi7jdkwfxpGPxoY0417suCvN6zjM3JNdufytzkektrm9CbBb1SnZCGYF9c0FCMzFG+tN/dg==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFWI0N4RbnYdEiFrzNpbRN9p+bSLm8Lthiu4K3/CvBg6GQpLMVQFhjW01Bud0lxpT2ohRnOK+ASUhiFcUU/t/lWQ==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFQZvAtpY4cjEr1rJWVoUGaZKmzocSJ0muHose7Tk5kRDczjFa4Jcu4hN7JLM9qz2z4g+WJC3KQTdW4ZBXStke/Q==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_auto_id": { + 
"kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFn7rhdO8tYq77uVxcqd9Qjz84Yg7JnJMYf0ULTMTh1vJHacckkhXw+8fIMMiAKwuOVwGkMAtu5RBvrFqdfxryCg8RLTxu1YYVthufiClEIS0=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFwwXQx9dKyoyHq7GBMmHzYe9ysoJK/f/ZWzA6nErau9MtX1gqi7VRsYqkamb47/zVbsLZwPMmdgNyPxEh3kqbV2D61t5RG2A3VeqhO1pTF8c=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFALeGeinJ8DE+WZniLdCIW2gfJUj445Ukp9PvRLgBXLGedl8mIXlLF2eu3BA9vP6s5y9w6peQjhn+oEofrsUVYD2duyzeIRMKgNiNchjf6TU=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF06Fx8CO3OSKE3fGri0VwK0e22YiG9LH2QkDTsRdFbT2lBm+bDD9FrEY8vKWS5RljMuysaxjBOzZ98d2LEs6k8LMOm83Nz/RESe4ZbbcfdQ0=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHZFzE908RuO5deEt3t2QQdT12ybwqbm8D+sMJrdKt2Wp4kVPsw4ocAGGsRYN6VXe46P5fmyG5HqVWn0hkflZnQg==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAH3dPKyCCStvOtVGzlgIS33fsl8OAwQblt9i21pOVuLiliY1Tup9EtkSic88+nNEtXnq9gRknRzLthXv/k1ql+7Q==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHcEjxVfHDSfLzFxAuK/rs/Pn/XV7jLkgKXZYeY0PNlRi1MHojN2AvQqI3J2rOvAjuYfikGcpvGPp/goqUbV9HYw==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHX65sNHnRYpx3VbWPCdQyFe7u0Y5ItabLEduqDeVsPk/iK4X3GjCSHQfw1yPi+CA+/veVpgdonwws6RiYV4ZZ5Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": 
"id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIw/xgJlKEvErmVtue3X3RFsOI2sttAbxnzh1INc9GUQ2vok1VwYt9k88RxMPiOwMAZG7P1MlAdx7zt865onPKOw==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIn8IuzlNHbpTgXOd1wEp364zJOBxj2Zf7a9B5osUV1sDY0G1OVpEnuDvZeUsdiUSyRjTTxzyuD/KZlKZ3+qrnrA==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAI3Nz9PdjUYQRGfTtvYSR8EQuUKFL0wdlEdfSCTBmMBhBPuuF9KxqCgy+ldVu1DRRgg3346DOKEEtE9BJPPInJ6Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIEGjqoerIZBk8Rw+YTO7jFKWzagDS8mEpD+9Wm1Q0r0ZHUmV0dQZcIqRV4oUk8U8uHUn0N3t2qGLr+rhUs4GH/g==", + "subType": "06" + } + } + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJgr0v4xetUXjlLcPcyKv/rzjtWOKp9CZJcm23Noglu5RR/rXJS0qKI+W9MmJ64TMf27KvaJ0UXwfTRrvOC1plCg==", + "subType": "06" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJoeysAaiPsVK+JL1P1vD/9xF92m5kKidUdn6yklPlSKN4VVEBTymDetTLujULs1u1TlrS71jVLxo3xEwpG/KQvg==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJVwu4+Su0DktpnZvzTBHYpWbWTq5gho/SLijrcIrFJcvq4YrjjPCXv+odCl95tkH+J1RlJdQ5Cr0umEIazLa6GA==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJWTYpjbDkIf82QXHMGrvd0SqhP8cBIakfYJf5aNcNrs86vxRhiG3KwETWPeOOlPZ6n1WjE2bOLB+DJTAxmJvahA==", + "subType": "06" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALi8avMfpxSlDsSTqdxO8O2B1M79gOElyUIdXySQo7mvgHlf4oHQ7r94lL9dnsA2t/jmUmBKoGypaUQUSQE+9x+A==", + "subType": "06" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALfHerZ/KolaBrb5qi3SpeNVW+i/nh5mkcdtQg5f1pHePr68KryHucM/XDAzbMqrPlag2/41STGYdJqzYO7Mbppg==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALOhKDVAN5cuDyB1EuRFWgKKt0wGJ63E5pPY8Tq2TXMNgCxUUc5O+TE+Ux4ls/uMyOBA3gPzND0CZKiru0i7ACUQ==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALK3Hg8xX9gX+d3vKh7aosRP9CS2CIFeG9sapZv3OAPv1eWjY62Cp/G16kJ0BQt33RYD+DzD3gWupfUSyNZR0gng==", + "subType": "06" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMoGkfmmUWTI+0aW7jVyCJ5Dgru1SCXBUmJSRzDL0D57pNruQ+79tVVcI6Uz5j87DhZFxShHbPjj583vLOOBNM3WGzZCpqH3serhHTWvXK+NM=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMwu1WaRhhv43xgxLNxuenbND9M6mxGtCs9o4J5+yfL95XNB9Daie3RcLlyngz0pncBie6IqjhTycXsxTLQ94Jdg6m5GD5cU541LYKvhbv5f4=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAM+CIoCAisUwhhJtWQLolxQGQWafniwYyvaJQHmJC94Uwbf1gPfhMR42v2VtrmIVP0J0BaP/xf0cco2/qWRdKGZpgkK2CK6M972NtnZ/2x03A=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMjbeE9+EaJYjGfeAuxsV8teOdsW8bfnlkvji/tE11Zq89UMGx+oUsZzeLjUgVZ5nxsZKCZjEAq+DPnwFVC+MgqNeqWL7fRChODFlPGH2ZC+8=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANuzlkWs/c8xArrAxPgYuCeShjj1zCfIMHOTPohspcyNofo9iY3P5MlhEOprZDiS8dBFg6EB7fZDzDdczx6VCN2A==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANwJ72y7UqCBJh1NwVRiE3vU1ex7FMv/X5YWCMuO9MHPMo4g1V5eaO4KfOr+K8+9NtkflgMpeDkvwP92rfR5ud5Q==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + 
"identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANj5q+888itRnLsw9PNGsBLhgqpvem5IJBOE2292r6zwjVueoEK/2I2PesRnn0esnkwdia1ADoMkcLUegwcFRkWQ==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANnvbnmApys7OIe8LGTsZKDG1F1G1SI/rfZVmF6q1fq5U7feYPp1ejb2t2S2+v7LfcOHytsQWGcYuWCDcl+vosvQ==", + "subType": "06" + } + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOe+vXpJSkmBM3WkxZrn4ea9/C6iNyMXWUzkQIzIYlnbkyu8od8nfOdhobUhoFxcKnvdaxN1s5NhJ1FA97RN/upGYN+AI/7cTCElmFSpdSvkI=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPpCgK6Hc/M2elOJkwIU9J7PZa+h1chody2yvfDu/UlB6T5sxnEZ6aEY/ISNLhJlhsRzuApSgFOmnrcG6Eg9VnSKin2yK0ll+VFxQEDHAcSA=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOVoHX9GaOn71L5D9TpZmmxkx/asr0FHCLG5ZgLLA04yIhZHsDjt2DiVGGO/Mf4KwvoBn7Cf08qMhW7rQh2LgvvSLBO3zbw5l+MZ/bSn+Jylo=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPobmcO/I4QObtCUEmGWpSCJ6tlYyhbO59q78LZBucSNl7DSkf/13tOJ9t+WKXACcMKVMmfPoFsgHbVj1nKWULBT07n1OWWDTZkuMD6C2+Fc=", + "subType": "06" + } + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": 
"explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPW2VMMm+EvsYpVtJQhsxgxgvV35kr9nxqKxP2qqIOAOQ58R/1oyYScFkNwB/tw0A1/zdvhoo+ERa7c0tjLIojFrosXhX2N/8Z4VnbZruz0Nk=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPjPq9BQR4EwG/CD+RthOJY04m99LCl/shY6HnaU/QL627kN1dbBAG5vs+MXfa+glg8waVTNgB94vm3j72FMV1ZOKvbl4faWF1Rl2EOpOlR9U=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtqebrCAidKzBMvp3B5/vBeetqeCoMKS+vo+hLAYooXrnBunWxwRHpr45XYUvroG3aqOMkLtVZSgw8sO6Y/3z1viO2G0sGQW1ZMoW0/PX5Uw=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtkJwXKlq8Fx1f1+9HFofM4uKi6lHQRFRyiOyUFJYxxZY1LR/2WXXTqWz3MWtrcJFCB+QSVOb1N/ieC7AZUboPgIuPJISM3Hu5VU2x/Isbdc=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQ50kE7Tby9od2OsmIGZhp9k/mj4vy/YdnmF6YsSPxihbjV1vXGMraI/nGCr+0H1riwzq3m4sCT7aPw2VgiuwKMA==", + "subType": "06" + } + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQkNL14OSMX/bJbsLtB/UumRoat6QOY7fvwZxRrkXTS3VJVHigthI1cUX7Is/uUsY8oHOfk/ZuHklQkifmfdcklQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAAQtN2gNVU9Itoj+vgcK/4jEB5baSUH+Qz2WqTY7m0XaA3bPWGFCiWY4Sdw+qovednrSSSbC+azWi1QYclFRraldQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQk6uBqwXXFF9zEM4bc124goI3pBy2Jdi8Cd0ycKkjXrPG7GVCUm2UMbO+zEzYODeVo35N11g2yMXcv9RVgjWtNA==", + "subType": "06" + } + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAR2Cu3o2e/u5o69MndeZPJU5ngVA1G2MNYn00t+up/GlmaUC1ni1CVl0ZR0EVZ0gCDUrfxwPISPib8y23tNjbsog==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARgi8stgSQwqnN4Ws2ZBILOREsjreZcS1MBerL7dbGLVfzW99tqECglhGokkrE0aY69L0xMgcAUIaFRN4GanQAPg==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARPxEEI8L5Q3Jybu88BLdf31T3uYEUbijgSlKlkTt141RYrlE8nxtiYU5/5H9GXBis0Qq1s2C+MauD2h/cNijTCA==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARh/QaU1dnGbii4LtXCpT5o6vencc8E2fzarjJFbSEd0ixW/UV1ppZdvD729d0umkaIwIEVA4q+XVvHfl/ckKPFg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": 
"ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASVv+ClXkh9spIaXWJYRV/o8UZjG+WWWrNpIjZ9LQn2bXakrKJ3REvdkrzGuxASmBhBYTplEyvxVCJwXuWRAGGYw==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASeAz/dK+Gc4/jx3W07B2rNFvQ0LoyCllFRvRVGu1Xf1NByc4cRZLOMzlr99syz/fifF6WY30bOi5Pani9QtFuGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASP1HD9uoDlwTldaznKxW71JUQcLsa4/cUWzeTnelQwdpohCbZsM8fBZBqgwwTWnjpYY/LBUipC6yhwLKfUXBoBQ==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASnGPH77bS/ETB1hn+VTvsBrxEvIHA6EAb8Z2SEz6BHt7SVeI+I7DLERvRVpV5kNJFcKgXDrvRmD+Et0rhSmk9sw==", + "subType": "06" + } + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATu/BbCc5Ti9SBlMR2B8zj3Q1yQ16Uob+10LWaT5QKS192IcnBGy4wmmNkIsTys060xUby9KKQF80dVPnjYfqJwEXCe/pVaPQZftE0DolKv78=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": 
"AijCDwAAAAAAAAAAAAAAAAATpq6/dtxq2ZUZHrK10aB0YjjPalEaXYcyAyRZjfXWAYCLZdT9sIybjX3Axjxisim+VSHx0QU7oXkKUfcbLgHyjUXj8g9059FHxKFkUsNv4Z8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATS++9KcfM7uiShZYxRpFPrBJquKv7dyvFRTjnxs6aaaPo0fiqpv6bco/cMLsldEVpWDEA/Tc2HtSXYPp4UJsMfASyBjoxCloL5SaRWyD9Ye8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATREcETS5KoAGyj/P45owPrdFfy5ng8Z1ND+F+780lLddOyPeDnIsa7yg6uvhTZ65mHfGLvKcFocclYenq/AX1dY4xdjLRg/AfT088A27ORUA=", + "subType": "06" + } + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + } +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-aws.json b/test/client-side-encryption/corpus/corpus-key-aws.json new file mode 100644 index 0000000000..eca6cf912e --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-aws.json @@ -0,0 +1,33 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "provider": "aws" + }, + "updateDate": { + "$date": { + "$numberLong": 
"1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": ["aws"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-azure.json b/test/client-side-encryption/corpus/corpus-key-azure.json new file mode 100644 index 0000000000..31a564edb8 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-azure.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["azure"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-gcp.json b/test/client-side-encryption/corpus/corpus-key-gcp.json new file mode 100644 index 0000000000..79d6999b08 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-gcp.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["gcp"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-kmip.json b/test/client-side-encryption/corpus/corpus-key-kmip.json new file mode 100644 index 0000000000..7c7069700e --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-kmip.json @@ -0,0 +1,32 @@ +{ + "_id": { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, 
+ "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": ["kmip"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-local.json b/test/client-side-encryption/corpus/corpus-key-local.json new file mode 100644 index 0000000000..b3fe0723b0 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-local.json @@ -0,0 +1,31 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + }, + "updateDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": [ "local" ] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-schema.json b/test/client-side-encryption/corpus/corpus-schema.json new file mode 100644 index 0000000000..e74bc914f5 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-schema.json @@ -0,0 +1,6335 @@ +{ + "bsonType": "object", + "properties": { + "aws_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "aws_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "aws_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "aws_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "aws_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "aws_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_string_det_explicit_altname": { + "bsonType": 
"object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "aws_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "aws_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "aws_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "aws_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "aws_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "aws_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "aws_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "aws_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } 
+ }, + "aws_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "aws_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "aws_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "aws_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "aws_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "aws_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "aws_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "aws_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "aws_date_rand_explicit_id": { + "bsonType": 
"object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "aws_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "aws_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "aws_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "aws_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "aws_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "aws_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "aws_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { 
+ "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "aws_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "aws_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "aws_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "aws_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "aws_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "aws_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "aws_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "aws_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_int_rand_auto_id": { + 
"bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "aws_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "aws_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "aws_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "aws_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "aws_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "aws_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "aws_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "aws_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { 
+ "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "aws_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AWSAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "aws_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_aws", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "aws_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "aws_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "local_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "local_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "local_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "local_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "local_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "local_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "local_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "local_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "local_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "local_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "local_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "local_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "local_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "local_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=04_rand_explicit_altname": 
{ + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "local_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "local_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "local_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "local_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "local_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "local_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "local_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "local_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_date_rand_explicit_altname": { + "bsonType": 
"object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "local_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "local_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "local_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "local_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "local_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "local_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "local_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "local_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "local_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "local_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "local_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "local_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "local_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "local_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "local_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_int_rand_auto_id": { + "bsonType": "object", + 
"properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "local_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "local_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "local_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "local_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "local_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "local_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "local_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "local_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": 
{ + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "local_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "local_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_local", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "local_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "local_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "azure_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + 
"$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + 
"azure_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + 
"azure_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "azure_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "azure_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + 
"keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + 
"azure_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "azure_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_auto_id": { + "bsonType": 
"object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "azure_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "gcp_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + 
"keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { 
+ "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + 
"bsonType": "binData" + } + } + }, + "gcp_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "gcp_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + 
"subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "gcp_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "gcp_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "kmip_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": 
"object" + } + } + } + }, + "kmip_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_auto_id": { + "bsonType": 
"object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + 
"encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "kmip_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_auto_altname": { + 
"bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } 
+ } + }, + "kmip_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "kmip_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "kmip_long_det_explicit_id": { + "bsonType": 
"object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + } + } +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus.json b/test/client-side-encryption/corpus/corpus.json new file mode 100644 index 0000000000..559711b347 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus.json @@ -0,0 +1,8619 @@ +{ + "_id": "client_side_encryption_corpus", + "altname_aws": "aws", + "altname_local": "local", + "altname_azure": "azure", + "altname_gcp": "gcp", + "altname_kmip": "kmip", + "aws_double_rand_auto_id": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_double_rand_auto_altname": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_double_rand_explicit_id": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_double_rand_explicit_altname": { + "kms": "aws", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_double_det_explicit_id": { + "kms": "aws", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_double_det_explicit_altname": { + "kms": "aws", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "aws_string_rand_auto_id": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "aws_string_rand_auto_altname": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "aws_string_rand_explicit_id": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "aws_string_rand_explicit_altname": { + "kms": "aws", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "aws_string_det_auto_id": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": 
true, + "value": "mongodb" + }, + "aws_string_det_explicit_id": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "aws_string_det_explicit_altname": { + "kms": "aws", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "aws_object_rand_auto_id": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_object_rand_auto_altname": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_object_rand_explicit_id": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_object_rand_explicit_altname": { + "kms": "aws", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_object_det_explicit_id": { + "kms": "aws", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_object_det_explicit_altname": { + "kms": "aws", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "aws_array_rand_auto_id": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_array_rand_auto_altname": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_array_rand_explicit_id": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_array_rand_explicit_altname": { + "kms": "aws", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_array_det_explicit_id": { + "kms": "aws", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_array_det_explicit_altname": { + "kms": "aws", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "aws_binData=00_rand_auto_id": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_rand_auto_altname": { + "kms": "aws", + "type": "binData=00", + 
"algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_rand_explicit_id": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_rand_explicit_altname": { + "kms": "aws", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_det_auto_id": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_det_explicit_id": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=00_det_explicit_altname": { + "kms": "aws", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "aws_binData=04_rand_auto_id": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_rand_auto_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_rand_explicit_id": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_rand_explicit_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_det_auto_id": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_det_explicit_id": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_binData=04_det_explicit_altname": { + "kms": "aws", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "aws_undefined_rand_explicit_id": { + "kms": "aws", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "aws_undefined_rand_explicit_altname": { + "kms": "aws", + "type": "undefined", + "algo": "rand", + "method": "explicit", + 
"identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "aws_undefined_det_explicit_id": { + "kms": "aws", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "aws_undefined_det_explicit_altname": { + "kms": "aws", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "aws_objectId_rand_auto_id": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_rand_auto_altname": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_rand_explicit_id": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_rand_explicit_altname": { + "kms": "aws", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_det_auto_id": { + "kms": "aws", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_det_explicit_id": { + "kms": "aws", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_objectId_det_explicit_altname": { + "kms": "aws", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "aws_bool_rand_auto_id": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "aws_bool_rand_auto_altname": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "aws_bool_rand_explicit_id": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "aws_bool_rand_explicit_altname": { + "kms": "aws", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "aws_bool_det_explicit_id": { + "kms": "aws", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "aws_bool_det_explicit_altname": { + "kms": "aws", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "aws_date_rand_auto_id": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_rand_auto_altname": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_rand_explicit_id": { + "kms": "aws", + "type": "date", + 
"algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_rand_explicit_altname": { + "kms": "aws", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_det_auto_id": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_det_explicit_id": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_date_det_explicit_altname": { + "kms": "aws", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "aws_null_rand_explicit_id": { + "kms": "aws", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "aws_null_rand_explicit_altname": { + "kms": "aws", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "aws_null_det_explicit_id": { + "kms": "aws", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "aws_null_det_explicit_altname": { + "kms": "aws", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "aws_regex_rand_auto_id": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_rand_auto_altname": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_rand_explicit_id": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_rand_explicit_altname": { + "kms": "aws", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_det_auto_id": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_det_explicit_id": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_regex_det_explicit_altname": { + "kms": "aws", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "aws_dbPointer_rand_auto_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + 
"$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_rand_auto_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_rand_explicit_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_rand_explicit_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_det_auto_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_det_explicit_id": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_dbPointer_det_explicit_altname": { + "kms": "aws", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "aws_javascript_rand_auto_id": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_rand_auto_altname": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_rand_explicit_id": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_rand_explicit_altname": { + "kms": "aws", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_det_auto_id": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_det_explicit_id": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_javascript_det_explicit_altname": { + "kms": "aws", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "aws_symbol_rand_auto_id": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_rand_auto_altname": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, 
+ "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_rand_explicit_id": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_rand_explicit_altname": { + "kms": "aws", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_det_auto_id": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_det_explicit_id": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_symbol_det_explicit_altname": { + "kms": "aws", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "aws_javascriptWithScope_rand_auto_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_javascriptWithScope_rand_auto_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_javascriptWithScope_rand_explicit_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_javascriptWithScope_rand_explicit_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_javascriptWithScope_det_explicit_id": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_javascriptWithScope_det_explicit_altname": { + "kms": "aws", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "aws_int_rand_auto_id": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_rand_auto_altname": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_rand_explicit_id": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_rand_explicit_altname": { + "kms": "aws", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_det_auto_id": { + "kms": "aws", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_det_explicit_id": { + "kms": "aws", + "type": "int", + "algo": "det", + 
"method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_int_det_explicit_altname": { + "kms": "aws", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "aws_timestamp_rand_auto_id": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_rand_auto_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_rand_explicit_id": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_rand_explicit_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_det_auto_id": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_det_explicit_id": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_timestamp_det_explicit_altname": { + "kms": "aws", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "aws_long_rand_auto_id": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_rand_auto_altname": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_rand_explicit_id": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_rand_explicit_altname": { + "kms": "aws", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_det_auto_id": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_det_explicit_id": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_long_det_explicit_altname": { + "kms": "aws", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "aws_decimal_rand_auto_id": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_decimal_rand_auto_altname": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + 
"method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_decimal_rand_explicit_id": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_decimal_rand_explicit_altname": { + "kms": "aws", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_decimal_det_explicit_id": { + "kms": "aws", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_decimal_det_explicit_altname": { + "kms": "aws", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "aws_minKey_rand_explicit_id": { + "kms": "aws", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "aws_minKey_rand_explicit_altname": { + "kms": "aws", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "aws_minKey_det_explicit_id": { + "kms": "aws", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "aws_minKey_det_explicit_altname": { + "kms": "aws", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "aws_maxKey_rand_explicit_id": { + "kms": "aws", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "aws_maxKey_rand_explicit_altname": { + "kms": "aws", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "aws_maxKey_det_explicit_id": { + "kms": "aws", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "aws_maxKey_det_explicit_altname": { + "kms": "aws", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "local_double_rand_auto_id": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "local_double_rand_auto_altname": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "local_double_rand_explicit_id": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "local_double_rand_explicit_altname": { + "kms": "local", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "local_double_det_explicit_id": { + "kms": "local", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + 
} + }, + "local_double_det_explicit_altname": { + "kms": "local", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "local_string_rand_auto_id": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "local_string_rand_auto_altname": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "local_string_rand_explicit_id": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "local_string_rand_explicit_altname": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "local_string_det_auto_id": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "local_string_det_explicit_id": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "local_string_det_explicit_altname": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "local_object_rand_auto_id": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_object_rand_auto_altname": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_object_rand_explicit_id": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_object_rand_explicit_altname": { + "kms": "local", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_object_det_explicit_id": { + "kms": "local", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_object_det_explicit_altname": { + "kms": "local", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "local_array_rand_auto_id": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "local_array_rand_auto_altname": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "local_array_rand_explicit_id": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { 
+ "$numberInt": "3" + } + ] + }, + "local_array_rand_explicit_altname": { + "kms": "local", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "local_array_det_explicit_id": { + "kms": "local", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "local_array_det_explicit_altname": { + "kms": "local", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "local_binData=00_rand_auto_id": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_rand_auto_altname": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_rand_explicit_id": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_rand_explicit_altname": { + "kms": "local", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_det_auto_id": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_det_explicit_id": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=00_det_explicit_altname": { + "kms": "local", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "local_binData=04_rand_auto_id": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_rand_auto_altname": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_rand_explicit_id": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_rand_explicit_altname": { + "kms": "local", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + 
"value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_det_auto_id": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_det_explicit_id": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_binData=04_det_explicit_altname": { + "kms": "local", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "local_undefined_rand_explicit_id": { + "kms": "local", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "local_undefined_rand_explicit_altname": { + "kms": "local", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "local_undefined_det_explicit_id": { + "kms": "local", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "local_undefined_det_explicit_altname": { + "kms": "local", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "local_objectId_rand_auto_id": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_rand_auto_altname": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_rand_explicit_id": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_rand_explicit_altname": { + "kms": "local", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_det_auto_id": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_det_explicit_id": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_objectId_det_explicit_altname": { + "kms": "local", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "local_bool_rand_auto_id": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "local_bool_rand_auto_altname": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": 
"auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "local_bool_rand_explicit_id": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "local_bool_rand_explicit_altname": { + "kms": "local", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "local_bool_det_explicit_id": { + "kms": "local", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "local_bool_det_explicit_altname": { + "kms": "local", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "local_date_rand_auto_id": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_rand_auto_altname": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_rand_explicit_id": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_rand_explicit_altname": { + "kms": "local", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_det_auto_id": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_det_explicit_id": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_date_det_explicit_altname": { + "kms": "local", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "local_null_rand_explicit_id": { + "kms": "local", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "local_null_rand_explicit_altname": { + "kms": "local", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "local_null_det_explicit_id": { + "kms": "local", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "local_null_det_explicit_altname": { + "kms": "local", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "local_regex_rand_auto_id": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_rand_auto_altname": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_rand_explicit_id": { 
+ "kms": "local", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_rand_explicit_altname": { + "kms": "local", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_det_auto_id": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_det_explicit_id": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_regex_det_explicit_altname": { + "kms": "local", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "local_dbPointer_rand_auto_id": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_rand_auto_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_rand_explicit_id": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_rand_explicit_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_det_auto_id": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_det_explicit_id": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_dbPointer_det_explicit_altname": { + "kms": "local", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "local_javascript_rand_auto_id": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_javascript_rand_auto_altname": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + 
"$code": "x=1" + } + }, + "local_javascript_rand_explicit_id": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_javascript_rand_explicit_altname": { + "kms": "local", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_javascript_det_auto_id": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_javascript_det_explicit_id": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_javascript_det_explicit_altname": { + "kms": "local", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "local_symbol_rand_auto_id": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_rand_auto_altname": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_rand_explicit_id": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_rand_explicit_altname": { + "kms": "local", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_det_auto_id": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_det_explicit_id": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_symbol_det_explicit_altname": { + "kms": "local", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "local_javascriptWithScope_rand_auto_id": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_javascriptWithScope_rand_auto_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_javascriptWithScope_rand_explicit_id": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_javascriptWithScope_rand_explicit_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_javascriptWithScope_det_explicit_id": { + "kms": "local", + 
"type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_javascriptWithScope_det_explicit_altname": { + "kms": "local", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "local_int_rand_auto_id": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_rand_auto_altname": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_rand_explicit_id": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_rand_explicit_altname": { + "kms": "local", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_det_auto_id": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_det_explicit_id": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_int_det_explicit_altname": { + "kms": "local", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "local_timestamp_rand_auto_id": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_rand_auto_altname": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_rand_explicit_id": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_rand_explicit_altname": { + "kms": "local", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_det_auto_id": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_det_explicit_id": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_timestamp_det_explicit_altname": { + "kms": "local", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "local_long_rand_auto_id": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + 
"value": { + "$numberLong": "456" + } + }, + "local_long_rand_auto_altname": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_long_rand_explicit_id": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_long_rand_explicit_altname": { + "kms": "local", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_long_det_auto_id": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_long_det_explicit_id": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_long_det_explicit_altname": { + "kms": "local", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "local_decimal_rand_auto_id": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_decimal_rand_auto_altname": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_decimal_rand_explicit_id": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_decimal_rand_explicit_altname": { + "kms": "local", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_decimal_det_explicit_id": { + "kms": "local", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_decimal_det_explicit_altname": { + "kms": "local", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "local_minKey_rand_explicit_id": { + "kms": "local", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "local_minKey_rand_explicit_altname": { + "kms": "local", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "local_minKey_det_explicit_id": { + "kms": "local", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "local_minKey_det_explicit_altname": { + "kms": "local", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "local_maxKey_rand_explicit_id": { + "kms": "local", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "local_maxKey_rand_explicit_altname": { 
+ "kms": "local", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "local_maxKey_det_explicit_id": { + "kms": "local", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "local_maxKey_det_explicit_altname": { + "kms": "local", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + 
"azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": 
"det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": 
"01234567890abcdef0123456" + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_id": { + 
"kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + 
"value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + 
"azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": 
"auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + 
"azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": 
"rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": 
"array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": 
"binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + 
"type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": 
"auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + 
"$code": "x=1" + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + 
"algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + 
"method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_auto_altname": { + 
"kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + 
}, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": 
"04" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + 
"kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + 
"kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + 
"kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": 
"altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + 
"kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + 
"type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "payload=0,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "" + }, + "payload=1,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "a" + }, + "payload=2,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aa" + }, + "payload=3,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaa" + }, + "payload=4,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaa" + }, + "payload=5,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaa" + }, + "payload=6,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaa" + }, + "payload=7,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaa" + }, + "payload=8,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaa" + }, + "payload=9,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaa" + }, + "payload=10,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaa" + }, + "payload=11,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaa" + }, + "payload=12,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaa" + }, + "payload=13,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": 
"aaaaaaaaaaaaa" + }, + "payload=14,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaa" + }, + "payload=15,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaaa" + }, + "payload=16,algo=rand": { + "kms": "local", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaaaa" + }, + "payload=0,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "" + }, + "payload=1,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "a" + }, + "payload=2,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aa" + }, + "payload=3,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaa" + }, + "payload=4,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaa" + }, + "payload=5,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaa" + }, + "payload=6,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaa" + }, + "payload=7,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaa" + }, + "payload=8,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaa" + }, + "payload=9,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaa" + }, + "payload=10,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaa" + }, + "payload=11,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaa" + }, + "payload=12,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaa" + }, + "payload=13,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaa" + }, + "payload=14,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaa" + }, + "payload=15,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaaa" + }, + "payload=16,algo=det": { + "kms": "local", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "aaaaaaaaaaaaaaaa" + } +} \ No newline at end of file diff 
--git a/test/client-side-encryption/custom/azure-dek.json b/test/client-side-encryption/custom/azure-dek.json new file mode 100644 index 0000000000..e644c971c6 --- /dev/null +++ b/test/client-side-encryption/custom/azure-dek.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "azure_altname"] +} diff --git a/test/client-side-encryption/custom/azure-gcp-schema.json b/test/client-side-encryption/custom/azure-gcp-schema.json new file mode 100644 index 0000000000..441949f6d6 --- /dev/null +++ b/test/client-side-encryption/custom/azure-gcp-schema.json @@ -0,0 +1,31 @@ +{ + "db.coll": { + "bsonType": "object", + "properties": { + "secret_azure": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + }, + "secret_gcp": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + } +} diff --git a/test/client-side-encryption/custom/gcp-dek.json b/test/client-side-encryption/custom/gcp-dek.json new file mode 100644 index 0000000000..968c8b9176 --- /dev/null +++ b/test/client-side-encryption/custom/gcp-dek.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "gcp_altname"] +} diff --git a/test/client-side-encryption/custom/key-document-local.json b/test/client-side-encryption/custom/key-document-local.json new file mode 100644 index 0000000000..522ad6d4d1 --- /dev/null +++ b/test/client-side-encryption/custom/key-document-local.json @@ -0,0 +1,18 @@ +{ + "_id": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } +}, + "keyMaterial": { + "$binary": { + "base64": 
"db27rshiqK4Jqhb2xnwK4RfdFb9JuKeUe6xt5aYQF4o62tS75b7B4wxVN499gND9UVLUbpVKoyUoaZAeA895OENP335b8n8OwchcTFqS44t+P3zmhteYUQLIWQXaIgon7gEgLeJbaDHmSXS6/7NbfDDFlB37N7BP/2hx1yCOTN6NG/8M1ppw3LYT3CfP6EfXVEttDYtPbJpbb7nBVlxD7w==", + "subType": "00" + } + }, + "creationDate": { "$date": { "$numberLong": "1564354142963" } }, + "updateDate": { "$date": { "$numberLong": "1564354142963" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" } +} diff --git a/test/client-side-encryption/custom/schema.json b/test/client-side-encryption/custom/schema.json new file mode 100644 index 0000000000..0c47fe3746 --- /dev/null +++ b/test/client-side-encryption/custom/schema.json @@ -0,0 +1,17 @@ +{ + "properties": { + "ssn": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json new file mode 100644 index 0000000000..ec4489fa09 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json @@ -0,0 +1,38 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + }, + { + "queryType": "suffixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-substring.json b/test/client-side-encryption/etc/data/encryptedFields-substring.json new file mode 100644 index 0000000000..ee22def77b --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-substring.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "strMaxLength": { + "$numberInt": "10" + }, + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json new file mode 100644 index 0000000000..88abe5a604 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/keys/key1-document.json b/test/client-side-encryption/etc/data/keys/key1-document.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null 
+++ b/test/client-side-encryption/etc/data/keys/key1-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key1-id.json b/test/client-side-encryption/etc/data/keys/key1-id.json new file mode 100644 index 0000000000..7d18f52ebb --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-document.json b/test/client-side-encryption/etc/data/keys/key2-document.json new file mode 100644 index 0000000000..a654d980ba --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-id.json b/test/client-side-encryption/etc/data/keys/key2-id.json new file mode 100644 index 0000000000..6e9b87bbc2 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/lookup/key-doc.json b/test/client-side-encryption/etc/data/lookup/key-doc.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/key-doc.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-csfle.json b/test/client-side-encryption/etc/data/lookup/schema-csfle.json new file mode 100644 index 0000000000..29ac9ad5da --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle": { + "encrypt": { + "keyId": 
[ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-csfle2.json b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json new file mode 100644 index 0000000000..3f1c02781c --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle2": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe.json b/test/client-side-encryption/etc/data/lookup/schema-qe.json new file mode 100644 index 0000000000..9428ea1b45 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe.esc", + "ecocCollection": "enxcol_.qe.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe2.json b/test/client-side-encryption/etc/data/lookup/schema-qe2.json new file mode 100644 index 0000000000..77d5bd37cb --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe2.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe2.esc", + "ecocCollection": "enxcol_.qe2.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe2", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json new file mode 100644 index 0000000000..defa6e37ff --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -0,0 +1,36 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json new file mode 100644 index 0000000000..dbe28e9c10 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -0,0 +1,26 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} \ No newline at end of file diff --git 
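The range-encryptedFields fixtures here vary only the BSON type of the bounds: every "queries" object sets queryType "range" with the same contention, trimFactor, and sparsity, while the Date/Int/Long/DecimalPrecision/DoublePrecision variants additionally pin min, max, and (for the fractional types) precision. Such a document is supplied as the encryptedFields option when the test collection is created. A sketch, assuming a MongoClient already configured with AutoEncryptionOpts and PyMongo's encrypted_fields keyword arguments; database and collection names are illustrative:

from bson import json_util

def create_range_collection(encrypted_client, path):
    # encrypted_client is assumed to be a MongoClient configured with
    # AutoEncryptionOpts; drop first so reruns start from a clean state.
    with open(path) as f:
        encrypted_fields = json_util.loads(f.read())
    db = encrypted_client["db"]
    db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields)
    return db.create_collection(
        "explicit_encryption", encrypted_fields=encrypted_fields
    )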
a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json new file mode 100644 index 0000000000..538ab20f0e --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -0,0 +1,35 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json new file mode 100644 index 0000000000..fb4f46d375 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -0,0 +1,26 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json new file mode 100644 index 0000000000..07d1c84d6f --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -0,0 +1,35 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json new file mode 100644 index 0000000000..4f0b4854e4 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -0,0 +1,32 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json new file mode 100644 index 0000000000..32fe1ea15d --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -0,0 +1,32 @@ +{ + "fields": [ + { + "keyId": { + "$binary": 
{ + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/external/external-key.json b/test/client-side-encryption/external/external-key.json new file mode 100644 index 0000000000..b3fe0723b0 --- /dev/null +++ b/test/client-side-encryption/external/external-key.json @@ -0,0 +1,31 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + }, + "updateDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": [ "local" ] +} \ No newline at end of file diff --git a/test/client-side-encryption/external/external-schema.json b/test/client-side-encryption/external/external-schema.json new file mode 100644 index 0000000000..7d8cad8c33 --- /dev/null +++ b/test/client-side-encryption/external/external-schema.json @@ -0,0 +1,19 @@ +{ + "properties": { + "encrypted": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/limits/limits-doc.json b/test/client-side-encryption/limits/limits-doc.json new file mode 100644 index 0000000000..53de52326c --- /dev/null +++ b/test/client-side-encryption/limits/limits-doc.json @@ -0,0 +1,102 @@ +{ + "00": "a", + "01": "a", + "02": "a", + "03": "a", + "04": "a", + "05": "a", + "06": "a", + "07": "a", + "08": "a", + "09": "a", + "10": "a", + "11": "a", + "12": "a", + "13": "a", + "14": "a", + "15": "a", + "16": "a", + "17": "a", + "18": "a", + "19": "a", + "20": "a", + "21": "a", + "22": "a", + "23": "a", + "24": "a", + "25": "a", + "26": "a", + "27": "a", + "28": "a", + "29": "a", + "30": "a", + "31": "a", + "32": "a", + "33": "a", + "34": "a", + "35": "a", + "36": "a", + "37": "a", + "38": "a", + "39": "a", + "40": "a", + "41": "a", + "42": "a", + "43": "a", + "44": "a", + "45": "a", + "46": "a", + "47": "a", + "48": "a", + "49": "a", + "50": "a", + "51": "a", + "52": "a", + "53": "a", + "54": "a", + "55": "a", + "56": "a", + "57": "a", + "58": "a", + "59": "a", + "60": "a", + "61": "a", + "62": "a", + "63": "a", + "64": "a", + "65": "a", + "66": "a", + "67": "a", + "68": "a", + "69": "a", + "70": "a", + "71": "a", + "72": "a", + "73": "a", + "74": "a", + "75": "a", + "76": "a", + "77": "a", + "78": "a", + "79": "a", + "80": "a", + "81": "a", + "82": "a", + "83": "a", + "84": "a", + "85": "a", + "86": "a", + "87": "a", + "88": "a", + "89": "a", + "90": "a", + "91": "a", + "92": "a", + "93": "a", + "94": "a", + "95": "a", + "96": "a", + "97": "a", + "98": "a", + "99": "a" +} \ No newline at end of file diff --git 
a/test/client-side-encryption/limits/limits-encryptedFields.json b/test/client-side-encryption/limits/limits-encryptedFields.json new file mode 100644 index 0000000000..c52a0271e1 --- /dev/null +++ b/test/client-side-encryption/limits/limits-encryptedFields.json @@ -0,0 +1,14 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "path": "foo", + "bsonType": "string" + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-key.json b/test/client-side-encryption/limits/limits-key.json new file mode 100644 index 0000000000..b3fe0723b0 --- /dev/null +++ b/test/client-side-encryption/limits/limits-key.json @@ -0,0 +1,31 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + }, + "updateDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": [ "local" ] +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-qe-doc.json b/test/client-side-encryption/limits/limits-qe-doc.json new file mode 100644 index 0000000000..71efbf4068 --- /dev/null +++ b/test/client-side-encryption/limits/limits-qe-doc.json @@ -0,0 +1,3 @@ +{ + "foo": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-schema.json b/test/client-side-encryption/limits/limits-schema.json new file mode 100644 index 0000000000..c06908d9ce --- /dev/null +++ b/test/client-side-encryption/limits/limits-schema.json @@ -0,0 +1,1405 @@ +{ + "properties": { + "00": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "01": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "02": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "03": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "04": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "05": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "06": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" 
+ } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "07": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "08": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "09": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "10": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "11": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "12": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "13": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "14": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "15": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "16": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "17": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "18": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "19": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "20": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "21": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "22": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + 
} + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "23": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "24": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "25": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "26": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "27": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "28": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "29": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "30": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "31": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "32": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "33": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "34": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "35": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "36": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "37": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "38": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } 
+ } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "39": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "40": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "41": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "42": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "43": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "44": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "45": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "46": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "47": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "48": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "49": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "50": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "51": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "52": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "53": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "54": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + 
} + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "55": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "56": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "57": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "58": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "59": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "60": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "61": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "62": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "63": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "64": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "65": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "66": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "67": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "68": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "69": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "70": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } 
+ ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "71": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "72": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "73": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "74": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "75": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "76": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "77": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "78": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "79": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "80": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "81": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "82": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "83": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "84": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "85": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "86": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + 
], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "87": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "88": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "89": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "90": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "91": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "92": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "93": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "94": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "95": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "96": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "97": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "98": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "99": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} \ No newline at end of file diff --git a/test/client-side-encryption/spec/legacy/aggregate.json b/test/client-side-encryption/spec/legacy/aggregate.json new file mode 100644 index 0000000000..7de725b71d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/aggregate.json @@ -0,0 +1,390 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Aggregate with deterministic encryption", + "skipReason": "SERVER-39395", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Aggregate with empty pipeline", + "skipReason": "SERVER-40829 hides agg support 
behind enableTestCommands flag.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [] + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [], + "cursor": {} + }, + "command_name": "aggregate" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Aggregate should fail with random encryption", + "skipReason": "SERVER-39395", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "random": "abc" + } + } + ] + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "Database aggregate should fail", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$currentOp": { + "allUsers": false, + "idleConnections": false, + "localOps": true + } + }, + { + "$match": { + "command.aggregate": { + "$eq": 1 + } + } + }, + { + "$project": { + "command": 1 + } + }, + { + "$project": { + "command.lsid": 0 + } + } + ] + }, + "result": { + "errorContains": "non-collection command not supported for auto encryption: aggregate" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/awsTemporary.json b/test/client-side-encryption/spec/legacy/awsTemporary.json new file mode 100644 index 0000000000..10eb85feee --- /dev/null +++ b/test/client-side-encryption/spec/legacy/awsTemporary.json @@ -0,0 +1,225 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + 
], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporary": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Insert with invalid temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporaryNoSessionToken": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/azureKMS.json b/test/client-side-encryption/spec/legacy/azureKMS.json new file mode 100644 index 0000000000..b0f5111370 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/azureKMS.json @@ -0,0 +1,235 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "azure": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_azure": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": 
"AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/badQueries.json b/test/client-side-encryption/spec/legacy/badQueries.json new file mode 100644 index 0000000000..4968307ba3 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/badQueries.json @@ -0,0 +1,1446 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "$text unconditionally fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$text": { + "$search": "search text" + } + } + }, + "result": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$where unconditionally fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$where": { + "$code": "function() { return true }" + } + } + }, + "result": { + "errorContains": "Unsupported 
match expression operator for encryption" + } + } + ] + }, + { + "description": "$bit operators succeed on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllClear": 35 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllClear": 35 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllSet": 35 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllSet": 35 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnyClear": 35 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnyClear": 35 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnySet": 35 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnySet": 35 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "geo operators succeed on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$near": [ + 0, + 0 + ] + } + } + }, + "result": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$near": [ + 0, + 0 + ] + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "result": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + 
"$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "inequality operators succeed on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gt": 1 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gt": 1 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lt": 1 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lt": 1 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gte": 1 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gte": 1 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lte": 1 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lte": 1 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "other misc operators succeed on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$regex": "pattern", + "$options": "" + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$regex": "pattern", + "$options": "" + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$size": 2 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$size": 2 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$type": 2 + } + } + }, + "result": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$type": 2 + } + } + }, + "result": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$eq": null + } + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": 
{ + "filter": { + "encrypted_string": { + "$eq": null + } + } + }, + "result": { + "errorContains": "Illegal equality to null predicate for encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$in": [ + null + ] + } + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + null + ] + } + } + }, + "result": { + "errorContains": "Illegal equality to null inside $in against an encrypted field" + } + } + ] + }, + { + "description": "$addToSet succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "unencrypted": [ + "a" + ] + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "encrypted_string": [ + "a" + ] + } + } + }, + "result": { + "errorContains": "$addToSet not allowed on encrypted values" + } + } + ] + }, + { + "description": "$inc succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$mul succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$max succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$min succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { 
+ "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$currentDate succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "unencrypted": true + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "encrypted_string": true + } + } + }, + "result": { + "errorContains": "$currentDate not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pop succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$pop not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pull succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$pull not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pullAll succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "unencrypted": [ + 1 + ] + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "encrypted_string": [ + 1 + ] + } + } + }, + "result": { + "errorContains": "$pullAll not allowed on encrypted values" + } + } + ] + }, + { + "description": "$push succeeds on unencrypted, error on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "unencrypted": 1 + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "encrypted_string": 1 + } + } + }, + "result": { + "errorContains": "$push not allowed on encrypted values" + } + } + ] + }, + { + "description": "array filters on encrypted fields does not error in mongocryptd, but errors in mongod", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string.$[i].x": 1 + } + }, + 
"arrayFilters": [ + { + "i.x": 1 + } + ] + }, + "result": { + "errorContains": "Array update operations not allowed on encrypted values" + } + } + ] + }, + { + "description": "positional operator succeeds on unencrypted, errors on encrypted", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "unencrypted": 1 + }, + "update": { + "$set": { + "unencrypted.$": 1 + } + } + }, + "result": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "abc" + }, + "update": { + "$set": { + "encrypted_string.$": "abc" + } + } + }, + "result": { + "errorContains": "Cannot encrypt fields below '$' positional update operator" + } + } + ] + }, + { + "description": "an update that would produce an array on an encrypted field errors", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string": [ + 1, + 2 + ] + } + } + }, + "result": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with encrypted field on _id errors", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "schemaMap": { + "default.default": { + "properties": { + "_id": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "result": { + "errorContains": "Invalid schema containing the 'encrypt' keyword." 
+ } + } + ] + }, + { + "description": "an insert with an array value for an encrypted field fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "encrypted_string": [ + "123", + "456" + ] + } + }, + "result": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with a Timestamp(0,0) value in the top-level fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "random": { + "$timestamp": { + "t": 0, + "i": 0 + } + } + } + }, + "result": { + "errorContains": "A command that inserts cannot supply Timestamp(0, 0) for an encrypted" + } + } + ] + }, + { + "description": "distinct with the key referring to a field where the keyID is a JSON Pointer errors", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": {}, + "fieldName": "encrypted_w_altname" + }, + "result": { + "errorContains": "The distinct key is not allowed to be marked for encryption with a non-UUID keyId" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/badSchema.json b/test/client-side-encryption/spec/legacy/badSchema.json new file mode 100644 index 0000000000..1fd0f8ed3f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/badSchema.json @@ -0,0 +1,254 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Schema with an encrypted field in an array", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "array" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "Schema without specifying parent object types", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "foo": { + 
"properties": { + "bar": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "Schema with siblings of encrypt document", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + }, + "bsonType": "object" + } + } + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "'encrypt' cannot be used in conjunction with 'bsonType'" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "Schema with logical keywords", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "anyOf": [ + { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + ] + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/basic.json b/test/client-side-encryption/spec/legacy/basic.json new file mode 100644 index 0000000000..3ed066f530 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/basic.json @@ -0,0 +1,350 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Insert with randomized encryption, then find it", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "random": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "random": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + 
"command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/bulk.json b/test/client-side-encryption/spec/legacy/bulk.json new file mode 100644 index 0000000000..1b62e5e8ab --- /dev/null +++ b/test/client-side-encryption/spec/legacy/bulk.json @@ -0,0 +1,333 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Bulk write with encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string1" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "$and": [ + { + "encrypted_string": "string1" + }, + { + "_id": 2 + } + ] + } + } + } + ], + "options": { + "ordered": true + } + } + } + ], + "expectations": [ + { + 
"command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + } + ], + "ordered": true + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "$and": [ + { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + { + "_id": { + "$eq": 2 + } + } + ] + }, + "limit": 1 + } + ], + "ordered": true + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json new file mode 100644 index 0000000000..9d09cb3fa9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json @@ -0,0 +1,402 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with bypassAutoEncryption", + "clientOptions": { + "autoEncryptOpts": { + "bypassAutoEncryption": true, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": {} + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + } + }, + { + "description": "Insert with bypassAutoEncryption for local schema", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + 
"bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "bypassAutoEncryption": true, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": {} + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json new file mode 100644 index 0000000000..18054a70cb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/bypassedCommand.json @@ -0,0 +1,107 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": {}, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "ping is bypassed", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "ping": 1 + }, + "command_name": "ping" + } + } + ] + }, + { + "description": "kill op is not bypassed", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "killOp", + "arguments": { + "command": { + "killOp": 1, + "op": 1234 + } + }, + "result": { + "errorContains": "command not supported for auto encryption: killOp" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/count.json b/test/client-side-encryption/spec/legacy/count.json new file mode 100644 index 0000000000..9df8cd639e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/count.json @@ -0,0 +1,229 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Count with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + 
} + }, + "operations": [ + { + "name": "count", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "count": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "command_name": "count" + } + } + ] + }, + { + "description": "Count fails when filtering on a random encrypted field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "count", + "arguments": { + "filter": { + "random": "abc" + } + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/countDocuments.json b/test/client-side-encryption/spec/legacy/countDocuments.json new file mode 100644 index 0000000000..07ff97f264 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/countDocuments.json @@ -0,0 +1,241 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": 
"1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "countDocuments with deterministic encryption", + "skipReason": "waiting on SERVER-39395", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "result": 1 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/create-and-createIndexes.json b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json new file mode 100644 index 0000000000..48638a97c8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json @@ -0,0 +1,115 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": 
"assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "unencryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection", + "index": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/delete.json b/test/client-side-encryption/spec/legacy/delete.json new file mode 100644 index 0000000000..a6f4ffde91 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/delete.json @@ -0,0 +1,340 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "deleteOne with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "deleteMany with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "command_name": "delete" + } + } + ], 
+ "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/distinct.json b/test/client-side-encryption/spec/legacy/distinct.json new file mode 100644 index 0000000000..9786b07814 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/distinct.json @@ -0,0 +1,276 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "distinct with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "fieldName": "encrypted_string" + }, + "result": [ + "string0" + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + 
"filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "distinct": "default", + "key": "encrypted_string", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "command_name": "distinct" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Distinct fails when filtering on a random encrypted field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "random": "abc" + }, + "fieldName": "encrypted_string" + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/explain.json b/test/client-side-encryption/spec/legacy/explain.json new file mode 100644 index 0000000000..8ca3b48d37 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/explain.json @@ -0,0 +1,239 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": 
"object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Explain a find with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "explain", + "arguments": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": "string1" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "verbosity": "allPlansExecution" + }, + "command_name": "explain" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/find.json b/test/client-side-encryption/spec/legacy/find.json new file mode 100644 index 0000000000..1feddab0e3 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/find.json @@ -0,0 +1,408 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Find with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Find with $in with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1", + "random": "abc" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Find fails when filtering on a random encrypted field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "random": "abc" + } + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/findOneAndDelete.json b/test/client-side-encryption/spec/legacy/findOneAndDelete.json new file mode 100644 index 0000000000..e418a4581b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/findOneAndDelete.json @@ -0,0 +1,221 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": 
"default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "remove": true + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + 
"collection": { + "data": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/findOneAndReplace.json b/test/client-side-encryption/spec/legacy/findOneAndReplace.json new file mode 100644 index 0000000000..78baca8432 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/findOneAndReplace.json @@ -0,0 +1,227 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1" + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/findOneAndUpdate.json b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json new file mode 100644 index 0000000000..1d85851151 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json @@ -0,0 +1,231 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + 
"tests": [ + { + "description": "findOneAndUpdate with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..9b28df2f9a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,261 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": 
"BypassQueryAnalysis decrypts", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "bypassQueryAnalysis": true + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json 
b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json new file mode 100644 index 0000000000..868095e1e6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -0,0 +1,233 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": 
"noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..c266aa6b83 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,94 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
+ } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..c324be8abc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -0,0 +1,1758 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "state collections and index are created", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + 
"key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "default state collection names are applied", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + 
"command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "drop removes all state collections", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { 
+ "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "plaintextCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "plaintextCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "plaintextCollection" + }, + "command_name": "create", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + 
"name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": 
"drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + 
"object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + 
}, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + 
"database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..1fb4c1d1bc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -0,0 +1,149 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + 
"$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json new file mode 100644 index 0000000000..ddfe57b00c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -0,0 +1,284 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { 
+ "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..bdc5c99bc2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,212 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + 
"base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..8e0c6dafa3 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,300 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": {}, + "bsonType": "object" + }, + "encrypted_fields": { + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..1c0a057cad --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,105 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..c5e689a3de --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,560 @@ +{ + "runOn": 
[ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": 
{ + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + 
"command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..6e156ffc60 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,296 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..48280f5bd4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,248 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "Query with an unindexed field fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "result": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json new file mode 
100644 index 0000000000..1e655f0a9c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -0,0 +1,116 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..a6843c4737 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -0,0 +1,87 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "encrypted_fields": { + "fields": [] + }, + "tests": [ + { + "description": "insert with no encryption succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + 
"outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": "bar" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..59241927ca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + 
"contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json new file mode 100644 index 0000000000..df2161cc36 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -0,0 +1,508 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json new file mode 100644 index 0000000000..fae25a1c02 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json @@ -0,0 +1,1842 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + 
"encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": 
{ + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + 
"$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json new file mode 100644 index 0000000000..b4f15d9b1f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json @@ -0,0 +1,442 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..97ab4aaeb9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -0,0 +1,514 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json new file mode 100644 index 0000000000..a011c388e4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -0,0 +1,499 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + 
] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json new file mode 100644 index 0000000000..6bab6499f5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json new file mode 100644 index 0000000000..d1a82c2164 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -0,0 +1,1902 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json new file mode 100644 index 0000000000..4316a31c3e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json @@ -0,0 +1,1158 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", 
+ "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + 
"$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + 
}, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + 
"$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json new file mode 100644 index 0000000000..19cae3c64f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -0,0 +1,1116 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..4ab3b63ea5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1906 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + 
"status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json new file mode 100644 index 0000000000..5a2adf6907 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -0,0 +1,1893 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json new file mode 100644 index 0000000000..b840d38347 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -0,0 +1,1910 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..271f57b125 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -0,0 +1,584 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..8954445887 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + 
"$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 
0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "name": 
"find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, 
+ "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..7b3d5d8225 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -0,0 +1,476 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..af371f7b3f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } 
+ ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": 
{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU
7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..bbe81f87ad --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -0,0 +1,571 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json new file mode 100644 index 0000000000..987bdf1aa6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": 
"binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..c2a119cb7f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,381 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + 
"command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAA
CL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jO
BSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/
JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json new file mode 100644 index 0000000000..daa7f4e973 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -0,0 +1,1132 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json new file mode 100644 index 0000000000..edb336743c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": 
"00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + 
"base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + 
"encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json new file mode 100644 index 0000000000..4a9c1f27b5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json @@ -0,0 +1,732 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": 
"FLE2 Range Double. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..d7860de83e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -0,0 +1,1136 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" 
+ } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vq
R3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplE
Gbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6Hn
RQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json new file mode 100644 index 0000000000..934af381f1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -0,0 +1,1123 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + 
}, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAA
AABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAA
gAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAox
v7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } 
+ }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 0000000000..ec95e0334a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1140 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": 
"insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKF
b6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+
DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVT
F6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..e8a50ebeca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -0,0 +1,580 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + 
}, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJc
IEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..87d0e3dd8c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + 
"name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + 
"$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + 
"$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + 
"name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + 
"base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": 
"2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json new file mode 100644 index 0000000000..8a0fecf786 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -0,0 +1,474 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..ac77931d61 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,584 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3
AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..5dcc09dca9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -0,0 +1,571 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json new file mode 100644 index 0000000000..483e3d52e6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + } + } 
+ ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json new file mode 100644 index 0000000000..6cd837c789 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -0,0 +1,484 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + 
"_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json new file mode 100644 index 0000000000..9dc4e4e501 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + 
"$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } 
+ }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json new file mode 100644 index 0000000000..b251db9157 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json @@ -0,0 +1,420 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { 
+ "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": 
"int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..6e09b5ea2c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -0,0 +1,488 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json new file mode 100644 index 0000000000..cbab7e7699 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -0,0 +1,475 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + 
"$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json new file mode 100644 index 0000000000..cb6b223943 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json @@ -0,0 +1,492 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json new file mode 100644 index 0000000000..5c4bf10101 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -0,0 +1,484 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + 
}, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json new file mode 100644 index 0000000000..d81e0933f8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + 
"$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json new file mode 100644 index 0000000000..faf0c401b7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json @@ -0,0 +1,420 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + 
"bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + 
} + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..b233b40b54 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -0,0 +1,488 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json new file mode 100644 index 0000000000..1b787d4cb6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -0,0 +1,475 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + 
"encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json new file mode 100644 index 0000000000..07182bb5e2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json @@ -0,0 +1,492 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json new file mode 100644 index 0000000000..6215604508 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json @@ -0,0 +1,163 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ], + "maxServerVersion": "8.99.99" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberLong": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json new file mode 100644 index 0000000000..cb260edc0d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -0,0 +1,570 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + 
"encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "Update can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + 
"command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..901c4dd841 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,503 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + 
"queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": 
"q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/gcpKMS.json b/test/client-side-encryption/spec/legacy/gcpKMS.json new file mode 100644 index 0000000000..65f12ec139 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/gcpKMS.json @@ -0,0 +1,237 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a 
document with auto encryption using GCP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "gcp": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json new file mode 100644 index 0000000000..94e788ef61 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/getMore.json @@ -0,0 +1,266 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, 
+ "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "getMore with encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "batchSize": 2, + "filter": {} + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + }, + { + "_id": 3, + "encrypted_string": "string2" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "batchSize": 2 + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "default", + "batchSize": 2 + }, + "command_name": "getMore" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/insert.json b/test/client-side-encryption/spec/legacy/insert.json new file mode 100644 index 0000000000..cf2910fd7a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/insert.json @@ -0,0 +1,344 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "insertOne with encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "insertMany with encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": 
"default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/keyAltName.json b/test/client-side-encryption/spec/legacy/keyAltName.json new file mode 100644 index 0000000000..7f71b9dbeb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/keyAltName.json @@ -0,0 +1,228 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with encryption using key alt name", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_w_altname": "string0", + "altname": "altname" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + "keyAltNames": { + "$in": [ + "altname" + ] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ] + } + } + }, + { + "description": "Replace with key alt name fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_w_altname": "string0" + } + }, + "upsert": true + }, + "result": { + "errorContains": "A non-static (JSONPointer) keyId is not supported" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/keyCache.json b/test/client-side-encryption/spec/legacy/keyCache.json new file mode 100644 index 0000000000..912ce80020 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/keyCache.json @@ -0,0 +1,270 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + 
"key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "keyExpirationMS": 1 + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/kmipKMS.json b/test/client-side-encryption/spec/legacy/kmipKMS.json new file mode 100644 index 0000000000..349328b433 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/kmipKMS.json @@ -0,0 +1,362 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": 
"default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + 
"$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/localKMS.json b/test/client-side-encryption/spec/legacy/localKMS.json new file mode 100644 index 0000000000..67c4ba1308 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/localKMS.json @@ -0,0 +1,191 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using local KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {}, + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/localSchema.json b/test/client-side-encryption/spec/legacy/localSchema.json new file mode 100644 index 0000000000..4698520f6f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/localSchema.json @@ -0,0 +1,258 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": {}, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": 
"1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "A local schema should override", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "A local schema with no encryption is an error", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "test": { + "bsonType": "string" + } + }, + "bsonType": "object", + "required": [ + "test" + ] + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "JSON schema keyword 'required' is only allowed with a remote schema" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/malformedCiphertext.json b/test/client-side-encryption/spec/legacy/malformedCiphertext.json new file mode 100644 index 0000000000..c81330ce83 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/malformedCiphertext.json @@ -0,0 +1,321 @@ +{ + "runOn": [ + 
{ + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQ==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAa2V2aW4gYWxiZXJ0c29uCg==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Wrong subtype", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "Empty data", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + 
} + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 2 + } + }, + "result": { + "errorContains": "malformed ciphertext" + } + } + ] + }, + { + "description": "Malformed data", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 3 + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json new file mode 100644 index 0000000000..f04f58dffd --- /dev/null +++ b/test/client-side-encryption/spec/legacy/maxWireVersion.json @@ -0,0 +1,74 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "operation fails with maxWireVersion < 8", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "extraOptions": { + "mongocryptdBypassSpawn": true + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "encrypted_string": "string0" + } + }, + "result": { + 
"errorContains": "Auto-encryption requires a minimum MongoDB version of 4.2" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/missingKey.json b/test/client-side-encryption/spec/legacy/missingKey.json new file mode 100644 index 0000000000..275147bb72 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/missingKey.json @@ -0,0 +1,179 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with encryption on a missing key", + "clientOptions": { + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.different", + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "different", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/namedKMS.json b/test/client-side-encryption/spec/legacy/namedKMS.json new file mode 
100644 index 0000000000..c859443585 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/namedKMS.json @@ -0,0 +1,197 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local:name2": { + "key": { + "$binary": { + "base64": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/noSchema.json b/test/client-side-encryption/spec/legacy/noSchema.json new file mode 100644 index 0000000000..095434f886 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/noSchema.json @@ -0,0 +1,67 @@ +{ + "runOn": [ + { + "minServerVersion": 
"4.1.10" + } + ], + "database_name": "default", + "collection_name": "unencrypted", + "tests": [ + { + "description": "Insert on an unencrypted collection", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/replaceOne.json b/test/client-side-encryption/spec/legacy/replaceOne.json new file mode 100644 index 0000000000..9757686819 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/replaceOne.json @@ -0,0 +1,239 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "replaceOne with encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + 
"encrypted_string": "string1", + "random": "abc" + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + } + ], + "ordered": true + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json new file mode 100644 index 0000000000..b667767cfc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/types.json b/test/client-side-encryption/spec/legacy/types.json new file mode 100644 index 0000000000..a6c6507e90 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/types.json @@ -0,0 +1,1646 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": {}, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "type=objectId", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_objectId": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "objectId", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=symbol", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_symbol": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "symbol", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=int", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_int": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=double", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_double": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "double", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_double": { + "$numberDouble": "1.23" + } + } + }, + "result": { + "errorContains": "element of type: double" + } + } + ] + }, + { + "description": "type=decimal", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_decimal": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "decimal", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_decimal": { + "$numberDecimal": "1.23" + } + } + }, + "result": { + "errorContains": "element of type: decimal" + } + } + ] + }, + { + "description": "type=binData", + "clientOptions": { + "autoEncryptOpts": { + 
"schemaMap": { + "default.default": { + "properties": { + "encrypted_binData": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "binData", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=javascript", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascript": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascript", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { 
+ "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=javascriptWithScope", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascriptWithScope": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascriptWithScope", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascriptWithScope": { + "$code": "var x = 1;", + "$scope": {} + } + } + }, + "result": { + "errorContains": "element of type: javascriptWithScope" + } + } + ] + }, + { + "description": "type=object", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_object": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "object", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_object": {} + } + }, + "result": { + "errorContains": "element of type: object" + } + } + ] + }, + { + "description": "type=timestamp", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_timestamp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "timestamp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + 
"collection": { + "data": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=regex", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_regex": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "regex", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=date", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_date": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "date", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + } + }, + { + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": 
"default", + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "type=minKey", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_minKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "minKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_minKey": { + "$minKey": 1 + } + } + }, + "result": { + "errorContains": "Cannot encrypt element of type: minKey" + } + } + ] + }, + { + "description": "type=maxKey", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_maxKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "maxKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_maxKey": { + "$maxKey": 1 + } + } + }, + "result": { + "errorContains": "Cannot encrypt element of type: maxKey" + } + } + ] + }, + { + "description": "type=undefined", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_undefined": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "undefined", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_undefined": { + "$undefined": true + } + } + }, + "result": { + "errorContains": "Cannot encrypt element of type: undefined" + } + } + ] + }, + { + "description": "type=array", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_array": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_array": [] + } + }, + "result": { + "errorContains": "element of type: array" + } + } + ] + }, + { + "description": "type=bool", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + 
"default.default": { + "properties": { + "encrypted_bool": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "bool", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_bool": true + } + }, + "result": { + "errorContains": "element of type: bool" + } + } + ] + }, + { + "description": "type=null", + "clientOptions": { + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_null": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "null", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_null": true + } + }, + "result": { + "errorContains": "Cannot encrypt element of type: null" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/unsupportedCommand.json b/test/client-side-encryption/spec/legacy/unsupportedCommand.json new file mode 100644 index 0000000000..3188715115 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/unsupportedCommand.json @@ -0,0 +1,152 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "x": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "x": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "mapReduce deterministic encryption (unsupported)", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "result": { + "errorContains": "command not supported for auto encryption: mapreduce" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/updateMany.json b/test/client-side-encryption/spec/legacy/updateMany.json new file mode 100644 index 0000000000..823909044b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/updateMany.json @@ -0,0 +1,307 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "updateMany with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + }, + "update": { + "$set": { + "encrypted_string": "string2", + "random": "abc" + } + } + }, + "result": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": true, + "upsert": false + } + ], + "ordered": true + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "updateMany fails when filtering on a random field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/updateOne.json 
b/test/client-side-encryption/spec/legacy/updateOne.json new file mode 100644 index 0000000000..23bada964f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/updateOne.json @@ -0,0 +1,465 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "updateOne with deterministic encryption", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1", + "random": "abc" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { 
+ "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + } + } + ], + "ordered": true + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "updateOne fails when filtering on a random field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "result": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "$unset works with an encrypted field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$unset": { + "encrypted_string": "" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$unset": { + "encrypted_string": "" + } + } + } + ], + "ordered": true + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + }, + { + "description": "$rename works if target value has same encryption options", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + } + } + ], + "ordered": true + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_equivalent": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "$rename fails if target value has different encryption options", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + 
"update": { + "$rename": { + "encrypted_string": "random" + } + } + }, + "result": { + "errorContains": "$rename between two encrypted fields must have the same metadata or both be unencrypted" + } + } + ] + }, + { + "description": "an invalid update (no $ operators) is validated and errors", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "encrypted_string": "random" + } + }, + "result": { + "errorContains": "" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..e07137ce15 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json @@ -0,0 +1,642 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + 
"subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json new file mode 100644 index 0000000000..24f33ab3ec --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json @@ -0,0 +1,219 @@ +{ + "description": "QE-Text-cleanupStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { 
+ "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text cleanupStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "cleanupStructuredEncryptionData": "coll" + }, + "commandName": "cleanupStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "cleanupStructuredEncryptionData": "coll", + "cleanupTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "cleanupStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json new file mode 100644 index 0000000000..c7abfe2d4b --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json @@ -0,0 +1,261 @@ +{ + "description": "QE-Text-compactStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text compactStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "coll" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "coll", + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "db.coll": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ], + "strEncodeVersion": { + "$numberInt": "1" + }, + "escCollection": "enxcol_.coll.esc", + "ecocCollection": "enxcol_.coll.ecoc" + } + } + }, + "compactionTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json new file mode 100644 index 0000000000..7279385743 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-prefixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + 
"topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE prefixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": 
"wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fmUMXTMV/XRiN0IL3VXxSEn6SQG9E6Po30kJKB8JJlQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vZIDMiFDgjmLNYVrrbnq1zT4hg7sGpe/PMtighSsnRc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "26Z5G+sHTzV3D7F8Y0m08389USZ2afinyFV3ez9UEBQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q/JEq8of7bE0QE5Id0XuOsNQ4qVpANYymcPQDUL2Ywk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Uvvv46LkfbgLoPqZ6xTBzpgoYRTM6FUgRdqZ9eaVojI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nMxdq2lladuBJA3lv3JC2MumIUtRJBNJVLp3PVE6nQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hS3V0qq5CF/SkTl3ZWWWgXcAJ8G5yGtkY2RwcHNc5Oc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McgwYUxfKj5+4D0vskZymy4KA82s71MR25iV/Enutww=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ciqdk1b+t+Vrr6oIlFFk0Zdym5BPmwN3glQ0/VcsVdM=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json new file mode 100644 index 0000000000..6a8f133eac --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json @@ -0,0 +1,551 @@ +{ + "description": "QE-Text-substringPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": 
"q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "10" + }, + "strMaxLength": { + "$numberLong": "20" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "oba" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IpY3x/jjm8j/74jAdUhgxdM5hk68zR0zv/lTKm/72Vg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G+ky260C6QiOfIxKz14FmaMbAxvui1BKJO/TnLOHlGk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7dv3gAKe9vwJMZmpB40pRCwRTmc7ds9UkGhxH8j084E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o0V+Efn6x8XQdE80F1tztNaT3qxHjcsd9DOQ47BtmQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sJvrCjyVot7PIZFsdRehWFANKAj6fmBaj3FLbz/dZLE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e98auxFmu02h5MfBIARk29MI7hSmvN3F9DaQ0xjqoEM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US83krGNov/ezL6IhsY5eEOCxv1xUPDIEL/nmY0IKi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P2Aq5+OHZPG0CWIdmZvWq9c/18ZKVYW3vbxd+WU/TXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8AdPRPnSzcd5uhq4TZfNvNeF0XjLNVwAsJJMTtktw84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9O6u/G51I4ZHFLhL4ZLuudbr0s202A2QnPfThmOXPhI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N7AjYVyVlv6+lVSTM+cIxRL3SMgs3G5LgxSs+jrgDkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RbGF7dQbPGYQFd9DDO1hPz1UlLOJ77FAC6NsjGwJeos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m7srHMgKm6kZwsNx8rc45pmw0/9Qro6xuQ8lZS3+RYk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K75CNU3JyKFqZWPiIsVi4+n7DhYmcPl/nEhQ3d88mVI=", + "subType": "00" + 
} + }, + { + "$binary": { + "base64": "c7bwGpUZc/7JzEnMS7qQ/TPuXZyrmMihFaAV6zIqbZc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rDvEdUgEk8u4Srt3ETokWs2FXcnyJaRGQ+NbkFwi2rQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VcdZj9zfveRBRlpCR2OYWau2+GokOFb73TE3gpElNiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOa9o2xfA6OgkbYUxd6wQJicaeN6guhy2V66W3ALsaA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1xGkJh+um70XiRd8lKLDtyHgDqrf7/59Mg7X0+KZh8k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OSvllqHxycbcZN4phR6NDujY3ttA59o7nQJ6V9eJpX0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZTX1pyk8Vdw0BSbJx7GeJNcQf3tGKxbrrNSTqBqUWkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cn7V05zb5iXwYrePGMHztC+GRq+Tj8IMpRDraauPhSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E9bV9KyrZxHJSUmMg0HrDK4gGN+75ruelAnrM6hXQgY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrssTNmdgXoTGpbaF0JLRCGH6cDQuz1XEFNTy98nrb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jZmyOJP35dsxQ/OY5U4ISpVRIYr8iedNfcwZiKt29Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d2mocORMbX9MX+/itAW8r1kxVw2/uii4vzXtc+2CIRQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JBnJy58eRPhDo3DuZvsHbvQDiHXxdtAx1Eif66k5SfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OjbDulC8s62v0pgweBSsQqtJjJBwH5JinfJpj7nVr+A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "85i7KT2GP9nSda3Gsil5LKubhq0LDtc22pxBxHpR+nE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "u9Fvsclwrs9lwIcMPV/fMZD7L3d5anSfJQVjQb9mgLg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LZ32ttmLJGOIw9oFaUCn3Sx5uHPTYJPSFpeGRWNqlUc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMsZvGEePTqtl0FJAL/jAdyWNQIlpwN61YIlZsSIZ6s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XZcu1a/ZGsIzAl3j4MXQlLo4v2p7kvIqRHtIQYFmL6k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Zse27LinlYCEnX6iTmJceI33mEJxFb0LdPxp0RiMOaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOv2Hgb2/sBpnX9XwFbIN6yDxhjchwlmczUf82W2tp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oQxZ9A6j3x5j6x1Jqw/N9tpP4rfWMjcV3y+a3PkrL7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/D7ew3EijyUnmT22awVFspcuyo3JChJcDeCPwpljzVM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BEmmwqyamt9X3bcWDld61P01zquy8fBHAXq3SHAPP0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wygD9/kAo1KsRvtr1v+9/lvqoWdKwgh6gDHvAQfXPPk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pRTKgF/uksrF1c1AcfSTY6ZhqBKVud1vIztQ4/36SLs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C4iUo8oNJsjJ37BqnBgIgSQpf99X2Bb4W5MZEAmakHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "icoE53jIq6Fu/YGKUiSUTYyZ8xdiTQY9jJiGxVJObpw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oubCwk0V6G2RFWtcOnYDU4uUBoXBrhBRi4nZgrYj9JY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IyqhQ9nGhzEi5YW2W6v1kGU5DY2u2qSqbM/qXdLdWVU=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + 
"substring": "blah" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json new file mode 100644 index 0000000000..deec5e63b0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-suffixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] 
+ }, + { + "description": "Query with matching $encStrEndsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uDCWsucUsJemUP7pmeb+Kd8B9qupVzI8wnLFqX1rkiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W3E1x4bHZ8SEHFz4zwXM0G5Z5WSwBhnxE8x5/qdP6JM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6g/TXVDDf6z+ntResIvTKWdmIy4ajQ1rhwdNZIiEG7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hU+u/T3D6dHDpT3d/v5AlgtRoAufCXCAyO2jQlgsnCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vrPnq0AtBIURNgNGA6HJL+5/p5SBWe+qz8505TRo/dE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W5pylBxdv2soY2NcBfPiHDVLTS6tx+0ULkI8gysBeFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oWO3xX3x0bYUJGK2S1aPAmlU3Xtfsgb9lTZ6flGAlsg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SjZGucTEUbdpd86O8yj1pyMyBOOKxvAQ9C8ngZ9C5UE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CEaMZkxVDVbnXr+To0DOyvsva04UQkIYP3KtgYVVwf8=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrEndsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json new file mode 100644 index 0000000000..f70bc572a8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -0,0 +1,609 @@ +{ + "description": "addKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": 
"1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "add keyAltName to non-existent data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "new_key_alt_name" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "new_key_alt_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with no keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add existing keyAltName to existing data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + 
}, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "another_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + 
"$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": "$keyAltNames" + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "another_name" + }, + { + "keyAltNames": "local_key" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "another_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/aggregate.json b/test/client-side-encryption/spec/unified/aggregate.json new file mode 100644 index 0000000000..d04ce49d28 --- /dev/null +++ b/test/client-side-encryption/spec/unified/aggregate.json @@ -0,0 +1,433 @@ +{ + "description": "aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with deterministic encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Aggregate with empty pipeline", + "skipReason": "SERVER-40829 hides agg support behind enableTestCommands flag.", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [], + "cursor": {} + }, + 
"commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Aggregate should fail with random encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "random": "abc" + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "Database aggregate should fail", + "operations": [ + { + "name": "aggregate", + "object": "db", + "arguments": { + "pipeline": [ + { + "$currentOp": { + "allUsers": false, + "idleConnections": false, + "localOps": true + } + }, + { + "$match": { + "command.aggregate": { + "$eq": 1 + } + } + }, + { + "$project": { + "command": 1 + } + }, + { + "$project": { + "command.lsid": 0 + } + } + ] + }, + "expectError": { + "errorContains": "non-collection command not supported for auto encryption: aggregate" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/awsTemporary.json b/test/client-side-encryption/spec/unified/awsTemporary.json new file mode 100644 index 0000000000..24b732a5eb --- /dev/null +++ b/test/client-side-encryption/spec/unified/awsTemporary.json @@ -0,0 +1,313 @@ +{ + "description": "awsTemporary", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": "bad" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert with invalid temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/azureKMS.json b/test/client-side-encryption/spec/unified/azureKMS.json new file mode 100644 index 0000000000..b70959217f --- /dev/null +++ b/test/client-side-encryption/spec/unified/azureKMS.json @@ -0,0 +1,293 @@ +{ + "description": "azureKMS", + 
"schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encrypted_string_azure": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badQueries.json b/test/client-side-encryption/spec/unified/badQueries.json new file mode 100644 index 0000000000..7a4f30d5b7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badQueries.json @@ -0,0 +1,1393 @@ +{ + "description": "badQueries", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "collection": { + "id": "coll_with_encrypted_id", + "database": "db", + "collectionName": "coll_with_encrypted_id" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": 
"04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "default", + "collectionName": "coll_with_encrypted_id", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "_id": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + } + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "$text unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$text": { + "$search": "search text" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$where unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$where": { + "$code": "function() { return true }" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$bit operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnyClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnyClear": 
35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "geo operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "inequality operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + 
"encrypted_string": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "other misc operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$size": 2 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$size": 2 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$eq": null + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$eq": null + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null predicate for encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null inside $in against an encrypted field" + } + } + ] + }, + { + "description": "$addToSet succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "unencrypted": [ + "a" + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": 
"updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "encrypted_string": [ + "a" + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$addToSet not allowed on encrypted values" + } + } + ] + }, + { + "description": "$inc succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$mul succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$max succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$min succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$currentDate succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "unencrypted": true + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "encrypted_string": true + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$currentDate not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pop succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "encrypted_string": 1 + } + } + 
}, + "object": "coll", + "expectError": { + "errorContains": "$pop not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pull succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pull not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pullAll succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "unencrypted": [ + 1 + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "encrypted_string": [ + 1 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pullAll not allowed on encrypted values" + } + } + ] + }, + { + "description": "$push succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$push not allowed on encrypted values" + } + } + ] + }, + { + "description": "array filters on encrypted fields does not error in mongocryptd, but errors in mongod", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string.$[i].x": 1 + } + }, + "arrayFilters": [ + { + "i.x": 1 + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Array update operations not allowed on encrypted values" + } + } + ] + }, + { + "description": "positional operator succeeds on unencrypted, errors on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "unencrypted": 1 + }, + "update": { + "$set": { + "unencrypted.$": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "abc" + }, + "update": { + "$set": { + "encrypted_string.$": "abc" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt fields below '$' positional update operator" + } + } + ] + }, + { + "description": "an update that would produce an array on an encrypted field errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string": [ + 1, + 2 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with encrypted field on _id errors", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll_with_encrypted_id", + "expectError": { + "errorContains": "Invalid schema containing the 
'encrypt' keyword." + } + } + ] + }, + { + "description": "an insert with an array value for an encrypted field fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "encrypted_string": [ + "123", + "456" + ] + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with a Timestamp(0,0) value in the top-level fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "random": { + "$timestamp": { + "t": 0, + "i": 0 + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "A command that inserts cannot supply Timestamp(0, 0) for an encrypted" + } + } + ] + }, + { + "description": "distinct with the key referring to a field where the keyID is a JSON Pointer errors", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": {}, + "fieldName": "encrypted_w_altname" + }, + "object": "coll", + "expectError": { + "errorContains": "The distinct key is not allowed to be marked for encryption with a non-UUID keyId" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badSchema.json b/test/client-side-encryption/spec/unified/badSchema.json new file mode 100644 index 0000000000..af93d659d4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badSchema.json @@ -0,0 +1,393 @@ +{ + "description": "badSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "array" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "foo": { + "properties": { + "bar": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + 
"bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + }, + "bsonType": "object" + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "anyOf": [ + { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + ] + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Schema with an encrypted field in an array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll0", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema without specifying parent object types", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with siblings of encrypt document", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll2", + "expectError": { + "errorContains": "'encrypt' cannot be used in conjunction with 'bsonType'" + } + } + 
], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with logical keywords", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll3", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/basic.json b/test/client-side-encryption/spec/unified/basic.json new file mode 100644 index 0000000000..5522f585da --- /dev/null +++ b/test/client-side-encryption/spec/unified/basic.json @@ -0,0 +1,431 @@ +{ + "description": "basic", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with randomized encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "random": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "random": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { 
+ "insert": "default", + "documents": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bulk.json b/test/client-side-encryption/spec/unified/bulk.json new file mode 100644 index 0000000000..90922b88d0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bulk.json @@ -0,0 +1,407 @@ +{ + "description": "bulk", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + 
"$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Bulk write with encryption", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "encrypted_string": "string1" + } + } + }, + { + "updateOne": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + } + }, + { + "deleteOne": { + "filter": { + "$and": [ + { + "encrypted_string": "string1" + }, + { + "_id": 2 + } + ] + } + } + } + ], + "ordered": true + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "$and": [ + { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + { + "_id": { + "$eq": 2 + } + } + ] + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/bypassAutoEncryption.json b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json new file mode 100644 index 0000000000..3254c43781 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json @@ -0,0 +1,403 @@ +{ + "description": "bypassAutoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "bypassAutoEncryption": true, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with bypassAutoEncryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + 
"object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with bypassAutoEncryption for local schema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bypassedCommand.json b/test/client-side-encryption/spec/unified/bypassedCommand.json new file mode 100644 index 0000000000..b0c4c56322 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassedCommand.json @@ -0,0 +1,147 @@ +{ + "description": "bypassedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": 
"client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "ping is bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "kill op is not bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "killOp": 1, + "op": 1234 + }, + "commandName": "killOp" + }, + "expectError": { + "errorContains": "command not supported for auto encryption: killOp" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/count.json b/test/client-side-encryption/spec/unified/count.json new file mode 100644 index 0000000000..d44b3e827d --- /dev/null +++ b/test/client-side-encryption/spec/unified/count.json @@ -0,0 +1,293 @@ +{ + "description": "count", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + 
"encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Count with deterministic encryption", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "cursor": {}, + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + { + "$group": { + "_id": { + "$const": 1 + }, + "n": { + "$sum": { + "$const": 1 + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Count fails when filtering on a random 
encrypted field", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/countDocuments.json b/test/client-side-encryption/spec/unified/countDocuments.json new file mode 100644 index 0000000000..c0202258b8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/countDocuments.json @@ -0,0 +1,296 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with deterministic encryption", + "skipReason": "waiting on SERVER-39395", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 1 + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/create-and-createIndexes.json b/test/client-side-encryption/spec/unified/create-and-createIndexes.json new file mode 100644 index 0000000000..5debd15945 --- /dev/null +++ b/test/client-side-encryption/spec/unified/create-and-createIndexes.json @@ -0,0 +1,121 @@ +{ + "description": "create-and-createIndexes", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencryptedCollection" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": 
"unencryptedCollection", + "documents": [] + } + ], + "tests": [ + { + "description": "create is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createIndex", + "object": "coll", + "arguments": { + "keys": { + "x": 1 + }, + "name": "name" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection", + "indexName": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json new file mode 100644 index 0000000000..2344a61a95 --- /dev/null +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -0,0 +1,119 @@ +{ + "description": "createDataKey-kms_providers-invalid", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "create data key without required master key fields", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": {} + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key field", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "masterKey": { + "invalid": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "invalid" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createDataKey.json b/test/client-side-encryption/spec/unified/createDataKey.json new file mode 100644 
index 0000000000..f99fa3dbcf --- /dev/null +++ b/test/client-side-encryption/spec/unified/createDataKey.json @@ -0,0 +1,775 @@ +{ + "description": "createDataKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with GCP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp", + "opts": { + 
"masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with KMIP delegated KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip", + "opts": { + "masterKey": { + "delegated": true + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + }, + "delegated": true + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with no keyAltName", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$exists": false + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with single keyAltName", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "local_key" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with multiple keyAltNames", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc", + "def" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": 1 + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "abc" + }, + { + "keyAltNames": "def" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$type": "array" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "create datakey with custom key material", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": 
"object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with invalid custom key material (too short)", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/delete.json b/test/client-side-encryption/spec/unified/delete.json new file mode 100644 index 0000000000..242bcdba8c --- /dev/null +++ b/test/client-side-encryption/spec/unified/delete.json @@ -0,0 +1,396 @@ +{ + "description": "delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { 
+ "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with deterministic encryption", + "operations": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "deleteMany with deterministic encryption", + "operations": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json new file mode 100644 index 0000000000..3a10fb082f --- /dev/null +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -0,0 +1,557 @@ +{ + "description": "deleteKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "delete non-existent data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing AWS data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing local data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": 
"bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ] + } + ] + }, + { + "description": "delete existing data key twice", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/distinct.json b/test/client-side-encryption/spec/unified/distinct.json new file mode 100644 index 0000000000..a7ac0fc7f1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/distinct.json @@ -0,0 +1,325 @@ +{ + "description": "distinct", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + 
"createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "distinct with deterministic encryption", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectResult": [ + "string0" + ] + } + ], + "outcome": [ + { + "documents": [ + { + 
"_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "default", + "key": "encrypted_string", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "Distinct fails when filtering on a random encrypted field", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "random": "abc" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/explain.json b/test/client-side-encryption/spec/unified/explain.json new file mode 100644 index 0000000000..667f921165 --- /dev/null +++ b/test/client-side-encryption/spec/unified/explain.json @@ -0,0 +1,293 @@ +{ + "description": "explain", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + 
"createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Explain a find with deterministic encryption", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": "string1" + } + } + }, + "commandName": "explain" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "verbosity": "allPlansExecution" + }, + "commandName": "explain" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/find.json b/test/client-side-encryption/spec/unified/find.json new file mode 100644 index 0000000000..7f358d9c08 --- /dev/null +++ b/test/client-side-encryption/spec/unified/find.json @@ -0,0 +1,458 @@ +{ + "description": "find", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Find with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find with $in with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1", + "random": "abc" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + 
"databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find fails when filtering on a random encrypted field", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndDelete.json b/test/client-side-encryption/spec/unified/findOneAndDelete.json new file mode 100644 index 0000000000..ff1103cb9b --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndDelete.json @@ -0,0 +1,276 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": 
"AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with deterministic encryption", + "operations": [ + { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "remove": true + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndReplace.json b/test/client-side-encryption/spec/unified/findOneAndReplace.json new file mode 100644 index 0000000000..c1a89fd2f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndReplace.json @@ -0,0 +1,282 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": 
"keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with deterministic encryption", + "operations": [ + { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1" + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + 
"name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndUpdate.json b/test/client-side-encryption/spec/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..ffcb0e79e4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndUpdate.json @@ -0,0 +1,286 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + 
"key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with deterministic encryption", + "operations": [ + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..671413b83f --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,324 @@ +{ + "description": "fle2v2-BypassQueryAnalysis", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + 
"keyVaultNamespace": "keyvault.datakeys", + "bypassQueryAnalysis": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": 
"C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Compact.json new file mode 100644 index 0000000000..07ebf4351b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Compact.json @@ -0,0 +1,312 @@ +{ + "description": "fle2v2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", 
+ "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "db1", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectError": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] 
+} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..fc069d55b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,127 @@ +{ + "description": "fle2v2-CreateCollection-OldServer", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
+ } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..3dfb76c461 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json @@ -0,0 +1,1748 @@ +{ + "description": "fle2v2-CreateCollection", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": {} + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + } + ], + "tests": [ + { + "description": "state collections and index are created", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": 
"assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "default state collection names are applied", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + 
"arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "drop removes all state collections", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "db", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": 
"__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "plaintextCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "create" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + 
"databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + 
"databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + 
"bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + 
"__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": 
"create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "operations": [ + { + "name": "dropCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..b171c78c00 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json @@ -0,0 +1,186 @@ +{ + "description": "fle2v2-DecryptExistingData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": 
"BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Delete.json new file mode 100644 index 0000000000..305f642ae1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Delete.json @@ -0,0 +1,326 @@ +{ + "description": "fle2v2-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": 
"string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + 
"base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..7a6957db0a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,258 @@ +{ + "description": "fle2v2-EncryptedFields-vs-EncryptedFieldsMap", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "databaseName": "default", + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..af24e9b369 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,367 @@ +{ + "description": "fle2v2-EncryptedFields-vs-jsonSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "validator": { + "$jsonSchema": { + "properties": {}, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + 
"collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { 
+ "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..3727e43147 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,139 @@ +{ + "description": "fle2v2-EncryptedFieldsMap-defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..5131dc9fef --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,622 @@ +{ + "description": "fle2v2-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + 
"name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..8155797583 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,361 @@ +{ + "description": "fle2v2-InsertFind-Indexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": 
"client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", 
+ "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..a6410bb9d8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,301 @@ +{ + "description": "fle2v2-InsertFind-Unindexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + 
"collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Query with an unindexed field fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json new file mode 100644 index 0000000000..dc8ffc57b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json @@ -0,0 +1,137 @@ +{ + "description": "fle2v2-MissingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..4036fe5edd --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json @@ -0,0 +1,123 @@ +{ + "description": "fle2v2-NoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [] + } + } + } + ], + "tests": [ + { + "description": "insert with no encryption succeeds", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + 
"commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..8ccbcafc24 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json @@ -0,0 +1,358 @@ +{ + "description": "fle2v2-Rangev2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": 
[ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json new file mode 100644 index 0000000000..7933cc5600 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json @@ -0,0 +1,574 @@ +{ + "description": "fle2v2-Rangev2-Date-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { 
+ "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json new file mode 100644 index 0000000000..9ed541fa8e --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json @@ -0,0 +1,1610 @@ +{ + "description": "fle2v2-Rangev2-Date-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": 
"coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": 
"find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + 
"$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { 
+ "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + 
"$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json new file mode 100644 index 0000000000..ad05dd4e17 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json @@ -0,0 +1,505 @@ +{ + "description": "fle2v2-Rangev2-Date-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": 
"keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..55db0279c2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -0,0 +1,577 @@ +{ + "description": 
"fle2v2-Rangev2-Date-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, 
+ "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } 
+ } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json new file mode 100644 index 0000000000..1fd1edf191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json @@ -0,0 +1,562 @@ +{ + "description": "fle2v2-Rangev2-Date-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + 
"provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 
1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json new file mode 100644 index 0000000000..d5153270d5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json @@ -0,0 +1,581 @@ +{ + "description": "fle2v2-Rangev2-Date-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { 
+ "encryptedDate": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json new file mode 100644 index 0000000000..712a68be32 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json @@ -0,0 +1,1965 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, 
+ "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json new file mode 100644 index 0000000000..edca7724a7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json @@ -0,0 +1,1016 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + 
}, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { 
+ "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] 
+ } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + 
"$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json new file mode 100644 index 0000000000..4b0121ac22 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json @@ -0,0 +1,1179 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..2697549f6a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1969 @@ +{ + "description": "fle2v2-Rangev2-Decimal-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", 
+ "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + 
"bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAK
VfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hug
Igo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U
1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5
rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa
0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwC
PgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTA
qg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfV
xzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANti
fY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json new file mode 100644 index 0000000000..e3d52f5d04 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json @@ -0,0 +1,1956 @@ +{ + "description": "fle2v2-Rangev2-Decimal-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + 
"minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, 
+ "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json new file mode 100644 index 0000000000..8ade3593e6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json @@ -0,0 +1,1975 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + 
"database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + 
"sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+
MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAv
tpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6v
ek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUs
STf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds
0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxM
jIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZ
yPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB
9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvX
kmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..41ba49112b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": 
"1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..bc4e1f4508 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, 
+ "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": 
"aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": 
{ + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..1912f68ee5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -0,0 +1,539 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + 
"minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + 
"queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIA
AAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..9cf4488622 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,651 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": 
"coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + 
"trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGP
SMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..a9c3a8a46a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + 
"collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + 
}, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpP
mVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json new file mode 100644 index 0000000000..7f8ea38ae0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + 
"queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvd
Q2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..cdbd169676 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,444 @@ +{ + "description": "fle2v2-Rangev2-Defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + 
"databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + 
"$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s
9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7
OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json new file mode 100644 index 0000000000..c0211a1a34 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json @@ -0,0 +1,1195 @@ +{ + "description": "fle2v2-Rangev2-Double-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json new file mode 100644 index 0000000000..3bffc95191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json @@ -0,0 +1,1018 @@ +{ + "description": "fle2v2-Rangev2-Double-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + 
"client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + 
"object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" 
+ }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json new file mode 100644 index 0000000000..ac82c52b14 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json @@ -0,0 +1,795 @@ +{ + "description": "fle2v2-Rangev2-Double-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..ce1be99a3a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -0,0 +1,1199 @@ +{ + "description": "fle2v2-Rangev2-Double-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { 
+ "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + 
"$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAA
AvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgA
AAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV
7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" 
+ } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json new file mode 100644 index 0000000000..cac8bcafea --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json @@ -0,0 +1,1186 @@ +{ + "description": "fle2v2-Rangev2-Double-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 0000000000..938657c91c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1205 @@ +{ + "description": "fle2v2-Rangev2-Double-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + 
} + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + 
"_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsE
w+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rA
dudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37y
fQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..2046630a7b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -0,0 +1,643 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..939a12c9f8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, 
+ "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": 
"0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] 
+ }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { 
+ "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json new file mode 100644 index 0000000000..db615d6fe3 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -0,0 +1,537 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + 
"contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcju
IU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..a8f87596e8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": 
"default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + 
"$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i
9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..5e4aa5f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": 
{ + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7ll
JVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json new file mode 100644 index 0000000000..10cae6be89 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, 
+ { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" 
+ }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/N
erFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json new file mode 100644 index 0000000000..77a8f43e9c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Int-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json new file mode 100644 index 0000000000..dde5ec371b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Int-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, 
+ { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + 
"encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } 
+ } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] 
+ } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json new file mode 100644 index 0000000000..1c54c6e0f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Int-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": 
"client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + 
"$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..265a0c6f0d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Int-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], 
+ "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json new file mode 100644 index 0000000000..08b6d2c2a5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Int-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { 
+ "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json new file mode 100644 index 0000000000..9f28f768bb --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Int-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + 
"ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json new file mode 100644 index 0000000000..01ff139a55 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Long-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + 
"fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + 
"keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json new file mode 100644 index 0000000000..cc5388b1f0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Long-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + 
} + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + 
"name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": 
"0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + 
"object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json new file mode 100644 index 0000000000..0a8580110c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Long-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + 
"$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..f014e1a4ac --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Long-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json new file mode 100644 index 0000000000..2896df0032 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Long-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json new file mode 100644 index 0000000000..4f8cd1d80d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Long-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + 
"ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json new file mode 100644 index 0000000000..03681947ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json @@ -0,0 +1,204 @@ +{ + "description": "fle2v2-Rangev2-WrongType", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "maxServerVersion": "8.99.99", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberLong": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": 
{ + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Update.json new file mode 100644 index 0000000000..9c39c4d83d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Update.json @@ -0,0 +1,633 @@ +{ + "description": "fle2v2-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } 
+ }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Update can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + 
"encryptedIndexed": "value456" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json 
b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..54cc60a3b1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,304 @@ +{ + "description": "fle2v2-validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "coll", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + 
"object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "coll", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/gcpKMS.json b/test/client-side-encryption/spec/unified/gcpKMS.json new file mode 100644 index 0000000000..6468b5b6ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/gcpKMS.json @@ -0,0 +1,292 @@ +{ + "description": "gcpKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using GCP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKey.json 
b/test/client-side-encryption/spec/unified/getKey.json new file mode 100644 index 0000000000..2ea3fe7358 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -0,0 +1,319 @@ +{ + "description": "getKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], 
+ "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json new file mode 100644 index 0000000000..2505abc16e --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -0,0 +1,289 @@ +{ + "description": "getKeyByAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + 
"aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "does_not_exist" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "does_not_exist" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "aws_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "aws_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": 
"bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "local_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json new file mode 100644 index 0000000000..d944712357 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -0,0 +1,260 @@ +{ + "description": "getKeys", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "getKeys with zero key documents", + "operations": [ + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with single key documents", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "abc" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with many key documents", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, 
+ "expectResult": { + "$$type": "binData" + } + }, + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + }, + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getMore.json b/test/client-side-encryption/spec/unified/getMore.json new file mode 100644 index 0000000000..adaa59b01e --- /dev/null +++ b/test/client-side-encryption/spec/unified/getMore.json @@ -0,0 +1,321 @@ +{ + "description": "getMore", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "getMore with encryption", + "operations": [ + { + "name": "find", + "arguments": { + "batchSize": 2, + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + }, + { + "_id": 3, + "encrypted_string": "string2" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "default", + "batchSize": 2 + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/insert.json b/test/client-side-encryption/spec/unified/insert.json new file mode 100644 index 0000000000..23e4e6c2ae --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/insert.json @@ -0,0 +1,421 @@ +{ + "description": "insert", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "insertOne with encryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany with encryption", + "operations": [ + { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/keyAltName.json b/test/client-side-encryption/spec/unified/keyAltName.json new file mode 100644 index 0000000000..826f43df22 --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/keyAltName.json @@ -0,0 +1,299 @@ +{ + "description": "keyAltName", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption using key alt name", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_w_altname": "string0", + "altname": "altname" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_w_altname": { + 
"$$type": "binData" + }, + "altname": "altname" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + "keyAltNames": { + "$in": [ + "altname" + ] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Replace with key alt name fails", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_w_altname": "string0" + } + }, + "upsert": true + }, + "object": "coll", + "expectError": { + "errorContains": "A non-static (JSONPointer) keyId is not supported" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/keyCache.json b/test/client-side-encryption/spec/unified/keyCache.json new file mode 100644 index 0000000000..a39701e286 --- /dev/null +++ b/test/client-side-encryption/spec/unified/keyCache.json @@ -0,0 +1,198 @@ +{ + "description": "keyCache-explicit", + "schemaVersion": "1.22", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "OCTP9uKPPmvuqpHlqq83gPk4U6rUPxKVRRyVtrjFmVjdoa4Xzm1SzUbr7aIhNI42czkUBmrCtZKF31eaaJnxEBkqf0RFukA9Mo3NEHQWgAQ2cn9duOcRbaFUQo2z0/rB" + } + }, + "keyExpirationMS": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + }, + "keyAltNames": [], + "keyMaterial": { + "$binary": { + "base64": "iocBkhO3YBokiJ+FtxDTS71/qKXQ7tSWhWbcnFTXBcMjarsepvALeJ5li+SdUd9ePuatjidxAdMo7vh1V2ZESLMkQWdpPJ9PaJjA67gKQKbbbB4Ik5F2uKjULvrMBnFNVRMup4JNUwWFQJpqbfMveXnUVcD06+pUpAkml/f+DSXrV3e5rxciiNVtz03dAG8wJrsKsFXWj6vTjFhsfknyBA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "decrypt, wait, and decrypt again", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + }, 
+ { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/kmipKMS.json b/test/client-side-encryption/spec/unified/kmipKMS.json new file mode 100644 index 0000000000..e19f85882b --- /dev/null +++ b/test/client-side-encryption/spec/unified/kmipKMS.json @@ -0,0 +1,415 @@ +{ + "description": "kmipKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": 
"7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": 
"AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localKMS.json b/test/client-side-encryption/spec/unified/localKMS.json new file mode 100644 index 0000000000..03b8486484 --- /dev/null +++ b/test/client-side-encryption/spec/unified/localKMS.json @@ -0,0 +1,261 @@ +{ + "description": "localKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using local KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localSchema.json b/test/client-side-encryption/spec/unified/localSchema.json new file mode 100644 index 0000000000..685ee39d7c --- /dev/null +++ b/test/client-side-encryption/spec/unified/localSchema.json @@ -0,0 +1,337 @@ +{ + "description": "localSchema", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": 
"object" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "test": { + "bsonType": "string" + } + }, + "bsonType": "object", + "required": [ + "test" + ] + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "database": { + "id": "encryptedDB2", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl2", + "database": "encryptedDB2", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "A local schema should override", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "command": { + 
"find": "default", + "filter": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ] + } + ] + }, + { + "description": "A local schema with no encryption is an error", + "operations": [ + { + "object": "encryptedColl2", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "expectError": { + "isError": true, + "errorContains": "JSON schema keyword 'required' is only allowed with a remote schema" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/malformedCiphertext.json b/test/client-side-encryption/spec/unified/malformedCiphertext.json new file mode 100644 index 0000000000..550928f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/malformedCiphertext.json @@ -0,0 +1,241 @@ +{ + "description": "malformedCiphertext", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQ==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAa2V2aW4gYWxiZXJ0c29uCg==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Wrong subtype", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "Empty data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 2 + } + }, + "object": "coll", + "expectError": { + "errorContains": "malformed ciphertext" + } + } + ] + }, + { + "description": "Malformed data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 3 + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/maxWireVersion.json b/test/client-side-encryption/spec/unified/maxWireVersion.json new file mode 100644 index 0000000000..f7a5f0b7db --- /dev/null +++ b/test/client-side-encryption/spec/unified/maxWireVersion.json @@ -0,0 +1,108 @@ +{ + "description": "maxWireVersion", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "extraOptions": { + "mongocryptdBypassSpawn": true + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + 
"keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "operation fails with maxWireVersion < 8", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "encrypted_string": "string0" + } + }, + "expectError": { + "errorContains": "Auto-encryption requires a minimum MongoDB version of 4.2" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/missingKey.json b/test/client-side-encryption/spec/unified/missingKey.json new file mode 100644 index 0000000000..af0fd5812a --- /dev/null +++ b/test/client-side-encryption/spec/unified/missingKey.json @@ -0,0 +1,233 @@ +{ + "description": "missingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.different", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption on a missing key", + "operations": [ + { + 
"name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "different", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json new file mode 100644 index 0000000000..4d75e4cf51 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json @@ -0,0 +1,396 @@ +{ + "description": "namedKMS-createDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with named AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws:name1", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + 
} + } + } + ] + } + ] + }, + { + "description": "create datakey with named Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure:name1", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named GCP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp:name1", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + 
"provider": "local:name1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-explicit.json b/test/client-side-encryption/spec/unified/namedKMS-explicit.json new file mode 100644 index 0000000000..e28d7e8b30 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-explicit.json @@ -0,0 +1,130 @@ +{ + "description": "namedKMS-explicit", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name2" + ], + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "can explicitly encrypt with a named KMS provider", + "operations": [ + { + "name": "encrypt", + "object": "clientEncryption0", + "arguments": { + "value": "foobar", + "opts": { + "keyAltName": "local:name2", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "expectResult": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + } + ] + }, + { + "description": "can explicitly decrypt with a named KMS provider", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json new file mode 100644 index 0000000000..b3b9bd2477 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json @@ -0,0 +1,1385 @@ +{ + "description": "namedKMS-rewrapManyDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", 
+ "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + }, + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + }, + "aws:name2": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + 
"$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip:name1", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local:name1" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap to aws:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name1", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + 
"$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to azure:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "opts": { + "provider": "azure:name1", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + 
], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to gcp:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "opts": { + "provider": "gcp:name1", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to kmip:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "opts": { + "provider": "kmip:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to local:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "opts": { + "provider": "local:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": 
"local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from local:name1 to local:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "opts": { + "provider": "local:name2" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name2" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from aws:name1 to aws:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name2", + "masterKey": { + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name2", + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/namedKMS.json b/test/client-side-encryption/spec/unified/namedKMS.json new file mode 100644 index 0000000000..5e203865fd --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS.json @@ -0,0 +1,241 @@ +{ + "description": "namedKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + 
"commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/noSchema.json b/test/client-side-encryption/spec/unified/noSchema.json new file mode 100644 index 0000000000..c18afa4ed4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/noSchema.json @@ -0,0 +1,115 @@ +{ + "description": "noSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencrypted" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "unencrypted", + "documents": [] + } + ], + "tests": [ + { + "description": "Insert on an unencrypted collection", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 + } + ], + "collectionName": "unencrypted", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json new file mode 100644 index 0000000000..1b7077077a --- /dev/null +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -0,0 +1,672 @@ +{ + "description": "removeKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + 
"keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "remove keyAltName from non-existent data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove non-existent keyAltName from existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + 
"base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove an existing keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "remove the last keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + 
"alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "local_key" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "local_key" + ] + } + } + } + ] + } + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/replaceOne.json b/test/client-side-encryption/spec/unified/replaceOne.json new file mode 100644 index 0000000000..a093e238ba --- /dev/null +++ b/test/client-side-encryption/spec/unified/replaceOne.json @@ -0,0 +1,316 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + 
"databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "replaceOne with encryption", + "operations": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1", + "random": "abc" + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + 
}, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json new file mode 100644 index 0000000000..4c7d4e8048 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json @@ -0,0 +1,162 @@ +{ + "description": "rewrapManyDataKey-decrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap data key that fails during decryption due to invalid masterKey", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "local" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json new file mode 100644 index 0000000000..cd2d20c255 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json @@ -0,0 +1,250 @@ +{ + "description": "rewrapManyDataKey-encrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap with invalid masterKey for AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "invalid-vault-csfle.vault.azure.net", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + 
"isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "invalid-ring-csfle", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json new file mode 100644 index 0000000000..8803491dbe --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -0,0 +1,1922 @@ +{ + "description": "rewrapManyDataKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure_key" + ], + 
"keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "keyAltNames": [ + "kmip_delegated_key" + ], + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "11", + "delegated": true + } + } + ] + } + ], + "tests": [ + { + "description": "no keys to rewrap due to no filter matches", + "operations": [ + { + "name": "rewrapManyDataKey", + 
"object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": "no_matching_keys" + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "no_matching_keys" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + 
"$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": 
"gcp_key" + } + }, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "opts": { + "provider": "kmip" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": 
{ + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP delegated KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "opts": { + "provider": "kmip", + "masterKey": { + "delegated": true + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + 
"updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new local KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + 
"$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with current KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {} + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 6, + "modifiedCount": 6, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "masterKey": 1 + }, + "sort": { + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "masterKey": { + "provider": "kmip", + "keyId": "11", + "delegated": true + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + 
} + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/timeoutMS.json b/test/client-side-encryption/spec/unified/timeoutMS.json new file mode 100644 index 0000000000..98dc50e98a --- /dev/null +++ b/test/client-side-encryption/spec/unified/timeoutMS.json @@ -0,0 +1,270 @@ +{ + "description": "timeoutMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "timeoutMS": 500 + } + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "cse-timeouts-db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "cse-timeouts-coll" + } + } + ], + "initialData": [ + { + "databaseName": "cse-timeouts-db", + "collectionName": "cse-timeouts-coll", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/types.json b/test/client-side-encryption/spec/unified/types.json new file mode 100644 index 0000000000..3bb49f2a64 --- /dev/null +++ b/test/client-side-encryption/spec/unified/types.json @@ -0,0 +1,2262 @@ +{ + "description": "types", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_objectId": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "objectId", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + 
"kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_symbol": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "symbol", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_int": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_double": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "double", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + }, + { + "client": { + "id": "client4", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_decimal": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "decimal", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db4", + "client": "client4", + 
"databaseName": "default" + } + }, + { + "collection": { + "id": "coll4", + "database": "db4", + "collectionName": "default" + } + }, + { + "client": { + "id": "client5", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_binData": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "binData", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db5", + "client": "client5", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll5", + "database": "db5", + "collectionName": "default" + } + }, + { + "client": { + "id": "client6", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascript": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascript", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db6", + "client": "client6", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll6", + "database": "db6", + "collectionName": "default" + } + }, + { + "client": { + "id": "client7", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascriptWithScope": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascriptWithScope", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db7", + "client": "client7", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll7", + "database": "db7", + "collectionName": "default" + } + }, + { + "client": { + "id": "client8", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_object": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "object", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db8", + "client": "client8", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll8", + "database": "db8", + "collectionName": "default" + } + }, + { + "client": { + "id": "client9", + "autoEncryptOpts": { + "keyVaultNamespace": 
"keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_timestamp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "timestamp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db9", + "client": "client9", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll9", + "database": "db9", + "collectionName": "default" + } + }, + { + "client": { + "id": "client10", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_regex": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "regex", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db10", + "client": "client10", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll10", + "database": "db10", + "collectionName": "default" + } + }, + { + "client": { + "id": "client11", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_date": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "date", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db11", + "client": "client11", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll11", + "database": "db11", + "collectionName": "default" + } + }, + { + "client": { + "id": "client12", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_minKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "minKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db12", + "client": "client12", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll12", + "database": "db12", + "collectionName": "default" + } + }, + { + "client": { + "id": "client13", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_maxKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "maxKey", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db13", + "client": "client13", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll13", + "database": "db13", + "collectionName": "default" + } + }, + { + "client": { + "id": "client14", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_undefined": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "undefined", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db14", + "client": "client14", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll14", + "database": "db14", + "collectionName": "default" + } + }, + { + "client": { + "id": "client15", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_array": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db15", + "client": "client15", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll15", + "database": "db15", + "collectionName": "default" + } + }, + { + "client": { + "id": "client16", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_bool": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "bool", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db16", + "client": "client16", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll16", + "database": "db16", + "collectionName": "default" + } + }, + { + "client": { + "id": "client17", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_null": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "null", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db17", + "client": "client17", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll17", + "database": "db17", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "type=objectId", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + }, + "object": "coll0" + }, + { + "name": "find", + "object": "coll0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=symbol", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + }, + "object": "coll1" + }, + { + "name": "find", + "object": "coll1", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_symbol": { 
+ "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + }, + "object": "coll2" + }, + { + "name": "find", + "object": "coll2", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_double": { + "$numberDouble": "1.23" + } + } + }, + "object": "coll3", + "expectError": { + "errorContains": "element of type: double" + } + } + ] + }, + { + "description": "type=decimal", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_decimal": { + "$numberDecimal": "1.23" + } + } + }, + "object": "coll4", + "expectError": { + "errorContains": "element of type: decimal" + } + } + ] + }, + { + "description": "type=binData", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + }, + "object": "coll5" + }, + { + "name": "find", + "object": "coll5", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client5", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascript", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + }, + "object": "coll6" + }, + { + "name": "find", + "object": "coll6", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client6", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascriptWithScope", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascriptWithScope": { + "$code": "var x = 1;", + "$scope": {} + } + 
} + }, + "object": "coll7", + "expectError": { + "errorContains": "element of type: javascriptWithScope" + } + } + ] + }, + { + "description": "type=object", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_object": {} + } + }, + "object": "coll8", + "expectError": { + "errorContains": "element of type: object" + } + } + ] + }, + { + "description": "type=timestamp", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + }, + "object": "coll9" + }, + { + "name": "find", + "object": "coll9", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client9", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=regex", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + }, + "object": "coll10" + }, + { + "name": "find", + "object": "coll10", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client10", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=date", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + }, + "object": "coll11" + }, + { + "name": "find", + "object": "coll11", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client11", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=minKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_minKey": { + "$minKey": 1 + } + } + }, + "object": "coll12", + "expectError": { + "errorContains": "Cannot encrypt element of type: minKey" + } + } + ] + }, + { + "description": "type=maxKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_maxKey": { + "$maxKey": 1 + } + } + }, + "object": "coll13", + "expectError": { + "errorContains": "Cannot encrypt element of type: maxKey" + } + } + ] + }, + { + "description": "type=undefined", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_undefined": { + "$undefined": true + } + } + }, + "object": "coll14", + "expectError": { + "errorContains": "Cannot encrypt element of type: undefined" + } + } + ] + }, + { + "description": "type=array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_array": [] + } + }, + "object": "coll15", + "expectError": { + "errorContains": "element of type: array" + } + } + ] + }, + { + "description": "type=bool", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_bool": true + } + }, + "object": "coll16", + "expectError": { + "errorContains": "element of type: bool" + } + } + ] + }, + { + "description": "type=null", + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_null": true + } + }, + "object": "coll17", + "expectError": { + "errorContains": "Cannot encrypt element of type: null" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/unsupportedCommand.json b/test/client-side-encryption/spec/unified/unsupportedCommand.json new file mode 100644 index 0000000000..a91390324a --- /dev/null +++ b/test/client-side-encryption/spec/unified/unsupportedCommand.json @@ -0,0 +1,200 @@ +{ + "description": "unsupportedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "x": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "x": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "mapReduce deterministic encryption (unsupported)", + "operations": [ + { + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "command not supported for auto encryption: mapreduce" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateMany.json b/test/client-side-encryption/spec/unified/updateMany.json new file mode 100644 index 0000000000..cae4c0eaf4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateMany.json @@ -0,0 +1,376 @@ +{ + "description": "updateMany", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + 
"keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateMany with deterministic encryption", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + }, + "update": { + "$set": { + "encrypted_string": "string2", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": true, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateMany fails when filtering on a random field", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateOne.json b/test/client-side-encryption/spec/unified/updateOne.json new file mode 100644 index 0000000000..6c8fdcbb6e --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateOne.json @@ -0,0 +1,538 @@ +{ + "description": "updateOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateOne with deterministic encryption", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne fails when filtering on a random field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "$unset works with an encrypted field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$unset": { + "encrypted_string": "" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 
+ } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$unset": { + "encrypted_string": "" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename works if target value has same encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_equivalent": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename fails if target value has different encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "random" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$rename between two encrypted fields must have the same metadata or both be unencrypted" + } + } + ] + }, + { + "description": "an invalid update (no $ operators) is validated and errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "encrypted_string": "random" + } + }, + "object": "coll", + "expectError": { + "errorContains": "" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..c46a193273 --- /dev/null +++ b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json @@ -0,0 +1,323 @@ +{ + "description": "validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + 
"encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted 
field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json new file mode 100644 index 0000000000..9db5ff06d7 --- /dev/null +++ b/test/collection_management/clustered-indexes.json @@ -0,0 +1,291 @@ +{ + "description": "clustered-indexes", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "5.3", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ci-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ci-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ci-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + }, + { + "description": "listCollections includes clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + 
"arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": { + "name": { + "$eq": "test" + } + } + }, + "expectResult": [ + { + "name": "test", + "options": { + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index", + "v": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": { + "$eq": "test" + } + } + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + }, + { + "description": "listIndexes returns the index", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listIndexes", + "object": "collection0", + "expectResult": [ + { + "key": { + "_id": 1 + }, + "name": "test index", + "clustered": true, + "unique": true, + "v": { + "$$type": [ + "int", + "long" + ] + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "test" + }, + "databaseName": "ci-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/createCollection-pre_and_post_images.json b/test/collection_management/createCollection-pre_and_post_images.json new file mode 100644 index 0000000000..f488deacd8 --- /dev/null +++ b/test/collection_management/createCollection-pre_and_post_images.json @@ -0,0 +1,92 @@ +{ + "description": "createCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "createCollection with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + }, + "databaseName": "papi-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json new file mode 100644 index 0000000000..2ee52eac41 --- /dev/null +++ b/test/collection_management/timeseries-collection.json @@ -0,0 +1,320 @@ +{ + "description": "timeseries-collection", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ts-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ts-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with all options", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + }, + { + "description": "insertMany with duplicate ids", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "time": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": {}, + "sort": { + "time": 1 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + }, + { + "description": "createCollection with bucketing options", + "runOnRequirements": [ + { + "minServerVersion": "6.3" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/command.json b/test/command_logging/command.json new file mode 100644 index 0000000000..d2970df692 --- /dev/null +++ b/test/command_logging/command.json @@ -0,0 +1,215 @@ +{ + "description": "command-logging", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ping": 1, + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "databaseName": "logging-tests", + "commandName": "ping", + "reply": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ 
+ "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "command": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "databaseName": "logging-tests", + "commandName": "find", + "failure": { + "$$exists": true + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/driver-connection-id.json b/test/command_logging/driver-connection-id.json new file mode 100644 index 0000000000..40db98d6fa --- /dev/null +++ b/test/command_logging/driver-connection-id.json @@ -0,0 +1,146 @@ +{ + "description": "driver-connection-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-handshake-messages.json 
b/test/command_logging/no-handshake-messages.json new file mode 100644 index 0000000000..a61e208798 --- /dev/null +++ b/test/command_logging/no-handshake-messages.json @@ -0,0 +1,94 @@ +{ + "description": "no-handshake-command-logs", + "schemaVersion": "1.13", + "tests": [ + { + "description": "Handshake commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionCreatedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-heartbeat-messages.json b/test/command_logging/no-heartbeat-messages.json new file mode 100644 index 0000000000..525be9171d --- /dev/null +++ b/test/command_logging/no-heartbeat-messages.json @@ -0,0 +1,91 @@ +{ + "description": "no-heartbeat-command-logs", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "tests": [ + { + "description": "Heartbeat commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": {} + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/operation-id.json b/test/command_logging/operation-id.json new file mode 100644 index 0000000000..b1a3cec3d9 --- /dev/null +++ b/test/command_logging/operation-id.json @@ -0,0 +1,198 @@ +{ + "description": "operation-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + 
"command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Successful bulk write command log messages include operationIds", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + }, + { + "deleteOne": { + "filter": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failed bulk write command log message includes operationId", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "x": 1 + }, + "update": [ + { + "$invalidOperator": true + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/redacted-commands.json b/test/command_logging/redacted-commands.json new file mode 100644 index 0000000000..43b9ff74f2 --- /dev/null +++ b/test/command_logging/redacted-commands.json @@ -0,0 +1,1438 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-redaction-tests" + } + } + ], + "tests": [ + { + "description": "authenticate command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + 
"mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to authenticate is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "authenticate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "saslStart command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslStart is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "ZmFrZXNhc2xwYXlsb2Fk", + "mechanism": "MONGODB-X509" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + 
"data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "saslContinue command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslContinue is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "ZmFrZXNhc2xwYXlsb2Fk" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "getnonce command and server reply are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "getnonce", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "network error in response to getnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "getnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "createUser command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to createUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "updateUser command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command 
failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to updateUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "updateUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbgetnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbgetnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": 
"database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbsaslstart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbsaslstart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbsaslstart is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbsaslstart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydb command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydb is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydb" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command 
started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate command and server reply are redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate command and server reply are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate command and server reply are not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "hello": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "isWritablePrimary": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without 
speculative authenticate command and server reply are not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ismaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "isMaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/server-connection-id.json b/test/command_logging/server-connection-id.json new file mode 100644 index 0000000000..abbbbc7442 --- /dev/null +++ b/test/command_logging/server-connection-id.json @@ -0,0 +1,131 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + 
"int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/service-id.json b/test/command_logging/service-id.json new file mode 100644 index 0000000000..ea39d61231 --- /dev/null +++ b/test/command_logging/service-id.json @@ -0,0 +1,207 @@ +{ + "description": "service-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages include serviceId when in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + } + ] + } + ] + }, + { + "description": "command log messages omit serviceId when not in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$exists": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/unacknowledged-write.json b/test/command_logging/unacknowledged-write.json new file mode 100644 index 0000000000..0d33c020d5 --- /dev/null +++ b/test/command_logging/unacknowledged-write.json @@ -0,0 +1,151 @@ +{ + 
"description": "unacknowledged-write", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "An unacknowledged write generates a succeeded log message with ok: 1 reply", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "insert": "logging-tests-collection", + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "reply": { + "$$matchAsDocument": { + "ok": 1 + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/bulkWrite.json new file mode 100644 index 0000000000..49c728442e --- /dev/null +++ b/test/command_monitoring/bulkWrite.json @@ -0,0 +1,154 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful mixed bulk write", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "commandName": "insert", + 
"databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "$set": { + "x": 333 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/command.json b/test/command_monitoring/command.json new file mode 100644 index 0000000000..c28af95fed --- /dev/null +++ b/test/command_monitoring/command.json @@ -0,0 +1,83 @@ +{ + "description": "command", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/deleteMany.json new file mode 100644 index 0000000000..78ebad1f98 --- /dev/null +++ b/test/command_monitoring/deleteMany.json @@ -0,0 +1,162 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteMany", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A 
successful deleteMany with write errors", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/deleteOne.json new file mode 100644 index 0000000000..2420794fe5 --- /dev/null +++ b/test/command_monitoring/deleteOne.json @@ -0,0 +1,162 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteOne", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteOne with write errors", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json new file mode 100644 index 0000000000..bc9668499b --- /dev/null +++ b/test/command_monitoring/find.json @@ -0,0 +1,558 @@ +{ + "description": "find", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": 
"command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find with no options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": 1 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find with options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "x": 33 + }, + { + "x": 22 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find with showRecordId and returnKey", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "showRecordId": true, + "returnKey": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "showRecordId": true, + "returnKey": true + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find with a getMore", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/insertMany.json new file mode 100644 index 0000000000..a80a218c67 --- /dev/null +++ b/test/command_monitoring/insertMany.json @@ 
-0,0 +1,148 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertMany with write errors", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/insertOne.json new file mode 100644 index 0000000000..6ff732e41b --- /dev/null +++ b/test/command_monitoring/insertOne.json @@ -0,0 +1,144 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertOne with write errors", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { 
+ "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/redacted-commands.json b/test/command_monitoring/redacted-commands.json new file mode 100644 index 0000000000..4302ba8900 --- /dev/null +++ b/test/command_monitoring/redacted-commands.json @@ -0,0 +1,679 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + } + ], + "tests": [ + { + "description": "authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "authenticate", + "command": { + "authenticate": { + "$$exists": false + }, + "mechanism": { + "$$exists": false + }, + "user": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslStart", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslStart", + "command": { + "saslStart": { + "$$exists": false + }, + "payload": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslContinue", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslContinue", + "command": { + "saslContinue": { + "$$exists": false + }, + "conversationId": { + "$$exists": false + }, + "payload": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createUser", + "command": { + "createUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "updateUser", + "command": { + "updateUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydb", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydb", + "command": { + "copydb": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false 
+ } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate is not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate is not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/server-connection-id.json b/test/command_monitoring/server-connection-id.json new file mode 100644 index 0000000000..a8f27637fc --- /dev/null +++ b/test/command_monitoring/server-connection-id.json @@ -0,0 +1,101 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.6", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + 
"commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "server-connection-id-tests", + "collectionName": "coll", + "documents": [] + } + ], + "tests": [ + { + "description": "command events include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "hasServerConnectionId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServerConnectionId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 0000000000..14740cea34 --- /dev/null +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,225 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + }, + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + 
"_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json new file mode 100644 index 0000000000..78ddde767f --- /dev/null +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -0,0 +1,117 @@ +{ + "description": "unacknowledgedBulkWrite", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful unordered bulk write with an unacknowledged write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + } + } + ], + "ordered": false + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ], + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": { + "$$exists": false + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/updateMany.json new file mode 100644 index 0000000000..b15434226c --- /dev/null +++ b/test/command_monitoring/updateMany.json @@ -0,0 +1,188 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateMany with write errors", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/updateOne.json new file mode 100644 index 0000000000..a0ae99e88d --- /dev/null +++ b/test/command_monitoring/updateOne.json @@ -0,0 +1,260 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + 
"reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with upsert where the upserted id is not an ObjectId", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "upserted": [ + { + "index": 0, + "_id": 4 + } + ] + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with write errors", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/writeConcernError.json b/test/command_monitoring/writeConcernError.json new file mode 100644 index 0000000000..455e5422b7 --- /dev/null +++ b/test/command_monitoring/writeConcernError.json @@ -0,0 +1,155 @@ +{ + "description": "writeConcernError", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A retryable write with write concern errors publishes success event", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91 + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": 
"test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91 + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..91fad28d0a --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import asyncio +import sys +from test import pytest_conf, setup, teardown + +import pytest + +_IS_SYNC = True + + +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # We explicitly use a different loop implementation here to prevent that issue + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + +@pytest.fixture(scope="package", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() + + +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json new file mode 100644 index 0000000000..5799e834d7 --- /dev/null +++ b/test/connection_logging/connection-logging.json @@ -0,0 +1,523 @@ +{ + "description": "connection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "Create a client, run a command, and close the client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ 
+ "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "Connection pool was closed" + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool closed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Connection checkout fails due to error establishing connection", + "runOnRequirements": [ + { + "auth": true, + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryReads": false, + "appname": "clientAppName", + "heartbeatFrequencyMS": 10000 + }, + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "clientAppName" + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + 
"serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while using the connection", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout failed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while trying to establish a new connection", + "error": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool cleared", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/connection_logging/connection-pool-options.json b/test/connection_logging/connection-pool-options.json new file mode 100644 index 0000000000..7055a54869 --- /dev/null +++ b/test/connection_logging/connection-pool-options.json @@ -0,0 +1,458 @@ +{ + "description": "connection-pool-options", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "tests": [ + { + "description": "Options should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "connectionReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + 
"driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "maxConnecting should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "maxConnecting": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "maxConnecting": 5 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueTimeoutMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueTimeoutMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueSize should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 100 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + 
"serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueSize": 100 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueMultiple should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueMultiple": 5 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/connection_monitoring/connection-must-have-id.json b/test/connection_monitoring/connection-must-have-id.json new file mode 100644 index 0000000000..f2d6fb95e9 --- /dev/null +++ b/test/connection_monitoring/connection-must-have-id.json @@ -0,0 +1,52 @@ +{ + "version": 1, + "style": "unit", + "description": "must have an ID number associated with it", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolReady", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/connection_monitoring/connection-must-order-ids.json b/test/connection_monitoring/connection-must-order-ids.json new file mode 100644 index 0000000000..b7c2751dd7 --- /dev/null +++ b/test/connection_monitoring/connection-must-order-ids.json @@ -0,0 +1,52 @@ +{ + "version": 1, + "style": "unit", + "description": "must have IDs assigned in order of creation", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + 
"ConnectionPoolReady", + "ConnectionPoolClosed", + "ConnectionReady" + ] +} diff --git a/test/connection_monitoring/pool-checkin-destroy-closed.json b/test/connection_monitoring/pool-checkin-destroy-closed.json new file mode 100644 index 0000000000..55d0c03752 --- /dev/null +++ b/test/connection_monitoring/pool-checkin-destroy-closed.json @@ -0,0 +1,50 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if pool has been closed", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "close" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "poolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkin-destroy-stale.json b/test/connection_monitoring/pool-checkin-destroy-stale.json new file mode 100644 index 0000000000..6ffb8f53d1 --- /dev/null +++ b/test/connection_monitoring/pool-checkin-destroy-stale.json @@ -0,0 +1,50 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy checked in connection if it is stale", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "clear" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkin-make-available.json b/test/connection_monitoring/pool-checkin-make-available.json new file mode 100644 index 0000000000..3f37f188c0 --- /dev/null +++ b/test/connection_monitoring/pool-checkin-make-available.json @@ -0,0 +1,47 @@ +{ + "version": 1, + "style": "unit", + "description": "must make valid checked in connection available", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42, + "duration": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkin.json b/test/connection_monitoring/pool-checkin.json new file mode 100644 index 0000000000..3b40cec6f4 --- /dev/null +++ b/test/connection_monitoring/pool-checkin.json @@ -0,0 +1,34 @@ +{ + "version": 1, + "style": "unit", + "description": "must have a method of allowing the driver to check in a connection", 
+ "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/connection_monitoring/pool-checkout-connection.json b/test/connection_monitoring/pool-checkout-connection.json new file mode 100644 index 0000000000..c7e8914d45 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-connection.json @@ -0,0 +1,40 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out a connection", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionReady", + "connectionId": 1, + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42, + "duration": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..6620f82fd9 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "integration", + "description": "custom maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "maxConnecting": 1, + "maxPoolSize": 2, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-checkout-error-closed.json b/test/connection_monitoring/pool-checkout-error-closed.json new file mode 100644 index 0000000000..614403ef50 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-error-closed.json @@ -0,0 +1,70 @@ +{ + "version": 1, + "style": "unit", + "description": "must throw error if checkOut is called on a closed pool", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "close" + }, + { + "name": "checkOut" + } + ], + "error": { + "type": "PoolClosedError", + "message": "Attempted 
to check out a connection from closed connection pool" + }, + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckedIn", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "duration": 42, + "reason": "poolClosed" + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed" + ] +} diff --git a/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..3a63818bfe --- /dev/null +++ b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json @@ -0,0 +1,108 @@ +{ + "version": 1, + "style": "integration", + "description": "maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 800 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "wait", + "ms": 400 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json new file mode 100644 index 0000000000..4d9fda1a68 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json @@ -0,0 +1,104 @@ +{ + "version": 1, + "style": "integration", + "description": "waiting on maxConnecting is limited by WaitQueueTimeoutMS", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 50 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + 
"thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42, + "duration": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json new file mode 100644 index 0000000000..3b0d43e877 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json @@ -0,0 +1,88 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out minPoolSize connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "minPoolSize": 2, + "maxPoolSize": 3, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkout-multiple.json b/test/connection_monitoring/pool-checkout-multiple.json new file mode 100644 index 0000000000..07a4eda629 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-multiple.json @@ -0,0 +1,70 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to check out multiple connections at the same time", + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForThread", 
+ "target": "thread1" + }, + { + "name": "waitForThread", + "target": "thread2" + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionPoolReady", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkout-no-idle.json b/test/connection_monitoring/pool-checkout-no-idle.json new file mode 100644 index 0000000000..0b0fe572ff --- /dev/null +++ b/test/connection_monitoring/pool-checkout-no-idle.json @@ -0,0 +1,68 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out an idle connection if found while iterating available connections", + "poolOptions": { + "maxIdleTimeMS": 10, + "backgroundThreadIntervalMS": -1 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "wait", + "ms": 50 + }, + { + "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "idle", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkout-no-stale.json b/test/connection_monitoring/pool-checkout-no-stale.json new file mode 100644 index 0000000000..ec76f4e9c8 --- /dev/null +++ b/test/connection_monitoring/pool-checkout-no-stale.json @@ -0,0 +1,73 @@ +{ + "version": 1, + "style": "unit", + "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "poolOptions": { + "backgroundThreadIntervalMS": -1 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear" + }, + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 1, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionPoolReady", + "ConnectionCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json new file mode 100644 index 0000000000..10b526e0c3 
--- /dev/null +++ b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json @@ -0,0 +1,116 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out returned connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxConnecting": 2, + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "wait", + "ms": 100 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-clear-clears-waitqueue.json b/test/connection_monitoring/pool-clear-clears-waitqueue.json new file mode 100644 index 0000000000..e6077f12a5 --- /dev/null +++ b/test/connection_monitoring/pool-clear-clears-waitqueue.json @@ -0,0 +1,105 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing pool clears the WaitQueue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 30000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "clear" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 3, + "timeout": 1000 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42, + 
"duration": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionPoolCleared", + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckedIn", + "ConnectionClosed" + ] +} diff --git a/test/connection_monitoring/pool-clear-interrupting-pending-connections.json b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json new file mode 100644 index 0000000000..c1fd746329 --- /dev/null +++ b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json @@ -0,0 +1,77 @@ +{ + "version": 1, + "style": "integration", + "description": "clear with interruptInUseConnections = true closes pending connections", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 10000 + } + }, + "poolOptions": { + "minPoolSize": 0 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "clear", + "interruptInUseConnections": true + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": true + }, + { + "type": "ConnectionClosed" + }, + { + "type": "ConnectionCheckOutFailed" + } + ], + "ignore": [ + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-clear-min-size.json b/test/connection_monitoring/pool-clear-min-size.json new file mode 100644 index 0000000000..239df871b8 --- /dev/null +++ b/test/connection_monitoring/pool-clear-min-size.json @@ -0,0 +1,68 @@ +{ + "version": 1, + "style": "unit", + "description": "pool clear halts background minPoolSize establishments", + "poolOptions": { + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 1 + }, + { + "name": "clear" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionClosed" + ] +} diff --git a/test/connection_monitoring/pool-clear-paused.json b/test/connection_monitoring/pool-clear-paused.json new file mode 100644 index 0000000000..847f08d849 --- /dev/null +++ b/test/connection_monitoring/pool-clear-paused.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing a paused pool emits no events", + "operations": [ + { + "name": "clear" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "clear" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 
42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-clear-ready.json b/test/connection_monitoring/pool-clear-ready.json new file mode 100644 index 0000000000..88c2988ac5 --- /dev/null +++ b/test/connection_monitoring/pool-clear-ready.json @@ -0,0 +1,72 @@ +{ + "version": 1, + "style": "unit", + "description": "after clear, cannot check out connections until pool ready", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "clear" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42, + "duration": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "duration": 42, + "reason": "connectionError" + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "duration": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionCreated" + ] +} diff --git a/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json new file mode 100644 index 0000000000..3d7536951d --- /dev/null +++ b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "unit", + "description": "Pool clear SHOULD schedule the next background thread run immediately (interruptInUseConnections = false)", + "poolOptions": { + "backgroundThreadIntervalMS": 10000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear", + "interruptInUseConnections": false + }, + { + "name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1, + "timeout": 1000 + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1, + "timeout": 1000 + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": false + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionPoolReady", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-close-destroy-conns.json b/test/connection_monitoring/pool-close-destroy-conns.json new file mode 100644 index 0000000000..a3d58a2136 --- /dev/null +++ b/test/connection_monitoring/pool-close-destroy-conns.json @@ -0,0 +1,52 @@ +{ + "version": 1, + "style": "unit", + "description": "When a pool is closed, it MUST first destroy all available connections in 
that pool", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkOut" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "poolClosed", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionPoolReady", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted", + "ConnectionCheckedOut" + ] +} diff --git a/test/connection_monitoring/pool-close.json b/test/connection_monitoring/pool-close.json new file mode 100644 index 0000000000..fe083d73e6 --- /dev/null +++ b/test/connection_monitoring/pool-close.json @@ -0,0 +1,21 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to manually close a pool", + "operations": [ + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ] +} diff --git a/test/connection_monitoring/pool-create-max-size.json b/test/connection_monitoring/pool-create-max-size.json new file mode 100644 index 0000000000..e3a1fa8eda --- /dev/null +++ b/test/connection_monitoring/pool-create-max-size.json @@ -0,0 +1,133 @@ +{ + "version": 1, + "style": "unit", + "description": "must never exceed maxPoolSize total connections", + "poolOptions": { + "maxPoolSize": 3 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn1" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn2" + }, + { + "name": "checkIn", + "connection": "conn2" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 5 + }, + { + "name": "checkIn", + "connection": "conn1" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_monitoring/pool-create-min-size-error.json 
b/test/connection_monitoring/pool-create-min-size-error.json new file mode 100644 index 0000000000..1c744b850c --- /dev/null +++ b/test/connection_monitoring/pool-create-min-size-error.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "integration", + "description": "error during minPoolSize population clears pool", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "poolCreateMinSizeErrorTest" + } + }, + "poolOptions": { + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50, + "appName": "poolCreateMinSizeErrorTest" + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1 + }, + { + "name": "wait", + "ms": 200 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-create-min-size.json b/test/connection_monitoring/pool-create-min-size.json new file mode 100644 index 0000000000..43118f7841 --- /dev/null +++ b/test/connection_monitoring/pool-create-min-size.json @@ -0,0 +1,66 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with minPoolSize connections", + "poolOptions": { + "minPoolSize": 3 + }, + "operations": [ + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 3 + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 42, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42 + } + ], + "ignore": [ + "ConnectionReady", + "ConnectionClosed", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/connection_monitoring/pool-create-with-options.json b/test/connection_monitoring/pool-create-with-options.json new file mode 100644 index 0000000000..4e8223f91e --- /dev/null +++ b/test/connection_monitoring/pool-create-with-options.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "must be able to start a pool with various options set", + "poolOptions": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + }, + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": { + "maxPoolSize": 50, + "minPoolSize": 5, + "maxIdleTimeMS": 100 + } + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionReady" + ] +} diff --git a/test/connection_monitoring/pool-create.json b/test/connection_monitoring/pool-create.json new file mode 100644 index 0000000000..8c1f85537f --- /dev/null +++ b/test/connection_monitoring/pool-create.json @@ -0,0 +1,19 @@ +{ + "version": 1, + 
"style": "unit", + "description": "must be able to create a pool", + "operations": [ + { + "name": "waitForEvent", + "event": "ConnectionPoolCreated", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionPoolCreated", + "address": 42, + "options": 42 + } + ] +} diff --git a/test/connection_monitoring/pool-ready-ready.json b/test/connection_monitoring/pool-ready-ready.json new file mode 100644 index 0000000000..25dfa9c97c --- /dev/null +++ b/test/connection_monitoring/pool-ready-ready.json @@ -0,0 +1,39 @@ +{ + "version": 1, + "style": "unit", + "description": "readying a ready pool emits no events", + "operations": [ + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "ready" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/connection_monitoring/pool-ready.json b/test/connection_monitoring/pool-ready.json new file mode 100644 index 0000000000..a90aed04d0 --- /dev/null +++ b/test/connection_monitoring/pool-ready.json @@ -0,0 +1,59 @@ +{ + "version": 1, + "style": "unit", + "description": "pool starts as cleared and becomes ready", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "duration": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady" + ] +} diff --git a/test/connection_monitoring/wait-queue-timeout.json b/test/connection_monitoring/wait-queue-timeout.json new file mode 100644 index 0000000000..8bd7c49499 --- /dev/null +++ b/test/connection_monitoring/wait-queue-timeout.json @@ -0,0 +1,77 @@ +{ + "version": 1, + "style": "unit", + "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 50 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForThread", + "target": "thread1" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 42, + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42, + "duration": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 42, + "address": 42 + } + ], + 
"ignore": [ + "ConnectionCreated", + "ConnectionReady", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/connection_string/test/invalid-uris.json b/test/connection_string/test/invalid-uris.json new file mode 100644 index 0000000000..a7accbd27d --- /dev/null +++ b/test/connection_string/test/invalid-uris.json @@ -0,0 +1,283 @@ +{ + "tests": [ + { + "description": "Empty string", + "uri": "", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid scheme", + "uri": "mongo://localhost:27017", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Missing host", + "uri": "mongodb://", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Double colon in host identifier", + "uri": "mongodb://localhost::27017", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Double colon in host identifier and trailing slash", + "uri": "mongodb://localhost::27017/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Double colon in host identifier with missing host and port", + "uri": "mongodb://::", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Double colon in host identifier with missing port", + "uri": "mongodb://localhost,localhost::", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Double colon in host identifier and second host", + "uri": "mongodb://localhost::27017,abc", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (negative number) with hostname", + "uri": "mongodb://localhost:-1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (zero) with hostname", + "uri": "mongodb://localhost:0/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (positive number) with hostname", + "uri": "mongodb://localhost:65536", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (positive number) with hostname and trailing slash", + "uri": "mongodb://localhost:65536/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (non-numeric string) with hostname", + "uri": "mongodb://localhost:foo", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (negative number) with IP literal", + "uri": "mongodb://[::1]:-1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (zero) with IP literal", + "uri": "mongodb://[::1]:0/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (positive number) with IP literal", + "uri": "mongodb://[::1]:65536", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (positive number) with IP literal and trailing slash", + "uri": 
"mongodb://[::1]:65536/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid port (non-numeric string) with IP literal", + "uri": "mongodb://[::1]:foo", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Incomplete key value pair for option", + "uri": "mongodb://example.com/?w", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped colon", + "uri": "mongodb://alice:foo:bar@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username containing an unescaped at-sign", + "uri": "mongodb://alice@@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped at-sign", + "uri": "mongodb://alice@foo:bar@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username containing an unescaped slash", + "uri": "mongodb://alice/@localhost/db", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username containing unescaped slash with password", + "uri": "mongodb://alice/bob:foo@localhost/db", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped slash", + "uri": "mongodb://alice:foo/bar@localhost/db", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Host with unescaped slash", + "uri": "mongodb:///tmp/mongodb-27017.sock/", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "mongodb+srv with multiple service names", + "uri": "mongodb+srv://test5.test.mongodb.com,test6.test.mongodb.com", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "mongodb+srv with port number", + "uri": "mongodb+srv://test7.test.mongodb.com:27018", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign", + "uri": "mongodb://alice%foo:bar@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign and an escaped one", + "uri": "mongodb://user%20%:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign (non hex digit)", + "uri": "mongodb://user%w:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json new file mode 100644 index 0000000000..60f63f4e3f --- /dev/null +++ b/test/connection_string/test/valid-auth.json @@ -0,0 +1,312 @@ +{ + "tests": [ + { + "description": "User info for single IPv4 host without database", + "uri": "mongodb://alice:foo@127.0.0.1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + 
"host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "foo", + "db": null + }, + "options": null + }, + { + "description": "User info for single IPv4 host with database", + "uri": "mongodb://alice:foo@127.0.0.1/test", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "foo", + "db": "test" + }, + "options": null + }, + { + "description": "User info for single IP literal host without database", + "uri": "mongodb://bob:bar@[::1]:27018", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ip_literal", + "host": "::1", + "port": 27018 + } + ], + "auth": { + "username": "bob", + "password": "bar", + "db": null + }, + "options": null + }, + { + "description": "User info for single IP literal host with database", + "uri": "mongodb://bob:bar@[::1]:27018/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ip_literal", + "host": "::1", + "port": 27018 + } + ], + "auth": { + "username": "bob", + "password": "bar", + "db": "admin" + }, + "options": null + }, + { + "description": "User info for single hostname without database", + "uri": "mongodb://eve:baz@example.com", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": { + "username": "eve", + "password": "baz", + "db": null + }, + "options": null + }, + { + "description": "User info for single hostname with database", + "uri": "mongodb://eve:baz@example.com/db2", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": { + "username": "eve", + "password": "baz", + "db": "db2" + }, + "options": null + }, + { + "description": "User info for multiple hosts without database", + "uri": "mongodb://alice:secret@127.0.0.1,example.com:27018", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + }, + { + "type": "hostname", + "host": "example.com", + "port": 27018 + } + ], + "auth": { + "username": "alice", + "password": "secret", + "db": null + }, + "options": null + }, + { + "description": "User info for multiple hosts with database", + "uri": "mongodb://alice:secret@example.com,[::1]:27019/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + }, + { + "type": "ip_literal", + "host": "::1", + "port": 27019 + } + ], + "auth": { + "username": "alice", + "password": "secret", + "db": "admin" + }, + "options": null + }, + { + "description": "Username without password", + "uri": "mongodb://alice@127.0.0.1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "alice", + "password": null, + "db": null + }, + "options": null + }, + { + "description": "Username with empty password", + "uri": "mongodb://alice:@127.0.0.1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "", + "db": null + }, + "options": null + }, + { + "description": "Escaped username and database without password", + "uri": "mongodb://%40l%3Ace%2F%3D@example.com/my%3Ddb", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": { + 
"username": "@l:ce/=", + "password": null, + "db": "my=db" + }, + "options": null + }, + { + "description": "Subdelimiters in user/pass don't need escaping (PLAIN)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=PLAIN", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "!$&'()*+,;=", + "password": "!$&'()*+,;=", + "db": "admin" + }, + "options": { + "authmechanism": "PLAIN" + } + }, + { + "description": "Escaped username (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "db": null + }, + "options": { + "authmechanism": "MONGODB-X509" + } + }, + { + "description": "Escaped username (GSSAPI)", + "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authMechanism=GSSAPI", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": { + "username": "user@EXAMPLE.COM", + "password": "secret", + "db": null + }, + "options": { + "authmechanism": "GSSAPI", + "authmechanismproperties": { + "SERVICE_NAME": "other", + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" + } + } + }, + { + "description": "At-signs in options aren't part of the userinfo", + "uri": "mongodb://alice:secret@example.com/admin?replicaset=my@replicaset", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "secret", + "db": "admin" + }, + "options": { + "replicaset": "my@replicaset" + } + } + ] +} diff --git a/test/connection_string/test/valid-db-with-dotted-name.json b/test/connection_string/test/valid-db-with-dotted-name.json new file mode 100644 index 0000000000..5b5aaa5ee3 --- /dev/null +++ b/test/connection_string/test/valid-db-with-dotted-name.json @@ -0,0 +1,100 @@ +{ + "tests": [ + { + "description": "Multiple Unix domain sockets and auth DB resembling a socket (relative path)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin.sock" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth DB resembling a path (relative path)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin.shoe", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin.shoe" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets and auth DB resembling a socket (absolute path)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin.sock", + 
"valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin.sock" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth DB resembling a path (absolute path)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin.shoe", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin.shoe" + }, + "options": null + } + ] +} diff --git a/test/connection_string/test/valid-host_identifiers.json b/test/connection_string/test/valid-host_identifiers.json new file mode 100644 index 0000000000..e8833b4af2 --- /dev/null +++ b/test/connection_string/test/valid-host_identifiers.json @@ -0,0 +1,154 @@ +{ + "tests": [ + { + "description": "Single IPv4 host without port", + "uri": "mongodb://127.0.0.1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Single IPv4 host with port", + "uri": "mongodb://127.0.0.1:27018", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": 27018 + } + ], + "auth": null, + "options": null + }, + { + "description": "Single IP literal host without port", + "uri": "mongodb://[::1]", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ip_literal", + "host": "::1", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Single IP literal host with port", + "uri": "mongodb://[::1]:27019", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ip_literal", + "host": "::1", + "port": 27019 + } + ], + "auth": null, + "options": null + }, + { + "description": "Single hostname without port", + "uri": "mongodb://example.com", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Single hostname with port", + "uri": "mongodb://example.com:27020", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": 27020 + } + ], + "auth": null, + "options": null + }, + { + "description": "Single hostname (resembling IPv4) without port", + "uri": "mongodb://256.0.0.1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "256.0.0.1", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple hosts (mixed formats)", + "uri": "mongodb://127.0.0.1,[::1]:27018,example.com:27019", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + }, + { + "type": "ip_literal", + "host": "::1", + "port": 27018 + }, + { + "type": "hostname", + "host": "example.com", + "port": 27019 + } + ], + "auth": null, + "options": null + }, + { + "description": "UTF-8 hosts", + "uri": "mongodb://bücher.example.com,umläut.example.com/", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "bücher.example.com", + "port": null + }, + { + "type": "hostname", + "host": 
"umläut.example.com", + "port": null + } + ], + "auth": null, + "options": null + } + ] +} diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json new file mode 100644 index 0000000000..fce53873a6 --- /dev/null +++ b/test/connection_string/test/valid-options.json @@ -0,0 +1,62 @@ +{ + "tests": [ + { + "description": "Option names are normalized to lowercase", + "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=PLAIN", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "secret", + "db": "admin" + }, + "options": { + "authmechanism": "PLAIN" + } + }, + { + "description": "Missing delimiting slash between hosts and options", + "uri": "mongodb://example.com?tls=true", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "tls": true + } + }, + { + "description": "Colon in a key value pair", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster,ENVIRONMENT:azure", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "authmechanismProperties": { + "TOKEN_RESOURCE": "mongodb://test-cluster", + "ENVIRONMENT": "azure" + } + } + } + ] +} diff --git a/test/connection_string/test/valid-unix_socket-absolute.json b/test/connection_string/test/valid-unix_socket-absolute.json new file mode 100644 index 0000000000..66491db13b --- /dev/null +++ b/test/connection_string/test/valid-unix_socket-absolute.json @@ -0,0 +1,266 @@ +{ + "tests": [ + { + "description": "Unix domain socket (absolute path with trailing slash)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock/", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (absolute path without trailing slash)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://%2Ftmp%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (absolute path with spaces in path)", + "uri": "mongodb://%2Ftmp%2F %2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/ /mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple Unix domain sockets (absolute paths)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple hosts (absolute path and ipv4)", + "uri": "mongodb://127.0.0.1:27017,%2Ftmp%2Fmongodb-27017.sock", + "valid": true, + 
"warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": 27017 + }, + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple hosts (absolute path and hostname resembling relative path)", + "uri": "mongodb://mongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with auth database (absolute path)", + "uri": "mongodb://alice:foo@%2Ftmp%2Fmongodb-27017.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "foo", + "db": "admin" + }, + "options": null + }, + { + "description": "Unix domain socket with path resembling socket file (absolute path with trailing slash)", + "uri": "mongodb://%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock/", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with path resembling socket file (absolute path without trailing slash)", + "uri": "mongodb://%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with path resembling socket file and auth (absolute path)", + "uri": "mongodb://bob:bar@%2Ftmp%2Fpath.to.sock%2Fmongodb-27017.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": { + "username": "bob", + "password": "bar", + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets and auth DB (absolute path)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth DB (absolute path)", + "uri": "mongodb://%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth and query string (absolute path)", + "uri": "mongodb://bob:bar@%2Ftmp%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock/admin?w=1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": "bob", + 
"password": "bar", + "db": "admin" + }, + "options": { + "w": 1 + } + } + ] +} diff --git a/test/connection_string/test/valid-unix_socket-relative.json b/test/connection_string/test/valid-unix_socket-relative.json new file mode 100644 index 0000000000..788720920b --- /dev/null +++ b/test/connection_string/test/valid-unix_socket-relative.json @@ -0,0 +1,286 @@ +{ + "tests": [ + { + "description": "Unix domain socket (relative path with trailing slash)", + "uri": "mongodb://rel%2Fmongodb-27017.sock/", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (relative path without trailing slash)", + "uri": "mongodb://rel%2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://rel%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket (relative path with spaces)", + "uri": "mongodb://rel%2F %2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/ /mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple Unix domain sockets (relative paths)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple Unix domain sockets (relative and absolute paths)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,%2Ftmp%2Fmongodb-27018.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "/tmp/mongodb-27018.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple hosts (relative path and ipv4)", + "uri": "mongodb://127.0.0.1:27017,rel%2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": 27017 + }, + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Multiple hosts (relative path and hostname resembling relative path)", + "uri": "mongodb://mongodb-27017.sock,rel%2Fmongodb-27018.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with auth database (relative path)", + "uri": "mongodb://alice:foo@rel%2Fmongodb-27017.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + } + ], + "auth": { + "username": "alice", + "password": "foo", + "db": "admin" + }, + "options": null + }, + { + "description": "Unix domain socket with path resembling 
socket file (relative path with trailing slash)", + "uri": "mongodb://rel%2Fpath.to.sock%2Fmongodb-27017.sock/", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with path resembling socket file (relative path without trailing slash)", + "uri": "mongodb://rel%2Fpath.to.sock%2Fmongodb-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unix domain socket with path resembling socket file and auth (relative path)", + "uri": "mongodb://bob:bar@rel%2Fpath.to.sock%2Fmongodb-27017.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/path.to.sock/mongodb-27017.sock", + "port": null + } + ], + "auth": { + "username": "bob", + "password": "bar", + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets and auth DB (relative path)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth DB (relative path)", + "uri": "mongodb://rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": null, + "password": null, + "db": "admin" + }, + "options": null + }, + { + "description": "Multiple Unix domain sockets with auth and query string (relative path)", + "uri": "mongodb://bob:bar@rel%2Fmongodb-27017.sock,rel%2Fmongodb-27018.sock/admin?w=1", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/mongodb-27017.sock", + "port": null + }, + { + "type": "unix", + "host": "rel/mongodb-27018.sock", + "port": null + } + ], + "auth": { + "username": "bob", + "password": "bar", + "db": "admin" + }, + "options": { + "w": 1 + } + } + ] +} diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json new file mode 100644 index 0000000000..e11757eb0e --- /dev/null +++ b/test/connection_string/test/valid-warnings.json @@ -0,0 +1,115 @@ +{ + "tests": [ + { + "description": "Unrecognized option keys are ignored", + "uri": "mongodb://example.com/?foo=bar", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Unsupported option values are ignored", + "uri": "mongodb://example.com/?fsync=ifPossible", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Repeated option keys", + "uri": "mongodb://example.com/?replicaSet=test&replicaSet=test", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + 
"host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "replicaset": "test" + } + }, + { + "description": "Deprecated (or unknown) options are ignored if replacement exists", + "uri": "mongodb://example.com/?wtimeout=5&wtimeoutMS=10", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "wtimeoutms": 10 + } + }, + { + "description": "Empty integer option values are ignored", + "uri": "mongodb://localhost/?maxIdleTimeMS=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Empty boolean option value are ignored", + "uri": "mongodb://localhost/?journal=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Comma in a key value pair causes a warning", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2,ENVIRONMENT:azure", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": { + "authMechanism": "MONGODB-OIDC" + } + } + ] +} diff --git a/test/crud/unified/aggregate-allowdiskuse.json b/test/crud/unified/aggregate-allowdiskuse.json new file mode 100644 index 0000000000..2e54175b8a --- /dev/null +++ b/test/crud/unified/aggregate-allowdiskuse.json @@ -0,0 +1,155 @@ +{ + "description": "aggregate-allowdiskuse", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": { + "$$exists": false + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-collation.json b/test/crud/unified/aggregate-collation.json new file mode 100644 index 0000000000..e7f0c3a7f1 --- /dev/null +++ b/test/crud/unified/aggregate-collation.json @@ -0,0 +1,73 @@ +{ + "description": "aggregate-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with collation", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "x": "PING" + } + } + ], + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json new file mode 100644 index 0000000000..039900920f --- /dev/null +++ b/test/crud/unified/aggregate-let.json @@ -0,0 +1,376 @@ +{ + "description": "aggregate-let", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + }, + "expectResult": [ + { + "x": "foo", + "y": "$bar", + "rand": { + "$$type": "double" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + 
"minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-merge-errorResponse.json b/test/crud/unified/aggregate-merge-errorResponse.json new file mode 100644 index 0000000000..6c7305fd91 --- /dev/null +++ b/test/crud/unified/aggregate-merge-errorResponse.json @@ -0,0 +1,90 @@ +{ + "description": "aggregate-merge-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 1 + } + ] + } + ], + "tests": [ + { + "description": "aggregate $merge DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database0", + "arguments": { + 
"pipeline": [ + { + "$documents": [ + { + "_id": 2, + "x": 1 + } + ] + }, + { + "$merge": { + "into": "test", + "whenMatched": "fail" + } + } + ] + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "_id": 1 + }, + "keyValue": { + "_id": 2 + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-merge.json b/test/crud/unified/aggregate-merge.json new file mode 100644 index 0000000000..ac61ceb8a6 --- /dev/null +++ b/test/crud/unified/aggregate-merge.json @@ -0,0 +1,497 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_merge" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_merge", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and batch size of 0", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "batchSize": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "cursor": {} + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { 
+ "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and majority readConcern", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and local readConcern", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "local" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and available readConcern", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json new file mode 100644 index 0000000000..e293457c1c --- /dev/null +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -0,0 +1,407 @@ +{ + "description": "aggregate-out-readConcern", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" 
+ } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_linearizable", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_out_readconcern", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "readConcern majority with out stage", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern local with out stage", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "local" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern available with out stage", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 
"test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern linearizable with out stage", + "operations": [ + { + "object": "collection_readConcern_linearizable", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "linearizable" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-out.json b/test/crud/unified/aggregate-out.json new file mode 100644 index 0000000000..db0d7918cf --- /dev/null +++ b/test/crud/unified/aggregate-out.json @@ -0,0 +1,143 @@ +{ + "description": "aggregate-out", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "2.6", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "batchSize": 2 + } + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $out and batch size of 0", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "batchSize": 0 + } + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json new file mode 100644 index 0000000000..c1fa3b4574 --- /dev/null +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -0,0 +1,391 @@ +{ + "description": "aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + 
"sharded", + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + 
"w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json new file mode 100644 index 0000000000..55634f05f6 --- /dev/null +++ b/test/crud/unified/aggregate.json @@ -0,0 +1,615 @@ +{ + "description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "aggregate-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "aggregate-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "aggregate with multiple batches works", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with a string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { 
+ "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment sets comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": "comment" + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate with multiple stages", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-arrayFilters-clientError.json b/test/crud/unified/bulkWrite-arrayFilters-clientError.json new file mode 100644 index 0000000000..63815e3233 --- /dev/null +++ b/test/crud/unified/bulkWrite-arrayFilters-clientError.json @@ -0,0 +1,151 @@ +{ + "description": "bulkWrite-arrayFilters-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.5.5" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite on server that doesn't support arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "BulkWrite on server that doesn't support arrayFilters with arrayFilters on second op", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + } + } + }, + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-arrayFilters.json b/test/crud/unified/bulkWrite-arrayFilters.json new file mode 100644 index 
0000000000..bc4e7b9fcb --- /dev/null +++ b/test/crud/unified/bulkWrite-arrayFilters.json @@ -0,0 +1,364 @@ +{ + "description": "bulkWrite-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + }, + { + "description": "BulkWrite with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + 
"arrayFilters": [ + { + "i.b": 3 + } + ] + } + }, + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-collation.json b/test/crud/unified/bulkWrite-collation.json new file mode 100644 index 0000000000..fe54b1a1e3 --- /dev/null +++ b/test/crud/unified/bulkWrite-collation.json @@ -0,0 +1,254 @@ +{ + "description": "bulkWrite-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + }, + { + "_id": 4, + "x": "pong" + }, + { + "_id": 5, + "x": "pONg" + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with delete operations and collation", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "deleteOne": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "deleteMany": { + "filter": { + "x": "PONG" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 4, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "BulkWrite with update operations and collation", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 3 + } + } + }, + { + "updateOne": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "replaceOne": { + "filter": { + "x": "ping" + }, + "replacement": { + "_id": 6, + "x": "ping" + }, + "upsert": true, + "collation": { + "locale": "en_US", + "strength": 3 + } + } + }, + { + "updateMany": { + "filter": { + "x": "pong" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { 
+ "$$unsetOrMatches": {} + }, + "matchedCount": 6, + "modifiedCount": 4, + "upsertedCount": 1, + "upsertedIds": { + "2": 6 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "PONG" + }, + { + "_id": 3, + "x": "PONG" + }, + { + "_id": 4, + "x": "PONG" + }, + { + "_id": 5, + "x": "PONG" + }, + { + "_id": 6, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json new file mode 100644 index 0000000000..0b2addc850 --- /dev/null +++ b/test/crud/unified/bulkWrite-comment.json @@ -0,0 +1,519 @@ +{ + "description": "bulkWrite-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_comment" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + 
"operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint-clientError.json b/test/crud/unified/bulkWrite-delete-hint-clientError.json new file mode 100644 index 0000000000..2961b55dc0 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint-clientError.json @@ -0,0 +1,193 @@ +{ + "description": 
"bulkWrite-delete-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint-serverError.json b/test/crud/unified/bulkWrite-delete-hint-serverError.json new file mode 100644 index 0000000000..fa99522093 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint-serverError.json @@ -0,0 +1,252 @@ +{ + "description": "bulkWrite-delete-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + 
"deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint.json b/test/crud/unified/bulkWrite-delete-hint.json new file mode 100644 index 0000000000..9fcdecefd7 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint.json @@ -0,0 +1,247 @@ +{ + "description": "bulkWrite-delete-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 3, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..2dda9486e8 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "bulkWrite-deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + 
} + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json new file mode 100644 index 0000000000..45c20ea49a --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 0 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", 
+ "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..aadf6d9e99 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json @@ -0,0 +1,265 @@ +{ + "description": "bulkWrite-deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + 
"description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json new file mode 100644 index 0000000000..f3268163cb --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-errorResponse.json b/test/crud/unified/bulkWrite-errorResponse.json new file mode 100644 index 0000000000..157637c713 
--- /dev/null +++ b/test/crud/unified/bulkWrite-errorResponse.json @@ -0,0 +1,88 @@ +{ + "description": "bulkWrite-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "bulkWrite operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..92bbb1aaf2 --- /dev/null +++ b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json @@ -0,0 +1,374 @@ +{ + "description": "bulkWrite-insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..fce647d8f4 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json @@ -0,0 +1,532 @@ +{ + "description": "bulkWrite-replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": 
"coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + 
"replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..e54cd704df --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json @@ -0,0 +1,293 @@ +{ + "description": "bulkWrite-replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + 
"_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json new file mode 100644 index 0000000000..70f63837a8 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -0,0 +1,226 @@ +{ + "description": "BulkWrite replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + 
"id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-sort.json b/test/crud/unified/bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..c0bd383514 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-sort.json @@ -0,0 +1,239 @@ +{ + "description": "BulkWrite replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": 
{ + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint-clientError.json b/test/crud/unified/bulkWrite-update-hint-clientError.json new file mode 100644 index 0000000000..d5eb71c29e --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint-clientError.json @@ -0,0 +1,284 @@ +{ + "description": "bulkWrite-update-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { 
+ "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint-serverError.json b/test/crud/unified/bulkWrite-update-hint-serverError.json new file mode 100644 index 0000000000..b0f7e1b381 --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint-serverError.json @@ -0,0 +1,422 @@ +{ + "description": "bulkWrite-update-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": 
true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + 
] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint.json b/test/crud/unified/bulkWrite-update-hint.json new file mode 100644 index 0000000000..4206359891 --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint.json @@ -0,0 +1,445 @@ +{ + "description": "bulkWrite-update-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 4, + "modifiedCount": 4, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": { 
+ "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 444 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-validation.json b/test/crud/unified/bulkWrite-update-validation.json new file mode 100644 index 0000000000..f9bfda0edd --- /dev/null +++ b/test/crud/unified/bulkWrite-update-validation.json @@ -0,0 +1,210 @@ +{ + "description": "bulkWrite-update-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + 
"object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..35a5cdd52a --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json @@ -0,0 +1,452 @@ +{ + "description": "bulkWrite-updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { 
+ "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json 
b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..87478918d2 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json new file mode 100644 index 0000000000..fbeba1a607 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "BulkWrite updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 21 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-pipeline.json b/test/crud/unified/bulkWrite-updateMany-pipeline.json new file mode 100644 index 0000000000..e938ea7535 --- /dev/null +++ 
b/test/crud/unified/bulkWrite-updateMany-pipeline.json @@ -0,0 +1,148 @@ +{ + "description": "bulkWrite-updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..cbbe113ce8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json @@ -0,0 +1,460 @@ +{ + "description": "bulkWrite-updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + 
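bulkWrite-updateMany-pipeline.json (and its updateOne counterpart later in this diff) verifies that an aggregation pipeline can stand in for the update document, gated on server 4.1.11+ (the pre-release line that became 4.2). A minimal sketch:

```python
# Mirrors "UpdateMany in bulk write using pipelines".
from pymongo import MongoClient, UpdateMany

coll = MongoClient()["crud-tests"]["test"]
coll.drop()
coll.insert_many([
    {"_id": 1, "x": 1, "y": 1, "t": {"u": {"v": 1}}},
    {"_id": 2, "x": 2, "y": 1},
])

result = coll.bulk_write(
    [UpdateMany({}, [{"$project": {"x": 1}}, {"$addFields": {"foo": 1}}])]
)
assert (result.matched_count, result.modified_count) == (2, 2)
# Every document is reshaped to {_id, x, foo}; "y" and "t" are projected away.
```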
"$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": 
{} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1345f6b536 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + 
"$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json new file mode 100644 index 0000000000..96783c782f --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -0,0 +1,247 @@ +{ + "description": "BulkWrite updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": 
"'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-pipeline.json b/test/crud/unified/bulkWrite-updateOne-pipeline.json new file mode 100644 index 0000000000..769bd106f8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-pipeline.json @@ -0,0 +1,156 @@ +{ + "description": "bulkWrite-updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-sort.json b/test/crud/unified/bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..f78bd3bf3e --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-sort.json @@ -0,0 +1,255 @@ +{ + "description": "BulkWrite updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + 
"_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite.json b/test/crud/unified/bulkWrite.json new file mode 100644 index 0000000000..59b33cbac5 --- /dev/null +++ b/test/crud/unified/bulkWrite.json @@ -0,0 +1,829 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with deleteOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, 
+ "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "BulkWrite with deleteMany operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "x": { + "$lt": 11 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$lte": 22 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [] + } + ] + }, + { + "description": "BulkWrite with insertOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite with replaceOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 33 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 12 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 33 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "2": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with updateOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 0 + }, + "update": { + "$set": { + "x": 0 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 11 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 33 + } + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "3": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 
2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with updateMany operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": { + "$lt": 11 + } + }, + "update": { + "$set": { + "x": 0 + } + } + } + }, + { + "updateMany": { + "filter": { + "x": { + "$lte": 22 + } + }, + "update": { + "$unset": { + "y": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "x": { + "$lte": 22 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 33 + } + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 4, + "modifiedCount": 2, + "upsertedCount": 1, + "upsertedIds": { + "3": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with mixed ordered operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "3": 4 + } + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 1, + "upsertedIds": { + "5": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 34 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite with mixed unordered operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "_id": 3, + "x": 33 + }, + "upsert": true + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "0": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite continue-on-error behavior with unordered (preexisting duplicate key)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + 
"insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bypassDocumentValidation.json b/test/crud/unified/bypassDocumentValidation.json new file mode 100644 index 0000000000..aff2d37f81 --- /dev/null +++ b/test/crud/unified/bypassDocumentValidation.json @@ -0,0 +1,493 @@ +{ + "description": "bypassDocumentValidation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + }, + "commandName": "aggregate", + "databaseName": "crud" + } + } + ] + } + ] + }, + { + "description": "BulkWrite passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4, + "x": 44 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 32 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } 
+ }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-delete-options.json b/test/crud/unified/client-bulkWrite-delete-options.json new file mode 100644 index 0000000000..d9987897dc --- /dev/null +++ b/test/crud/unified/client-bulkWrite-delete-options.json @@ -0,0 +1,268 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": 
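bypassDocumentValidation.json checks a subtle wire detail: an explicit `bypassDocumentValidation: false` must be sent with the command rather than silently dropped as a default. A sketch of the bulk-write and insert cases; the wire-level check is what the JSON's commandStartedEvent blocks encode:

```python
# Mirrors the bulkWrite/insert tests in bypassDocumentValidation.json.
from pymongo import MongoClient, InsertOne

coll = MongoClient()["crud"]["coll"]
coll.drop()

coll.bulk_write([InsertOne({"_id": 4, "x": 44})], bypass_document_validation=False)
coll.insert_many([{"_id": 5, "x": 55}], bypass_document_validation=False)
# A command listener would observe {"insert": "coll", ...,
# "bypassDocumentValidation": false} for both commands.
```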
"clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errorResponse.json b/test/crud/unified/client-bulkWrite-errorResponse.json new file mode 100644 index 0000000000..b828aad3b9 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errorResponse.json @@ -0,0 +1,69 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errors.json b/test/crud/unified/client-bulkWrite-errors.json new file mode 100644 index 0000000000..015bd95c99 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errors.json @@ -0,0 +1,513 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 
91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": 
"clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Requesting unacknowledged write with verboseResults is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true, + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and verbose results" + } + } + ] + }, + { + "description": "Requesting unacknowledged write with ordered is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and ordered writes" + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-mixed-namespaces.json b/test/crud/unified/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 0000000000..55f0618923 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,315 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + 
"_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-options.json b/test/crud/unified/client-bulkWrite-options.json new file mode 100644 index 0000000000..708fe4e85b --- /dev/null +++ b/test/crud/unified/client-bulkWrite-options.json @@ -0,0 +1,716 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + 
"insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-ordered.json b/test/crud/unified/client-bulkWrite-ordered.json new file mode 100644 index 0000000000..6fb10d992f --- /dev/null +++ b/test/crud/unified/client-bulkWrite-ordered.json @@ -0,0 +1,291 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" 
+ }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-partialResults.json b/test/crud/unified/client-bulkWrite-partialResults.json new file mode 100644 index 0000000000..1b75e37834 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-partialResults.json @@ 
-0,0 +1,540 @@ +{ + "description": "client bulkWrite partial results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "newDocument": { + "_id": 2, + "x": 22 + } + }, + "tests": [ + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + 
"arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is unset when all operations fail during an unordered bulk write", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "1": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + 
"verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-replaceOne-sort.json b/test/crud/unified/client-bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..fc66ec015d --- /dev/null +++ b/test/crud/unified/client-bulkWrite-replaceOne-sort.json @@ -0,0 +1,163 @@ +{ + "description": "client bulkWrite replaceOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + "nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-results.json b/test/crud/unified/client-bulkWrite-results.json new file mode 100644 index 
0000000000..accf5a9cbf --- /dev/null +++ b/test/crud/unified/client-bulkWrite-results.json @@ -0,0 +1,833 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, 
+ { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": 
{ + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-options.json b/test/crud/unified/client-bulkWrite-update-options.json new file mode 100644 index 0000000000..ce6241c681 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-options.json @@ -0,0 +1,949 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + 
"name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 
+ ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-pipeline.json b/test/crud/unified/client-bulkWrite-update-pipeline.json new file mode 100644 index 0000000000..9dba5ee6c5 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-pipeline.json @@ -0,0 +1,258 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + 
"insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-validation.json b/test/crud/unified/client-bulkWrite-update-validation.json new file mode 100644 index 0000000000..617e711338 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + 
"outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-updateOne-sort.json b/test/crud/unified/client-bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..ef75dcb374 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-updateOne-sort.json @@ -0,0 +1,167 @@ +{ + "description": "client bulkWrite updateOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + 
"nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/count-collation.json b/test/crud/unified/count-collation.json new file mode 100644 index 0000000000..eef65e0880 --- /dev/null +++ b/test/crud/unified/count-collation.json @@ -0,0 +1,83 @@ +{ + "description": "count-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Count documents with collation", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "x": "ping" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "Deprecated count with collation", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": { + "x": "ping" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/crud/unified/count-empty.json b/test/crud/unified/count-empty.json new file mode 100644 index 0000000000..29d8d76f67 --- /dev/null +++ b/test/crud/unified/count-empty.json @@ -0,0 +1,71 @@ +{ + "description": "count-empty", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [] + } + ], + "tests": [ + { + "description": "Estimated document count with empty collection", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "arguments": {}, + "expectResult": 0 + } + ] + }, + { + "description": "Count documents with empty collection", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 0 + } + ] + }, + { + "description": "Deprecated count with empty collection", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 0 + } + ] + } + ] +} diff --git a/test/crud/unified/count.json b/test/crud/unified/count.json new file mode 100644 index 0000000000..80fff5a30c --- /dev/null +++ b/test/crud/unified/count.json @@ -0,0 +1,148 @@ +{ + "description": "count", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": 
"crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Estimated document count", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "arguments": {}, + "expectResult": 3 + } + ] + }, + { + "description": "Count documents without a filter", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 3 + } + ] + }, + { + "description": "Count documents with a filter", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": 2 + } + ] + }, + { + "description": "Count documents with skip and limit", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {}, + "skip": 1, + "limit": 3 + }, + "expectResult": 2 + } + ] + }, + { + "description": "Deprecated count without a filter", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 3 + } + ] + }, + { + "description": "Deprecated count with a filter", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": 2 + } + ] + }, + { + "description": "Deprecated count with skip and limit", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {}, + "skip": 1, + "limit": 3 + }, + "expectResult": 2 + } + ] + } + ] +} diff --git a/test/crud/unified/countDocuments-comment.json b/test/crud/unified/countDocuments-comment.json new file mode 100644 index 0000000000..e6c7ae8170 --- /dev/null +++ b/test/crud/unified/countDocuments-comment.json @@ -0,0 +1,208 @@ +{ + "description": "countDocuments-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "countDocuments-comments-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "countDocuments-comments-test", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": "comment" + }, + "expectResult": 3 + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with document comment on less than 4.4.0 - server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/create-null-ids.json b/test/crud/unified/create-null-ids.json new file mode 100644 index 0000000000..8e0c3ac5d1 --- /dev/null +++ b/test/crud/unified/create-null-ids.json @@ -0,0 +1,253 @@ +{ + "description": "create-null-ids", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "crud_id" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "type_tests" + } + } + ], + "initialData": [ + { + "collectionName": "type_tests", + "databaseName": "crud_id", + "documents": [] + } + ], + "tests": [ + { + "description": "inserting _id with type null via insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": null + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": null + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, + "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, + "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via 
replaceOne", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "_id": null + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via bulkWrite", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via clientBulkWrite", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud_id.type_tests", + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json new file mode 100644 index 0000000000..b6460f001f --- /dev/null +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -0,0 +1,395 @@ +{ + "description": "db-aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0", + "databaseOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Database-level aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": 
"4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "$$exists": false + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate.json b/test/crud/unified/db-aggregate.json new file mode 100644 index 0000000000..5015405bfc --- /dev/null +++ b/test/crud/unified/db-aggregate.json @@ -0,0 +1,107 @@ +{ + "description": "db-aggregate", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "tests": [ + { + "description": "Aggregate with 
$listLocalSessions", + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "dummy": "dummy field" + } + }, + { + "$project": { + "_id": 0, + "dummy": 1 + } + } + ] + }, + "expectResult": [ + { + "dummy": "dummy field" + } + ] + } + ] + }, + { + "description": "Aggregate with $listLocalSessions and allowDiskUse", + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "dummy": "dummy field" + } + }, + { + "$project": { + "_id": 0, + "dummy": 1 + } + } + ], + "allowDiskUse": true + }, + "expectResult": [ + { + "dummy": "dummy field" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-collation.json b/test/crud/unified/deleteMany-collation.json new file mode 100644 index 0000000000..23d2f037cb --- /dev/null +++ b/test/crud/unified/deleteMany-collation.json @@ -0,0 +1,86 @@ +{ + "description": "deleteMany-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany when many documents match with collation", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json new file mode 100644 index 0000000000..6abc5fd58a --- /dev/null +++ b/test/crud/unified/deleteMany-comment.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 
1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-clientError.json b/test/crud/unified/deleteMany-hint-clientError.json new file mode 100644 index 0000000000..66320122b5 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-clientError.json @@ -0,0 +1,149 @@ +{ + "description": "deleteMany-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { 
+ "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-serverError.json b/test/crud/unified/deleteMany-hint-serverError.json new file mode 100644 index 0000000000..88d4a65576 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-serverError.json @@ -0,0 +1,190 @@ +{ + "description": "deleteMany-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-unacknowledged.json b/test/crud/unified/deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..ab7e9c7c09 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-unacknowledged.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint.json b/test/crud/unified/deleteMany-hint.json new file mode 100644 index 0000000000..59d903d201 --- /dev/null +++ b/test/crud/unified/deleteMany-hint.json @@ -0,0 +1,173 @@ +{ + "description": "deleteMany-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + 
"collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-let.json b/test/crud/unified/deleteMany-let.json new file mode 100644 index 0000000000..71bf26a013 --- /dev/null +++ b/test/crud/unified/deleteMany-let.json @@ -0,0 +1,201 @@ +{ + "description": "deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + 
}, + "let": { + "name": "name" + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany.json b/test/crud/unified/deleteMany.json new file mode 100644 index 0000000000..36cdff8dc0 --- /dev/null +++ b/test/crud/unified/deleteMany.json @@ -0,0 +1,115 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany when many documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "DeleteMany when no document matches", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-collation.json b/test/crud/unified/deleteOne-collation.json new file mode 100644 index 0000000000..44bab6e120 --- /dev/null +++ b/test/crud/unified/deleteOne-collation.json @@ -0,0 +1,90 @@ +{ + "description": "deleteOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne when many documents matches with collation", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + 
"documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json new file mode 100644 index 0000000000..0f42b086a3 --- /dev/null +++ b/test/crud/unified/deleteOne-comment.json @@ -0,0 +1,243 @@ +{ + "description": "deleteOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-errorResponse.json b/test/crud/unified/deleteOne-errorResponse.json new file mode 100644 index 0000000000..1f3a266f1e --- /dev/null +++ b/test/crud/unified/deleteOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + 
"description": "deleteOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "delete operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-clientError.json b/test/crud/unified/deleteOne-hint-clientError.json new file mode 100644 index 0000000000..cf629f59e0 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "deleteOne-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-serverError.json b/test/crud/unified/deleteOne-hint-serverError.json new file mode 100644 index 0000000000..15541ed857 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-serverError.json @@ -0,0 +1,170 @@ +{ + "description": "deleteOne-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + 
"maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-unacknowledged.json b/test/crud/unified/deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1782f0f525 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-unacknowledged.json @@ -0,0 +1,241 @@ +{ + "description": "deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" 
+ } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint.json b/test/crud/unified/deleteOne-hint.json new file mode 100644 index 0000000000..bcc4bc2347 --- /dev/null +++ b/test/crud/unified/deleteOne-hint.json @@ -0,0 +1,161 @@ +{ + "description": "deleteOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "deleteOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + 
"expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-let.json b/test/crud/unified/deleteOne-let.json new file mode 100644 index 0000000000..9718682235 --- /dev/null +++ b/test/crud/unified/deleteOne-let.json @@ -0,0 +1,191 @@ +{ + "description": "deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne.json b/test/crud/unified/deleteOne.json new file mode 100644 index 0000000000..8177b2fb6b --- /dev/null +++ b/test/crud/unified/deleteOne.json @@ -0,0 +1,136 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": 
"crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ] + }, + { + "description": "DeleteOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-collation.json b/test/crud/unified/distinct-collation.json new file mode 100644 index 0000000000..e40cb0b2cf --- /dev/null +++ b/test/crud/unified/distinct-collation.json @@ -0,0 +1,69 @@ +{ + "description": "distinct-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "string": "PING" + }, + { + "_id": 2, + "string": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Distinct with a collation", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "string", + "filter": {}, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + "PING" + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json new file mode 100644 index 0000000000..11bce9ac9d --- /dev/null +++ b/test/crud/unified/distinct-comment.json @@ -0,0 +1,186 @@ +{ + "description": "distinct-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": 
"value" + } + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": "comment" + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": "comment" + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with document comment - pre 4.4, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-hint.json b/test/crud/unified/distinct-hint.json new file mode 100644 index 0000000000..2a6869cbe0 --- /dev/null +++ b/test/crud/unified/distinct-hint.json @@ -0,0 +1,139 @@ +{ + "description": "distinct-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "7.1.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-hint-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-hint-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with hint string", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": "_id_" + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with hint document", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 
1 + }, + "hint": { + "_id": 1 + } + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct.json b/test/crud/unified/distinct.json new file mode 100644 index 0000000000..9accffabc9 --- /dev/null +++ b/test/crud/unified/distinct.json @@ -0,0 +1,86 @@ +{ + "description": "distinct", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct without a filter", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ] + }, + { + "description": "Distinct with a filter", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount-comment.json b/test/crud/unified/estimatedDocumentCount-comment.json new file mode 100644 index 0000000000..6c0adacc8f --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount-comment.json @@ -0,0 +1,170 @@ +{ + "description": "estimatedDocumentCount-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": "comment" + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with document comment - pre 4.4.14, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + 
"maxServerVersion": "4.4.13", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..3577d9006b --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount.json @@ -0,0 +1,357 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection0View", + "database": "database0", + "collectionName": "coll0view" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount always uses count", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with maxTimeMS", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "maxTimeMS": 6000 + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "maxTimeMS": 6000 + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount on non-existent collection", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection1", + "expectResult": 0 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll1" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly--command error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "estimatedDocumentCount", + 
"object": "collection0", + "expectError": { + "errorCode": 8 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly--socket error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount works correctly on views", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "coll0view" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0View", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "coll0view" + }, + "commandName": "drop", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + }, + "commandName": "create", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll0view" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-clientError.json b/test/crud/unified/find-allowdiskuse-clientError.json new file mode 100644 index 0000000000..5bd954e79d --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-clientError.json @@ -0,0 +1,79 @@ +{ + "description": "find-allowdiskuse-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_clienterror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against 
pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-serverError.json b/test/crud/unified/find-allowdiskuse-serverError.json new file mode 100644 index 0000000000..dc58f8f0e3 --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-serverError.json @@ -0,0 +1,100 @@ +{ + "description": "find-allowdiskuse-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_servererror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": true + } + } + } + ] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json new file mode 100644 index 0000000000..eb238ab93a --- /dev/null +++ b/test/crud/unified/find-allowdiskuse.json @@ -0,0 +1,120 @@ +{ + "description": "find-allowdiskuse", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse" + } + } + ], + "tests": [ + { + "description": "Find does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-collation.json b/test/crud/unified/find-collation.json new file mode 100644 index 0000000000..13b105ad5a --- /dev/null +++ b/test/crud/unified/find-collation.json @@ -0,0 +1,69 @@ +{ + "description": "find-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Find with a collation", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json new file mode 100644 index 0000000000..600a3723f1 --- /dev/null +++ b/test/crud/unified/find-comment.json @@ -0,0 +1,403 @@ +{ + "description": "find-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "find with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { 
+ "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99", + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment sets comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-let.json b/test/crud/unified/find-let.json new file mode 100644 index 0000000000..4e9c9c99f4 --- /dev/null +++ b/test/crud/unified/find-let.json @@ -0,0 +1,148 @@ +{ + "description": "find-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Find with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Find with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + }, + "expectError": { + "errorContains": "Unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find.json b/test/crud/unified/find.json new file mode 100644 index 0000000000..325cd96c21 --- /dev/null +++ b/test/crud/unified/find.json @@ -0,0 +1,304 @@ +{ + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with multiple batches works", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + } + ] + } + ] + }, + { + "description": "Find with filter", + "operations": [ + { + "object": 
"collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "Find with filter, sort, skip, and limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "limit": 2 + }, + "expectResult": [ + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ] + }, + { + "description": "Find with limit, sort, and batchsize", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "Find with batchSize equal to limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 4 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "limit": 4, + "batchSize": 5 + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOne.json b/test/crud/unified/findOne.json new file mode 100644 index 0000000000..826c0f5dfd --- /dev/null +++ b/test/crud/unified/findOne.json @@ -0,0 +1,158 @@ +{ + "description": "findOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "FindOne with filter", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "batchSize": { + "$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne with filter, sort, and skip", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2 + }, + "expectResult": { + "_id": 5, + "x": 55 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "batchSize": { + 
"$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-collation.json b/test/crud/unified/findOneAndDelete-collation.json new file mode 100644 index 0000000000..a0452876a3 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-collation.json @@ -0,0 +1,98 @@ +{ + "description": "findOneAndDelete-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 2, + "x": "PING" + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "ping" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-comment.json b/test/crud/unified/findOneAndDelete-comment.json new file mode 100644 index 0000000000..6853b9cc2d --- /dev/null +++ b/test/crud/unified/findOneAndDelete-comment.json @@ -0,0 +1,211 @@ +{ + "description": "findOneAndDelete-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", 
+ "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-clientError.json b/test/crud/unified/findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..c6ff467866 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "findOneAndDelete-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-serverError.json b/test/crud/unified/findOneAndDelete-hint-serverError.json new file mode 100644 index 0000000000..b874102728 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-serverError.json @@ -0,0 +1,162 @@ +{ + "description": "findOneAndDelete-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": 
"4.2.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-unacknowledged.json b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json new file mode 100644 index 0000000000..077f9892b9 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json @@ -0,0 +1,225 @@ +{ + "description": "findOneAndDelete-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document 
fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint.json b/test/crud/unified/findOneAndDelete-hint.json new file mode 100644 index 0000000000..8b53f2bd3f --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint.json @@ -0,0 +1,155 @@ +{ + "description": "findOneAndDelete-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", 
+ "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-let.json b/test/crud/unified/findOneAndDelete-let.json new file mode 100644 index 0000000000..ba8e681c0e --- /dev/null +++ b/test/crud/unified/findOneAndDelete-let.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndDelete-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete.json b/test/crud/unified/findOneAndDelete.json new file mode 100644 index 0000000000..e434b3b740 --- /dev/null +++ b/test/crud/unified/findOneAndDelete.json @@ -0,0 +1,171 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete when many documents match", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete when one document matches", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 2 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete when no documents match", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 4 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-collation.json b/test/crud/unified/findOneAndReplace-collation.json new file mode 100644 index 0000000000..0d60d54164 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-collation.json @@ -0,0 +1,97 @@ +{ + "description": "findOneAndReplace-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when one document matches with collation returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "x": "PING" + }, + "replacement": { + "x": "pong" + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "pong" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-comment.json b/test/crud/unified/findOneAndReplace-comment.json new file mode 100644 index 0000000000..f817bb6937 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-comment.json @@ -0,0 
+1,234 @@ +{ + "description": "findOneAndReplace-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-dots_and_dollars.json b/test/crud/unified/findOneAndReplace-dots_and_dollars.json new file mode 100644 index 0000000000..19ac447f84 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-dots_and_dollars.json @@ -0,0 +1,430 @@ +{ + "description": "findOneAndReplace-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + 
"id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { 
+ "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-clientError.json b/test/crud/unified/findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..6b07eb1f4d --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-clientError.json @@ -0,0 +1,139 @@ +{ + "description": "findOneAndReplace-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + 
"expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-serverError.json b/test/crud/unified/findOneAndReplace-hint-serverError.json new file mode 100644 index 0000000000..7fbf5a0ea3 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-serverError.json @@ -0,0 +1,172 @@ +{ + "description": "findOneAndReplace-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-unacknowledged.json b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json new file mode 100644 index 0000000000..8228d8a2aa --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json @@ -0,0 +1,248 @@ +{ + "description": "findOneAndReplace-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint.json b/test/crud/unified/findOneAndReplace-hint.json new file mode 100644 index 0000000000..d07c5921a7 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint.json @@ -0,0 +1,173 @@ +{ + "description": "findOneAndReplace-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { 
+ "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-let.json b/test/crud/unified/findOneAndReplace-let.json new file mode 100644 index 0000000000..5e5de44b31 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-let.json @@ -0,0 +1,197 @@ +{ + "description": "findOneAndReplace-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "x" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with let option 
unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-upsert.json b/test/crud/unified/findOneAndReplace-upsert.json new file mode 100644 index 0000000000..f1f18996c8 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-upsert.json @@ -0,0 +1,254 @@ +{ + "description": "findOneAndReplace-upsert", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 44 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + 
"outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 44 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace.json b/test/crud/unified/findOneAndReplace.json new file mode 100644 index 0000000000..a4731602c4 --- /dev/null +++ b/test/crud/unified/findOneAndReplace.json @@ -0,0 +1,332 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when many documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when many documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 32 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when one document matches returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } 
+ ] + }, + { + "description": "FindOneAndReplace when one document matches returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 32 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-arrayFilters.json b/test/crud/unified/findOneAndUpdate-arrayFilters.json new file mode 100644 index 0000000000..6c99e4ff66 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-arrayFilters.json @@ -0,0 +1,251 @@ +{ + "description": "findOneAndUpdate-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when no document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": 
"findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-collation.json b/test/crud/unified/findOneAndUpdate-collation.json new file mode 100644 index 0000000000..7a49347a3a --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-collation.json @@ -0,0 +1,106 @@ +{ + "description": "findOneAndUpdate-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when many documents match with collation returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "x": "PING" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "_id": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "ping" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-comment.json b/test/crud/unified/findOneAndUpdate-comment.json new file mode 100644 index 0000000000..6dec5b39ee --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-comment.json @@ -0,0 +1,228 @@ +{ + "description": "findOneAndUpdate-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + 
] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-dots_and_dollars.json b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json new file mode 100644 index 0000000000..40eb547392 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json @@ -0,0 +1,380 @@ +{ + "description": "findOneAndUpdate-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + 
{ + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + 
"foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-errorResponse.json b/test/crud/unified/findOneAndUpdate-errorResponse.json new file mode 100644 index 0000000000..5023a450f3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-errorResponse.json @@ -0,0 +1,132 @@ +{ + "description": "findOneAndUpdate-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "unique": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "foo" + } + }, + "upsert": true + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "x": 1 + }, + "keyValue": { + "x": "foo" + } + } + } + } + ] + }, + { + "description": "findOneAndUpdate document validation errInfo is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "validator": { + "x": { + "$type": "string" + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 121, + "errorResponse": { + "errInfo": { + "failingDocumentId": 1, + "details": { + "$$type": "object" + } + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-clientError.json b/test/crud/unified/findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..d0b51313c9 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-clientError.json @@ -0,0 +1,143 @@ +{ + "description": "findOneAndUpdate-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + 
"databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-serverError.json b/test/crud/unified/findOneAndUpdate-hint-serverError.json new file mode 100644 index 0000000000..99fd9938f8 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-serverError.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndUpdate-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json new file mode 100644 index 0000000000..d116a06d0d --- /dev/null +++ 
b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json @@ -0,0 +1,253 @@ +{ + "description": "findOneAndUpdate-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint.json b/test/crud/unified/findOneAndUpdate-hint.json new file mode 100644 index 0000000000..5be6d2b3e8 --- /dev/null +++ 
b/test/crud/unified/findOneAndUpdate-hint.json @@ -0,0 +1,181 @@ +{ + "description": "findOneAndUpdate-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-let.json b/test/crud/unified/findOneAndUpdate-let.json new file mode 100644 index 0000000000..74d7d0e58b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-let.json @@ -0,0 +1,217 @@ +{ + "description": "findOneAndUpdate-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-pipeline.json b/test/crud/unified/findOneAndUpdate-pipeline.json new file mode 100644 index 0000000000..81dba9ae93 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-pipeline.json @@ -0,0 +1,130 @@ +{ + "description": "findOneAndUpdate-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate using pipelines", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "commandName": "findAndModify", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate.json b/test/crud/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..d79cf8ac5b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate.json @@ -0,0 +1,448 @@ +{ + "description": 
"findOneAndUpdate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when many documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when many documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 23 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 23 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 
22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json new file mode 100644 index 0000000000..2b4c80b3f0 --- /dev/null +++ b/test/crud/unified/insertMany-comment.json @@ -0,0 +1,226 @@ +{ + "description": "insertMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with document comment", + "runOnRequirements": [ + { 
+ "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-dots_and_dollars.json b/test/crud/unified/insertMany-dots_and_dollars.json new file mode 100644 index 0000000000..eed8997df9 --- /dev/null +++ b/test/crud/unified/insertMany-dots_and_dollars.json @@ -0,0 +1,338 @@ +{ + "description": "insertMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } 
+ ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany.json b/test/crud/unified/insertMany.json new file mode 100644 index 0000000000..643b7f44de --- /dev/null +++ b/test/crud/unified/insertMany.json @@ -0,0 +1,205 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany with non-existing documents", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 
33 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (preexisting duplicate key)", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json new file mode 100644 index 0000000000..dbd83d9f64 --- /dev/null +++ b/test/crud/unified/insertOne-comment.json @@ -0,0 +1,220 @@ +{ + "description": "insertOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-dots_and_dollars.json b/test/crud/unified/insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..fdc17af2e8 --- /dev/null +++ b/test/crud/unified/insertOne-dots_and_dollars.json @@ -0,0 +1,614 @@ +{ + "description": "insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in _id yields server-side error", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": { + "a.b": 1 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, 
+ "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with DBRef-like keys", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$db": "foo" + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll1", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-errorResponse.json b/test/crud/unified/insertOne-errorResponse.json new file mode 100644 index 0000000000..04ea6a7451 --- /dev/null +++ b/test/crud/unified/insertOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "insertOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "insert operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne.json b/test/crud/unified/insertOne.json new file mode 100644 index 0000000000..1a90913476 --- /dev/null +++ b/test/crud/unified/insertOne.json @@ -0,0 +1,77 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + 
"createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne with a non-existing document", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-collation.json b/test/crud/unified/replaceOne-collation.json new file mode 100644 index 0000000000..dd76b9d616 --- /dev/null +++ b/test/crud/unified/replaceOne-collation.json @@ -0,0 +1,92 @@ +{ + "description": "replaceOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "x": "PING" + }, + "replacement": { + "_id": 2, + "x": "pong" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json new file mode 100644 index 0000000000..88bee5d7b7 --- /dev/null +++ b/test/crud/unified/replaceOne-comment.json @@ -0,0 +1,248 @@ +{ + "description": "replaceOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-dots_and_dollars.json b/test/crud/unified/replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..d5003dc5ea --- /dev/null +++ b/test/crud/unified/replaceOne-dots_and_dollars.json @@ -0,0 +1,567 @@ +{ + "description": "replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, 
+ "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll1", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint-unacknowledged.json b/test/crud/unified/replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..5c5dec64f6 --- /dev/null +++ b/test/crud/unified/replaceOne-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + 
"maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint.json b/test/crud/unified/replaceOne-hint.json new file mode 100644 index 0000000000..6926e9d8df --- /dev/null +++ b/test/crud/unified/replaceOne-hint.json @@ -0,0 +1,203 @@ +{ + "description": "replaceOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_replaceone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } 
+ ], + "tests": [ + { + "description": "ReplaceOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + }, + { + "description": "ReplaceOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json new file mode 100644 index 0000000000..e7a7ee65a5 --- /dev/null +++ b/test/crud/unified/replaceOne-let.json @@ -0,0 +1,219 @@ +{ + "description": "replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + 
{ + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "ReplaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-sort.json b/test/crud/unified/replaceOne-sort.json new file mode 100644 index 0000000000..cf2271dda5 --- /dev/null +++ b/test/crud/unified/replaceOne-sort.json @@ -0,0 +1,232 @@ +{ + "description": "replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-validation.json b/test/crud/unified/replaceOne-validation.json new file mode 100644 index 0000000000..6f5b173e02 --- /dev/null +++ b/test/crud/unified/replaceOne-validation.json @@ -0,0 +1,82 @@ +{ + "description": "replaceOne-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne.json b/test/crud/unified/replaceOne.json new file mode 100644 index 0000000000..bdb7556f2f --- /dev/null +++ b/test/crud/unified/replaceOne.json @@ -0,0 +1,259 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ] + }, + { + "description": "ReplaceOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": 
"ReplaceOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 1 + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne with upsert when no documents match without an id specified", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "ReplaceOne with upsert when no documents match with an id specified", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-arrayFilters.json b/test/crud/unified/updateMany-arrayFilters.json new file mode 100644 index 0000000000..8730caeb42 --- /dev/null +++ b/test/crud/unified/updateMany-arrayFilters.json @@ -0,0 +1,233 @@ +{ + "description": "updateMany-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when no documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "UpdateMany when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ 
+ { + "i.b": 3 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "UpdateMany when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-collation.json b/test/crud/unified/updateMany-collation.json new file mode 100644 index 0000000000..0c780a3c2d --- /dev/null +++ b/test/crud/unified/updateMany-collation.json @@ -0,0 +1,101 @@ +{ + "description": "updateMany-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when many documents match with collation", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + }, + { + "_id": 3, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json new file mode 100644 index 0000000000..88b8b67f5a --- /dev/null +++ b/test/crud/unified/updateMany-comment.json @@ -0,0 +1,254 @@ +{ + "description": "updateMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + 
}, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-dots_and_dollars.json b/test/crud/unified/updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..5d3b9d0453 --- /dev/null +++ b/test/crud/unified/updateMany-dots_and_dollars.json @@ -0,0 +1,404 @@ +{ + "description": "updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + 
"matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + 
"foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-clientError.json b/test/crud/unified/updateMany-hint-clientError.json new file mode 100644 index 0000000000..5da878e293 --- /dev/null +++ b/test/crud/unified/updateMany-hint-clientError.json @@ -0,0 +1,159 @@ +{ + "description": "updateMany-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-serverError.json b/test/crud/unified/updateMany-hint-serverError.json new file mode 100644 index 0000000000..c81f36b13c --- /dev/null +++ b/test/crud/unified/updateMany-hint-serverError.json @@ -0,0 +1,216 @@ +{ + "description": "updateMany-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint-unacknowledged.json b/test/crud/unified/updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..e83838aac2 --- /dev/null +++ b/test/crud/unified/updateMany-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": 
"4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint.json b/test/crud/unified/updateMany-hint.json new file mode 100644 index 0000000000..929be52994 --- /dev/null +++ b/test/crud/unified/updateMany-hint.json @@ -0,0 +1,219 @@ +{ + "description": "updateMany-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": 
"test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json new file mode 100644 index 0000000000..cff3bd4c79 --- /dev/null +++ b/test/crud/unified/updateMany-let.json @@ -0,0 +1,249 @@ +{ + "description": "updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name", + "x": "foo", + "y": "bar" + }, + { + "_id": 3, + "name": "name", + "x": "foo", + "y": "bar" + } + ] + } + ] + }, + { + "description": "updateMany with let option 
unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-pipeline.json b/test/crud/unified/updateMany-pipeline.json new file mode 100644 index 0000000000..e0f6d9d4a4 --- /dev/null +++ b/test/crud/unified/updateMany-pipeline.json @@ -0,0 +1,142 @@ +{ + "description": "updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-validation.json b/test/crud/unified/updateMany-validation.json new file mode 100644 index 0000000000..e3e46a1384 --- /dev/null +++ b/test/crud/unified/updateMany-validation.json @@ -0,0 +1,98 @@ +{ + "description": "updateMany-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" 
+ } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany requires atomic modifiers", + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany.json b/test/crud/unified/updateMany.json new file mode 100644 index 0000000000..19b890592b --- /dev/null +++ b/test/crud/unified/updateMany.json @@ -0,0 +1,236 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when many documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateMany when one document matches", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with upsert when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + 
"upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-arrayFilters.json b/test/crud/unified/updateOne-arrayFilters.json new file mode 100644 index 0000000000..be5d05b01e --- /dev/null +++ b/test/crud/unified/updateOne-arrayFilters.json @@ -0,0 +1,453 @@ +{ + "description": "updateOne-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when no document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when no documents match multiple 
arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "y.$[i].c.$[j].d": 0 + } + }, + "arrayFilters": [ + { + "i.b": 5 + }, + { + "j.d": 3 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when one document matches multiple arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "y.$[i].c.$[j].d": 0 + } + }, + "arrayFilters": [ + { + "i.b": 5 + }, + { + "j.d": 1 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 0 + } + ] + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-collation.json b/test/crud/unified/updateOne-collation.json new file mode 100644 index 0000000000..a39be46054 --- /dev/null +++ b/test/crud/unified/updateOne-collation.json @@ -0,0 +1,93 @@ +{ + "description": "updateOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "x": "PING" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json new file mode 100644 index 0000000000..f4ee74db38 --- /dev/null +++ b/test/crud/unified/updateOne-comment.json @@ -0,0 +1,260 @@ +{ + "description": "updateOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], 
+ "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-dots_and_dollars.json b/test/crud/unified/updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..798d522cba --- /dev/null +++ b/test/crud/unified/updateOne-dots_and_dollars.json @@ -0,0 +1,412 @@ +{ + "description": "updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + 
} + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + 
{ + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-errorResponse.json b/test/crud/unified/updateOne-errorResponse.json new file mode 100644 index 0000000000..0ceddbc4fc --- /dev/null +++ b/test/crud/unified/updateOne-errorResponse.json @@ -0,0 +1,87 @@ +{ + "description": "updateOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "update operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-clientError.json b/test/crud/unified/updateOne-hint-clientError.json new file mode 100644 index 0000000000..d4f1a53430 --- /dev/null +++ b/test/crud/unified/updateOne-hint-clientError.json @@ -0,0 +1,147 @@ +{ + "description": "updateOne-hint-clientError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + 
"update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-serverError.json b/test/crud/unified/updateOne-hint-serverError.json new file mode 100644 index 0000000000..05fb033319 --- /dev/null +++ b/test/crud/unified/updateOne-hint-serverError.json @@ -0,0 +1,208 @@ +{ + "description": "updateOne-hint-serverError", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false 
+ } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-unacknowledged.json b/test/crud/unified/updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..859b0f92f9 --- /dev/null +++ b/test/crud/unified/updateOne-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + 
} + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint.json b/test/crud/unified/updateOne-hint.json new file mode 100644 index 0000000000..484e00757d --- /dev/null +++ b/test/crud/unified/updateOne-hint.json @@ -0,0 +1,211 @@ +{ + "description": "updateOne-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json new file mode 100644 index 0000000000..e43b979358 --- /dev/null +++ b/test/crud/unified/updateOne-let.json @@ -0,0 +1,227 @@ +{ + "description": "updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "UpdateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-pipeline.json b/test/crud/unified/updateOne-pipeline.json new file mode 100644 index 0000000000..1348c6b53b --- /dev/null +++ b/test/crud/unified/updateOne-pipeline.json @@ -0,0 +1,150 @@ +{ + "description": "updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne using pipelines", + "operations": [ + 
{ + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-sort.json b/test/crud/unified/updateOne-sort.json new file mode 100644 index 0000000000..8fe4f50b94 --- /dev/null +++ b/test/crud/unified/updateOne-sort.json @@ -0,0 +1,240 @@ +{ + "description": "updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + 
"$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-validation.json b/test/crud/unified/updateOne-validation.json new file mode 100644 index 0000000000..1464642c59 --- /dev/null +++ b/test/crud/unified/updateOne-validation.json @@ -0,0 +1,80 @@ +{ + "description": "updateOne-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne.json b/test/crud/unified/updateOne.json new file mode 100644 index 0000000000..a3f559673e --- /dev/null +++ b/test/crud/unified/updateOne.json @@ -0,0 +1,216 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ] + }, + { + "description": "UpdateOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + 
"filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json new file mode 100644 index 0000000000..9a05809f77 --- /dev/null +++ b/test/csot/bulkWrite.json @@ -0,0 +1,160 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "w": 1 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json new file mode 100644 index 0000000000..8cffb08e26 --- /dev/null +++ b/test/csot/change-streams.json @@ -0,0 +1,598 @@ +{ + 
"description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to initial aggregate", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 1050 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200, + "batchSize": 2, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to full resume attempt in a next call", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore", + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 120, + "errorCode": 7, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "change stream can be iterated again if previous iteration times out", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "maxAwaitTimeMS": 1, + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json new file mode 100644 index 0000000000..79b0de7b6a --- /dev/null +++ b/test/csot/close-cursors.json @@ -0,0 +1,239 @@ +{ + "description": "timeoutMS behaves correctly when closing cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 200 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectError": { + 
"isTimeoutError": true + } + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 200 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "close", + "object": "cursor", + "arguments": { + "timeoutMS": 400 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json new file mode 100644 index 0000000000..aa9c3eb23f --- /dev/null +++ b/test/csot/command-execution.json @@ -0,0 +1,393 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + 
"name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 4 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + 
"collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json new file mode 100644 index 0000000000..3868b3026c --- /dev/null +++ b/test/csot/convenient-transactions.json @@ -0,0 +1,209 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" 
+ ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/cursors.json b/test/csot/cursors.json new file mode 100644 index 0000000000..36949d7509 --- /dev/null +++ b/test/csot/cursors.json @@ -0,0 +1,113 @@ +{ + "description": "tests for timeoutMS behavior that applies to all cursor types", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client" + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "find errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "database aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listCollections errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listIndexes errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json new file mode 100644 index 0000000000..647e1bf792 --- /dev/null +++ b/test/csot/deprecated-options.json @@ -0,0 +1,7222 @@ +{ + "description": "operations ignore deprecated timeout options if timeoutMS is set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": 
[ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": 
"commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + 
] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": 
"database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + 
"database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": 
{ + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + 
"client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } 
+ } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": 
{ + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + 
"description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + 
{ + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + 
"observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + 
"session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + 
{ + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + 
"object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + 
"command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": 
"findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + 
"timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": 
"collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { 
+ "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + 
"name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1", + "timeoutMS": 100000 + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + 
}, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json new file mode 100644 index 0000000000..4889e39583 --- /dev/null +++ b/test/csot/error-transformations.json @@ -0,0 +1,180 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json new file mode 100644 index 0000000000..f1edbe68e3 --- /dev/null +++ b/test/csot/global-timeoutMS.json @@ -0,0 +1,5842 @@ +{ + "description": "timeoutMS can be configured on a MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": 
"client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": 
"testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + 
"arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + 
"database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS 
can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", 
+ "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", 
+ "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on 
collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + 
"expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + 
"uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": 
"test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + 
] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + 
"name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ 
+ "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json new file mode 100644 index 0000000000..c6c0944d2f --- /dev/null +++ b/test/csot/gridfs-advanced.json @@ -0,0 +1,385 @@ +{ + "description": "timeoutMS behaves correctly for advanced GridFS API operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo", + "timeoutMS": 2000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + 
"update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to update during a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "arguments": { + "timeoutMS": 2000 + } + } + ] + }, + { + "description": "timeoutMS applied to files collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop", + "databaseName": "test", + "command": { + "drop": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to chunks collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to drop as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json new file mode 100644 index 0000000000..9f4980114b --- /dev/null +++ b/test/csot/gridfs-delete.json @@ -0,0 +1,285 @@ +{ + "description": "timeoutMS behaves correctly for GridFS delete operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": 
"forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for delete", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to delete against the files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to delete against the chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + 
"description": "timeoutMS applied to entire delete, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json new file mode 100644 index 0000000000..8542f69e89 --- /dev/null +++ b/test/csot/gridfs-download.json @@ -0,0 +1,359 @@ +{ + "description": "timeoutMS behaves correctly for GridFS download operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for download", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to find to get files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": 
"000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find to get chunks", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to entire download, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json new file mode 100644 index 0000000000..7409036284 --- /dev/null +++ b/test/csot/gridfs-find.json @@ -0,0 +1,183 @@ +{ + "description": "timeoutMS behaves correctly for GridFS find operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + 
"documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {}, + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find command", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json new file mode 100644 index 0000000000..b3f174973d --- /dev/null +++ b/test/csot/gridfs-upload.json @@ -0,0 +1,409 @@ +{ + "description": "timeoutMS behaves correctly for GridFS upload operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 75 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for upload", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to initial find on files collection", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to chunk insertion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to creation of files 
document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to upload as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json new file mode 100644 index 0000000000..535425c934 --- /dev/null +++ b/test/csot/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command 
field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json new file mode 100644 index 0000000000..291c6e72aa --- /dev/null +++ b/test/csot/non-tailable-cursors.json @@ -0,0 +1,541 @@ +{ + "description": "timeoutMS behaves correctly for non-tailable cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + 
"minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "collectionName": "aggregateOutputColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is unset", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMS": 200, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "timeoutMS": 200, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": 
"test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find if timeoutMode is iteration", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "timeoutMS": 200, + "batchSize": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $out errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$out": "aggregateOutputColl" + } + ], + 
"timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "aggregate with $merge errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$merge": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + } + ] +} diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json new file mode 100644 index 0000000000..f33f876137 --- /dev/null +++ b/test/csot/override-operation-timeoutMS.json @@ -0,0 +1,3605 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] 
+ } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": 
{ + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + 
"command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + 
"databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + 
"timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": 
{ + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set 
to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
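Editor's note: the paired cases encode both ends of the per-operation override — a positive timeoutMS must surface on the wire as an int/long maxTimeMS (the `"$$type": ["int", "long"]` assertion), while timeoutMS: 0 means "no deadline" and maxTimeMS must be absent (`"$$exists": false`). PyMongo has no per-call timeoutMS argument; the per-operation override is expressed with the pymongo.timeout() context manager, so a sketch of the equivalent (assumed local server):

    import pymongo
    from pymongo import MongoClient

    # timeoutMS=0 in the URI disables the client-wide deadline,
    # so no maxTimeMS is attached to commands.
    client = MongoClient("mongodb://localhost:27017/?timeoutMS=0")
    coll = client.test.coll

    # Rough equivalent of "timeoutMS": 1000 on a single operation: a one-second
    # deadline covering everything inside the block, propagated to the server
    # as maxTimeMS per the CSOT spec.
    with pymongo.timeout(1):
        coll.update_one({}, {"$set": {"x": 1}})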
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 1000, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 0, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can 
be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json new file mode 100644 index 0000000000..aded781aee --- /dev/null +++ b/test/csot/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": 
"insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + 
"commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + 
"replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on database", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + 
"listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + 
"commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + 
"databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", 
+ "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + 
"description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two 
consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json new file mode 100644 index 0000000000..9daad260ef --- /dev/null +++ b/test/csot/retryability-timeoutMS.json @@ -0,0 +1,5688 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + 
"databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + 
"$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + 
"minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, 
+ "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + 
] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { 
+ "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + 
"timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + 
"databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + 
"maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } 
+ ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual 
attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { 
+ "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + 
"object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ 
+ { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], 
+ "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { 
+ "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + 
"find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, 
+ { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json new file mode 100644 index 0000000000..36f774fb5a --- /dev/null +++ b/test/csot/runCursorCommand.json @@ -0,0 +1,583 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "commandClient", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "commandDb", + "client": "commandClient", + "databaseName": "commandDb" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "errors if timeoutMode is set without timeoutMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if timeoutMode is cursorLifetime and cursorType is tailableAwait", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "timeoutMS": 100, + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "collection", + "maxTimeMS": { + "$$type": 
[ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "cappedCollection", + "batchSize": 1, + "tailable": true + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 1, + "cursorType": "tailable" + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "foo": "bar" + }, + { + "fizz": "buzz" + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true + }, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json new file mode 100644 index 0000000000..13ea91c794 --- /dev/null +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -0,0 +1,331 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + 
{ + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": 
"abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-operation-timeoutMS.json b/test/csot/sessions-override-operation-timeoutMS.json new file mode 100644 index 0000000000..441c698328 --- /dev/null +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -0,0 +1,335 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + 
"databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json new file mode 100644 index 0000000000..d90152e909 --- /dev/null +++ b/test/csot/sessions-override-timeoutMS.json @@ -0,0 +1,331 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 500 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } 
+ }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json new file mode 100644 index 0000000000..80e95ca906 --- /dev/null +++ b/test/csot/tailable-awaitData.json @@ -0,0 +1,632 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + 
{ + "minServerVersion": "4.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error on find if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on watch if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on find if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on watch if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait" + }, + "expectError": { 
+ "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 250, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 250, + "batchSize": 1, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "apply remaining timeoutMS if less than maxAwaitTimeMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 70 + } + } + } + } + ] + } + ] + }, + { + "description": "apply maxAwaitTimeMS if less than remaining timeout", + "operations": [ + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json new file mode 100644 index 0000000000..e88230e4f7 --- /dev/null +++ b/test/csot/tailable-non-awaitData.json @@ -0,0 +1,312 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } 
+ }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailable" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "timeoutMS": 200, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } 
+ } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/waitQueueTimeout.json b/test/csot/waitQueueTimeout.json new file mode 100644 index 0000000000..138d5cc161 --- /dev/null +++ b/test/csot/waitQueueTimeout.json @@ -0,0 +1,176 @@ +{ + "description": "WaitQueueTimeoutError does not clear the pool", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "maxPoolSize": 1, + "appname": "waitQueueTimeoutErrorTest" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "WaitQueueTimeoutError does not clear the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "waitQueueTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "commandStartedEvent": { + "commandName": "ping" + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100, + "command": { + "hello": 1 + }, + "commandName": "hello" + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "hello": 1 + }, + "commandName": "hello" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "hello", + "databaseName": "test", + "command": { + "hello": 1 + } + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git a/test/data_lake/unified/aggregate.json b/test/data_lake/unified/aggregate.json new file mode 100644 index 0000000000..68a3467c71 --- /dev/null +++ b/test/data_lake/unified/aggregate.json @@ -0,0 +1,84 @@ +{ + 
"description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "Aggregate with pipeline (project, sort, limit)", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + }, + { + "$sort": { + "a": 1 + } + }, + { + "$limit": 2 + } + ] + }, + "expectResult": [ + { + "a": 1, + "b": 2, + "c": 3 + }, + { + "a": 2, + "b": 3, + "c": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "driverdata" + }, + "commandName": "aggregate", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/estimatedDocumentCount.json b/test/data_lake/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..b7515a4418 --- /dev/null +++ b/test/data_lake/unified/estimatedDocumentCount.json @@ -0,0 +1,56 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "estimatedDocumentCount succeeds", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 15 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "driverdata" + }, + "commandName": "count", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/find.json b/test/data_lake/unified/find.json new file mode 100644 index 0000000000..d0652dc720 --- /dev/null +++ b/test/data_lake/unified/find.json @@ -0,0 +1,96 @@ +{ + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "Find with projection and sort", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "b": { + "$gt": 5 + } + }, + "projection": { + "_id": 0 + }, + "sort": { + "a": 1 + }, + "limit": 5 + }, + "expectResult": [ + { + "a": 5, + "b": 6, + "c": 7 + }, + { + "a": 6, + "b": 7, + "c": 8 + }, + { + "a": 7, + "b": 8, + "c": 9 + }, + { + "a": 8, + "b": 9, + "c": 10 + }, + { + "a": 9, + "b": 10, + "c": 11 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata" + }, + "commandName": "find", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/getMore.json b/test/data_lake/unified/getMore.json new file mode 100644 index 0000000000..109b6d3d8e --- /dev/null +++ 
b/test/data_lake/unified/getMore.json @@ -0,0 +1,95 @@ +{ + "description": "getMore", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "A successful find event with getMore", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata", + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": { + "$$type": "string" + }, + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "cursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listCollections.json b/test/data_lake/unified/listCollections.json new file mode 100644 index 0000000000..642e7ed328 --- /dev/null +++ b/test/data_lake/unified/listCollections.json @@ -0,0 +1,48 @@ +{ + "description": "listCollections", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + }, + "commandName": "listCollections", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listDatabases.json b/test/data_lake/unified/listDatabases.json new file mode 100644 index 0000000000..64506ee54e --- /dev/null +++ b/test/data_lake/unified/listDatabases.json @@ -0,0 +1,41 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "tests": [ + { + "description": "ListDatabases succeeds", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + }, + "commandName": "listDatabases", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/runCommand.json b/test/data_lake/unified/runCommand.json new file mode 100644 index 0000000000..325b6b3f30 --- /dev/null +++ b/test/data_lake/unified/runCommand.json @@ -0,0 +1,54 @@ +{ + "description": "runCommand", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ping succeeds using runCommand", + "operations": [ + { + "object": "database0", +
"name": "runCommand", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/errors/error_handling_handshake.json b/test/discovery_and_monitoring/errors/error_handling_handshake.json new file mode 100644 index 0000000000..56ca7d1132 --- /dev/null +++ b/test/discovery_and_monitoring/errors/error_handling_handshake.json @@ -0,0 +1,113 @@ +{ + "description": "Network timeouts before and after the handshake completes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore network timeout application error (afterHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Mark server unknown on network timeout application error (beforeHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-error.json b/test/discovery_and_monitoring/errors/non-stale-network-error.json new file mode 100644 index 0000000000..c22a47dc8a --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-error.json @@ -0,0 +1,80 @@ +{ + "description": "Non-stale network error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + 
"counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json new file mode 100644 index 0000000000..03dc5b66c9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json @@ -0,0 +1,88 @@ +{ + "description": "Non-stale network timeout error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network timeout error does not mark server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json new file mode 100644 index 0000000000..777e703a3c --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" 
+ } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..c4aa7fb71b --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json new file mode 100644 index 0000000000..2a9bc8a5cf --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion 
greater LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..638aa306cb --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + 
"$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..f327954a9d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json new file mode 100644 index 0000000000..0ac02fb19b --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + 
"description": "Non-stale topologyVersion greater NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json new file mode 100644 index 0000000000..daf2a7e8e1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json new file mode 100644 index 0000000000..a7d9e1fe24 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion greater ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + 
"a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json new file mode 100644 index 0000000000..2c59e785ab --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..f2cb834e83 --- /dev/null +++ 
b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json new file mode 100644 index 0000000000..095128d615 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..3d7312d4a5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..a457ba3072 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + 
"topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json new file mode 100644 index 0000000000..b7427a3f3d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json new file mode 100644 index 0000000000..8146a60d6e --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + 
"ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json new file mode 100644 index 0000000000..c7597007d7 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json @@ -0,0 +1,85 @@ +{ + "description": "Non-stale topologyVersion missing ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json new file mode 100644 index 0000000000..8448c60599 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed 
InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..9d601c4ede --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json new file mode 100644 index 0000000000..8be833f104 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A 
is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..f2f94c0d00 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + 
"generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..6d3b397566 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json new file mode 100644 index 0000000000..332ddf5ec1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json new file mode 100644 index 0000000000..c22a537f58 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json new file mode 100644 index 0000000000..eaaab79273 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json @@ -0,0 +1,100 @@ +{ + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error", + "uri": 
"mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json new file mode 100644 index 0000000000..40c4ed6c80 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..5c489f5ecb --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 InterruptedDueToReplStateChange error", + "uri": 
"mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..f0851b299e --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..a675f0ca54 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + 
"setName": "rs" + } + }, + { + "description": "Post-4.2 NotPrimaryNoSecondaryOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..ea9bf1d16b --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotPrimaryOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json new file mode 100644 index 0000000000..10211fca70 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotWritablePrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, 
+ "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json new file mode 100644 index 0000000000..fa98d0bf06 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json new file mode 100644 index 0000000000..cd587205b6 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json @@ -0,0 +1,70 @@ +{ + "description": "Post-4.2 ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json new file mode 100644 index 0000000000..eb00b69613 --- /dev/null +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -0,0 +1,131 @@ +{ + "description": "Do not check errmsg when code exists", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + 
"hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"not master\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "not master", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"node is recovering\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "node is recovering", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json new file mode 100644 index 0000000000..2f7c7fd13b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..b0b51ef676 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + 
"minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..b68e23b7a7 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + 
"topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..d9b3562654 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale 
NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json new file mode 100644 index 0000000000..90889356dd --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json new file mode 100644 index 0000000000..0a707a1c07 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } 
+ } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json new file mode 100644 index 0000000000..5da3413d5b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json 
b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..d29310fb61 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..376bb93770 --- /dev/null +++ 
b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..990fc45e4e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation LegacyNotPrimary error 
afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..1744a82f77 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, 
+ "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..57ca1cf158 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } 
+ } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json new file mode 100644 index 0000000000..995453c82b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + 
"pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..bf4c85d24f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation PrimarySteppedDown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { 
+ "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..9374900e06 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + 
} + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json new file mode 100644 index 0000000000..f5d01b6540 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation network error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" 
+ ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale network error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json new file mode 100644 index 0000000000..fa84343b0b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation timeout error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": 
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale timeout error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..72fac9a86e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedAtShutdown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + 
"processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..3c713592a3 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": 
{ + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..257b6ec6fb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation LegacyNotPrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": 
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..dcb5716f44 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryNoSecondaryOk error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..58cefafae9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotPrimaryOrSecondary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json new 
file mode 100644 index 0000000000..c92b01e054 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation NotWritablePrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..62759b6ad9 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation 
PrimarySteppedDown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..4661632c4f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,176 @@ +{ + "description": "Stale generation ShutdownInProgress error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + 
"isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json new file mode 100644 index 0000000000..15b044fc73 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation network error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + 
"servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale network error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json new file mode 100644 index 0000000000..acbb9e581e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -0,0 +1,163 @@ +{ + "description": "Stale generation timeout error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": 
"afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale timeout error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json new file mode 100644 index 0000000000..f2207a04d5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": 
"1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..4387451ce6 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + 
"outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json new file mode 100644 index 0000000000..8c0cf00f22 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json new file mode 100644 index 0000000000..99a828326c --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json @@ -0,0 +1,147 
@@ +{ + "description": "Stale topologyVersion NotPrimaryNoSecondaryOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json new file mode 100644 index 0000000000..ba2ea87106 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion NotPrimaryOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + 
"counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotPrimaryOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json new file mode 100644 index 0000000000..8edd317a73 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion NotWritablePrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + 
"setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotWritablePrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotWritablePrimary", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json new file mode 100644 index 0000000000..da8e4755eb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } 
+ } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json new file mode 100644 index 0000000000..aa252e1dc4 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json @@ -0,0 +1,147 @@ +{ + "description": "Stale topologyVersion ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/write_errors_ignored.json b/test/discovery_and_monitoring/errors/write_errors_ignored.json new file mode 100644 index 0000000000..b588807e08 --- /dev/null +++ b/test/discovery_and_monitoring/errors/write_errors_ignored.json @@ -0,0 +1,98 @@ +{ + "description": "writeErrors field is ignored", + 
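
The stale-topologyVersion-* fixtures above all pin down one SDAM rule: an application error carrying a topologyVersion less than or equal to the one already cached for the server is stale and must be ignored, leaving the server RSPrimary with pool generation 0; only a strictly greater counter (or a different processId) may clear the server. A minimal Python sketch of that comparison, assuming plain-dict inputs (the helper name is illustrative, not PyMongo's internal API):

def is_stale_error(cached_tv, error_tv):
    # Without both versions there is nothing to compare: treat the
    # error as fresh and let it mark the server Unknown.
    if cached_tv is None or error_tv is None:
        return False
    # An error from a different server process is never stale.
    if cached_tv["processId"] != error_tv["processId"]:
        return False
    # Same process: stale unless the error's counter is strictly newer,
    # which is why both the "less" and "equal" phases above are ignored.
    return int(error_tv["counter"]) <= int(cached_tv["counter"])

The write_errors_ignored fixture starting here tests the complementary rule: entries under writeErrors never reach this comparison at all, so an "ok": 1 reply containing a NotPrimaryNoSecondaryOk write error leaves the topology untouched.
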
"uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore command error with writeErrors field", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 1, + "writeErrors": [ + { + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, + "index": 0 + } + ] + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json new file mode 100644 index 0000000000..d2e34478e6 --- /dev/null +++ b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json @@ -0,0 +1,28 @@ +{ + "description": "Load balancer can be discovered and only has the address property set", + "uri": "mongodb://a/?loadBalanced=true", + "phases": [ + { + "outcome": { + "servers": { + "a:27017": { + "type": "LoadBalancer", + "setName": null, + "setVersion": null, + "electionId": null, + "logicalSessionTimeoutMinutes": null, + "minWireVersion": null, + "maxWireVersion": null, + "topologyVersion": null + } + }, + "topologyType": "LoadBalanced", + "setName": null, + "logicalSessionTimeoutMinutes": null, + "maxSetVersion": null, + "maxElectionId": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json new file mode 100644 index 0000000000..dfd5d57dfa --- /dev/null +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -0,0 +1,57 @@ +{ + "description": "Replica set member with large maxWireVersion", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1000 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json new file mode 100644 index 0000000000..95e03ea958 --- /dev/null +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -0,0 +1,40 @@ +{ + "description": "Replica set member and an unknown server", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json new file mode 100644 index 0000000000..803462b156 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -0,0 +1,42 @@ +{ + "description": "Discover arbiters with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "arbiters": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json new file mode 100644 index 0000000000..e58d7c7fb4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -0,0 +1,42 @@ +{ + "description": "Discover arbiters with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "arbiters": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json new file mode 100644 index 0000000000..3b7fc836ec --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -0,0 +1,32 @@ +{ + "description": "Discover ghost with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json new file mode 100644 index 0000000000..1a8457983b --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -0,0 +1,36 @@ +{ + "description": "Discover ghost with replicaSet URI option", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json new file mode 100644 index 0000000000..10bd51edeb --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -0,0 +1,46 @@ +{ + "description": "Discover hidden with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json new file mode 100644 index 0000000000..63cf558675 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -0,0 +1,46 @@ +{ + "description": "Discover hidden with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json new file mode 100644 index 0000000000..0a292c675c --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -0,0 +1,80 @@ +{ + "description": "Discover passives with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": 
"RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "passive": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json new file mode 100644 index 0000000000..c48fd47625 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -0,0 +1,80 @@ +{ + "description": "Discover passives with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "passive": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json new file mode 100644 index 0000000000..04e7a4984c --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -0,0 +1,40 @@ +{ + "description": "Discover primary with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json new file mode 100644 index 0000000000..3cdcfdcee2 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -0,0 +1,40 @@ +{ + "description": "Discover primary with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + 
"phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json new file mode 100644 index 0000000000..9c3b8d8b7d --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -0,0 +1,45 @@ +{ + "description": "Discover RSOther with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": false, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json new file mode 100644 index 0000000000..3da9efb066 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -0,0 +1,66 @@ +{ + "description": "Discover RSOther with replicaSet URI option", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": false, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json new file mode 100644 index 0000000000..64a1ce31e3 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -0,0 +1,41 @@ +{ + "description": "Discover secondary with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSSecondary", + 
"setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json new file mode 100644 index 0000000000..d230f976a2 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -0,0 +1,41 @@ +{ + "description": "Discover secondary with replicaSet URI option", + "uri": "mongodb://b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json new file mode 100644 index 0000000000..e9deaa7587 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -0,0 +1,179 @@ +{ + "description": "Replica set discovery", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + }, + "c:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "primary": "d:27017", + "hosts": [ + "b:27017", + "c:27017", + "d:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "PossiblePrimary", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "d:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "b:27017", + "c:27017", + "d:27017", + "e:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "e:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "minWireVersion": 0, + 
"maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "d:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "e:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..2fcea2bf66 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json new file mode 100644 index 0000000000..f1deedf9f4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -0,0 +1,73 @@ +{ + "description": "New primary with equal electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + 
"maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json new file mode 100644 index 0000000000..085e81e266 --- /dev/null +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -0,0 +1,35 @@ +{ + "description": "Host list differs from seeds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json new file mode 100644 index 0000000000..bda18d9f6f --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -0,0 +1,56 @@ +{ + "description": "Incompatible arbiter", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "arbiterOnly": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json new file mode 100644 index 0000000000..9d82e31682 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -0,0 +1,51 @@ +{ + "description": "Incompatible ghost", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json new file mode 100644 index 0000000000..149ba01142 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -0,0 +1,56 @@ +{ + "description": "Incompatible other", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + 
"minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "hidden": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSOther", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json new file mode 100644 index 0000000000..c68790ddfd --- /dev/null +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -0,0 +1,279 @@ +{ + "description": "Parse logicalSessionTimeoutMinutes from replica set", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017", + "d:27017", + "e:27017" + ], + "setName": "rs", + "logicalSessionTimeoutMinutes": 3, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + }, + "d:27017": { + "type": "Unknown" + }, + "e:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": 3, + "setName": "rs" + } + }, + { + "responses": [ + [ + "d:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + }, + "d:27017": { + "type": "RSGhost" + }, + "e:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": 3, + "setName": "rs" + } + }, + { + "responses": [ + [ + "e:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "hosts": [ + "a:27017", + "b:27017", + "c:27017", + "d:27017", + "e:27017" + ], + "setName": "rs", + "arbiterOnly": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + }, + "d:27017": { + "type": "RSGhost" + }, + "e:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": 3, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017", + "d:27017", + "e:27017" + ], + "setName": "rs", + "logicalSessionTimeoutMinutes": 2, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown" + }, + "d:27017": { + "type": "RSGhost" + }, + "e:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": 2, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + 
{ + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "setName": "rs", + "hidden": true, + "logicalSessionTimeoutMinutes": 1, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "RSOther", + "setName": "rs" + }, + "d:27017": { + "type": "RSGhost" + }, + "e:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": 2, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017", + "d:27017", + "e:27017" + ], + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "RSOther", + "setName": "rs" + }, + "d:27017": { + "type": "RSGhost" + }, + "e:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json new file mode 100644 index 0000000000..a05fed0efb --- /dev/null +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -0,0 +1,69 @@ +{ + "description": "Member removed by reconfig", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json new file mode 100644 index 0000000000..db100db9f3 --- /dev/null +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -0,0 +1,60 @@ +{ + "description": "Member brought up as standalone", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + 
"maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json new file mode 100644 index 0000000000..69b07516b9 --- /dev/null +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -0,0 +1,75 @@ +{ + "description": "New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json new file mode 100644 index 0000000000..90ef0ce8dc --- /dev/null +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -0,0 +1,149 @@ +{ + "description": "New primary with greater setVersion and electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + 
"maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json new file mode 100644 index 0000000000..9c1e2d4bdd --- /dev/null +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -0,0 +1,149 @@ +{ + "description": "New primary with greater setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + } + }, + "topologyType": 
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json new file mode 100644 index 0000000000..774b3a5736 --- /dev/null +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -0,0 +1,69 @@ +{ + "description": "New primary with wrong setName", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/non_rs_member.json b/test/discovery_and_monitoring/rs/non_rs_member.json new file mode 100644 index 0000000000..6bf10bd628 --- /dev/null +++ b/test/discovery_and_monitoring/rs/non_rs_member.json @@ -0,0 +1,30 @@ +{ + "description": "Non replicaSet member responds", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json new file mode 100644 index 0000000000..62915495e0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -0,0 +1,49 @@ +{ + "description": "Replica set case normalization", + "uri": "mongodb://A/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "A:27017" + ], + "passives": [ + "B:27017" + ], + "arbiters": [ + "C:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + }, + "c:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json new file mode 100644 index 0000000000..0d9ba6213e --- /dev/null +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -0,0 +1,95 @@ +{ + "description": "Replica set mixed case normalization", + "uri": "mongodb://A/?replicaSet=rs", + "phases": 
[ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "me": "A:27017", + "hosts": [ + "A:27017" + ], + "passives": [ + "B:27017" + ], + "arbiters": [ + "C:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + }, + "c:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "me": "B:27017", + "hosts": [ + "A:27017" + ], + "passives": [ + "B:27017" + ], + "arbiters": [ + "C:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json new file mode 100644 index 0000000000..8a77f31c50 --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -0,0 +1,203 @@ +{ + "description": "Pre 6.0 Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + 
"setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json new file mode 100644 index 0000000000..8a99a78475 --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -0,0 +1,209 @@ +{ + "description": "Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", 
+ "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json new file mode 100644 index 0000000000..e34280e88c --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -0,0 +1,61 @@ +{ + "description": "Primary becomes ghost", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json new file mode 100644 index 0000000000..79510d9399 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -0,0 +1,56 @@ +{ + "description": "Primary becomes mongos", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } 
+ } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json new file mode 100644 index 0000000000..abcc1e2d01 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -0,0 +1,53 @@ +{ + "description": "Primary becomes standalone", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json new file mode 100644 index 0000000000..3b564d2c93 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -0,0 +1,59 @@ +{ + "description": "Primary changes setName", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json new file mode 100644 index 0000000000..73a01a82a9 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -0,0 +1,54 @@ +{ + "description": "Disconnected from primary", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json new file mode 100644 index 0000000000..b030bd2c53 --- /dev/null +++ 
b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -0,0 +1,237 @@ +{ + "description": "Disconnected from primary, reject primary with stale electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "b:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000003" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000003" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + 
"a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000003" + } + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json new file mode 100644 index 0000000000..653a5f29e8 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -0,0 +1,237 @@ +{ + "description": "Disconnected from primary, reject primary with stale setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + 
"setVersion": 2, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json new file mode 100644 index 0000000000..1ca72225a2 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -0,0 +1,68 @@ +{ + "description": "Secondary with mismatched 'me' tells us who the primary is", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "me": "c:27017", + "hosts": [ + "b:27017" + ], + "setName": "rs", + "primary": "b:27017", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "PossiblePrimary", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "me": "b:27017", + "hosts": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json new file mode 100644 index 0000000000..6bb6226f8a --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -0,0 +1,41 @@ +{ + "description": "Primary mismatched me", + "phases": [ + { + "outcome": { + "servers": { + "a:27017": { + "setName": null, + "type": "Unknown" + }, + "b:27017": { + "setName": null, + "type": "Unknown" + } + }, + "setName": "rs", + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null + }, + "responses": [ + [ + "localhost:27017", + { + "me": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "helloOk": true, + "isWritablePrimary": true, + "ok": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ] + } + ], + "uri": "mongodb://localhost:27017/?replicaSet=rs" +} diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json new file mode 100644 index 
0000000000..a55dcfc6d4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -0,0 +1,79 @@ +{ + "description": "Primary mismatched me is not removed", + "uri": "mongodb://localhost:27017,localhost:27018/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "localhost:27017", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "a:27017", + "minWireVersion": 0, + "maxWireVersion": 25 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "localhost:27018", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "localhost:27018", + "minWireVersion": 0, + "maxWireVersion": 25 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json new file mode 100644 index 0000000000..ed28c48c87 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -0,0 +1,155 @@ +{ + "description": "Primary reports a new member", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, 
+ "setName": "rs", + "primary": "b:27017", + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "c:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json new file mode 100644 index 0000000000..798a648d19 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -0,0 +1,76 @@ +{ + "description": "Primary to no primary with mismatched me", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "me": "a:27017", + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "me": "c:27017", + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json new file mode 100644 index 0000000000..1366e38996 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -0,0 +1,30 @@ +{ + "description": "Primary wrong setName", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json new file mode 100644 index 0000000000..3ce0948ab8 --- /dev/null +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -0,0 +1,144 @@ +{ + "description": "Repeated isWritablePrimary response must be processed", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json new file mode 100644 index 0000000000..1cd732b82f --- /dev/null +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -0,0 +1,26 @@ +{ + "description": "replicaSet URI option causes starting topology to be RSNP", + "uri": "mongodb://a/?replicaSet=rs&directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json new file mode 100644 index 0000000000..fa46a14ceb --- /dev/null +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -0,0 +1,66 @@ +{ + "description": "Response from removed server", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json 
b/test/discovery_and_monitoring/rs/sec_not_auth.json new file mode 100644 index 0000000000..ccbe7a08af --- /dev/null +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -0,0 +1,56 @@ +{ + "description": "Secondary's host list is not authoritative", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "b:27017", + "c:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json new file mode 100644 index 0000000000..f27060533c --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -0,0 +1,83 @@ +{ + "description": "Pre 6.0 Secondary ignored when ok is zero", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json new file mode 100644 index 0000000000..9ffff58ef0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -0,0 +1,83 @@ +{ + "description": "Secondary ignored when ok is zero", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType":
"ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json new file mode 100644 index 0000000000..c23d8dc4c9 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json @@ -0,0 +1,38 @@ +{ + "description": "Secondary with IPv6 literal", + "uri": "mongodb://[::1]/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "[::1]:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "me": "[::1]:27017", + "hosts": [ + "[::1]:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 26 + } + ] + ], + "outcome": { + "servers": { + "[::1]:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json new file mode 100644 index 0000000000..790e4bfca8 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -0,0 +1,41 @@ +{ + "description": "Secondary mismatched me", + "uri": "mongodb://localhost:27017/?replicaSet=rs", + "phases": [ + { + "outcome": { + "servers": { + "a:27017": { + "setName": null, + "type": "Unknown" + }, + "b:27017": { + "setName": null, + "type": "Unknown" + } + }, + "setName": "rs", + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null + }, + "responses": [ + [ + "localhost:27017", + { + "me": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "helloOk": true, + "isWritablePrimary": false, + "ok": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ] + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json new file mode 100644 index 0000000000..1f86b50543 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -0,0 +1,31 @@ +{ + "description": "Secondary wrong setName", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json new file mode 100644 index 0000000000..6b89914151 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -0,0 +1,71 @@ +{ + "description": "Secondary wrong setName with primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + 
"helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..1cc608a344 --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,147 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": 
"000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..3669511c5a --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..06c89609f5 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,85 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git 
a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..9a1ee61399 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -0,0 +1,85 @@ +{ + "description": "Pre 6.0 setVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json new file mode 100644 index 0000000000..256fafe108 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json new file mode 100644 index 0000000000..6de995518d --- 
/dev/null +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -0,0 +1,60 @@ +{ + "description": "Primary becomes a secondary with wrong setName", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json new file mode 100644 index 0000000000..696246f8e1 --- /dev/null +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -0,0 +1,57 @@ +{ + "description": "Replica set member with large minWireVersion", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 999, + "maxWireVersion": 1000 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json new file mode 100644 index 0000000000..dc8a5b2b9c --- /dev/null +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -0,0 +1,55 @@ +{ + "description": "Replica set member with default maxWireVersion of 0", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ] + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_equal.json b/test/discovery_and_monitoring/rs/topology_version_equal.json new file mode 100644 index 0000000000..d3baa13479 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_equal.json @@ -0,0 +1,101 @@ +{ + "description": "Primary with
equal topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json new file mode 100644 index 0000000000..f296ccee62 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -0,0 +1,259 @@ +{ + "description": "Primary with newer topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + 
"processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "c:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null + }, + "d:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "e:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_less.json b/test/discovery_and_monitoring/rs/topology_version_less.json new file mode 100644 index 0000000000..435337ff25 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_less.json @@ -0,0 +1,97 @@ +{ + "description": "Primary with older topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + 
"a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json new file mode 100644 index 0000000000..c6ffb321ca --- /dev/null +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -0,0 +1,27 @@ +{ + "description": "Unexpected mongos", + "uri": "mongodb://b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..03195aacde --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -0,0 +1,140 @@ +{ + "description": "Pre 6.0 Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + 
"logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json new file mode 100644 index 0000000000..eaf586d728 --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -0,0 +1,146 @@ +{ + "description": "Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json new file mode 100644 index 0000000000..d0764d24dc --- /dev/null +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -0,0 +1,37 @@ +{ + "description": "Wrong setName", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "b:27017", + "c:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + 
], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json new file mode 100644 index 0000000000..ceb0ec24c4 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -0,0 +1,48 @@ +{ + "description": "Multiple mongoses with large maxWireVersion", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 1000 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json new file mode 100644 index 0000000000..bf7e57521c --- /dev/null +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -0,0 +1,31 @@ +{ + "description": "Discover single mongos", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json new file mode 100644 index 0000000000..3da0f84ca2 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -0,0 +1,91 @@ +{ + "description": "Parse logicalSessionTimeoutMinutes from mongoses", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "logicalSessionTimeoutMinutes": 1, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "logicalSessionTimeoutMinutes": 2, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": 1, + "setName": null + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "logicalSessionTimeoutMinutes": 1, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + 
"logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json new file mode 100644 index 0000000000..29b3351869 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -0,0 +1,100 @@ +{ + "description": "Mongos disconnect", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json new file mode 100644 index 0000000000..ae0c2d9cde --- /dev/null +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -0,0 +1,47 @@ +{ + "description": "Multiple mongoses", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json new file mode 100644 index 0000000000..4698f576d5 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -0,0 +1,46 @@ +{ + "description": "Non-Mongos server in sharded cluster", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + } + }, + 
"topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/normalize_uri_case.json b/test/discovery_and_monitoring/sharded/normalize_uri_case.json new file mode 100644 index 0000000000..4aa7cb08b6 --- /dev/null +++ b/test/discovery_and_monitoring/sharded/normalize_uri_case.json @@ -0,0 +1,24 @@ +{ + "description": "Normalize URI case", + "uri": "mongodb://A,B", + "phases": [ + { + "responses": [], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json new file mode 100644 index 0000000000..c4e984ddec --- /dev/null +++ b/test/discovery_and_monitoring/sharded/too_new.json @@ -0,0 +1,48 @@ +{ + "description": "Multiple mongoses with large minWireVersion", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 999, + "maxWireVersion": 1000 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 7, + "maxWireVersion": 900 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json new file mode 100644 index 0000000000..b918715ada --- /dev/null +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -0,0 +1,46 @@ +{ + "description": "Multiple mongoses with default maxWireVersion of 0", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 2, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid" + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + }, + "b:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json new file mode 100644 index 0000000000..493d9b748e --- /dev/null +++ b/test/discovery_and_monitoring/single/compatible.json @@ -0,0 +1,32 @@ +{ + "description": "Standalone with large maxWireVersion", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json 
b/test/discovery_and_monitoring/single/direct_connection_external_ip.json new file mode 100644 index 0000000000..1461b4c469 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -0,0 +1,35 @@ +{ + "description": "Direct connection to RSPrimary via external IP", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json new file mode 100644 index 0000000000..72be020862 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -0,0 +1,32 @@ +{ + "description": "Direct connection to mongos", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json new file mode 100644 index 0000000000..82a51d390e --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -0,0 +1,32 @@ +{ + "description": "Direct connection with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs&directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json new file mode 100644 index 0000000000..e06d284364 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -0,0 +1,37 @@ +{ + "description": "Direct connection to RSArbiter", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "arbiterOnly": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json new file mode 100644 index 0000000000..45eb1602fb --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json 
@@ -0,0 +1,36 @@ +{ + "description": "Direct connection to RSPrimary", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json new file mode 100644 index 0000000000..b1bef8a49f --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -0,0 +1,37 @@ +{ + "description": "Direct connection to RSSecondary", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json new file mode 100644 index 0000000000..e71ba07e74 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -0,0 +1,31 @@ +{ + "description": "Direct connection to standalone", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json new file mode 100644 index 0000000000..16f2735da5 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json @@ -0,0 +1,25 @@ +{ + "description": "Direct connection to unavailable seed", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json new file mode 100644 index 0000000000..8014a0a533 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -0,0 +1,65 @@ +{ + "description": "Direct connection to RSPrimary with wrong set name", + "uri": "mongodb://a/?directConnection=true&replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": 
"wrong", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json new file mode 100644 index 0000000000..d78c81654b --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -0,0 +1,31 @@ +{ + "description": "Discover standalone", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_unavailable_seed.json b/test/discovery_and_monitoring/single/discover_unavailable_seed.json new file mode 100644 index 0000000000..b1f306c2be --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_unavailable_seed.json @@ -0,0 +1,25 @@ +{ + "description": "Discover unavailable seed", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json new file mode 100644 index 0000000000..236eabe00a --- /dev/null +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -0,0 +1,32 @@ +{ + "description": "Parse logicalSessionTimeoutMinutes from standalone", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "logicalSessionTimeoutMinutes": 7, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": 7, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json new file mode 100644 index 0000000000..cfaac3564a --- /dev/null +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -0,0 +1,41 @@ +{ + "description": "Handle a not-ok isWritablePrimary response", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "a:27017", + { + "ok": 0, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": 
{ + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json new file mode 100644 index 0000000000..675cdbb008 --- /dev/null +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -0,0 +1,31 @@ +{ + "description": "Standalone removed from multi-server topology", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json new file mode 100644 index 0000000000..488cac4918 --- /dev/null +++ b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json @@ -0,0 +1,30 @@ +{ + "description": "Connect to standalone using legacy hello", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/too_new.json b/test/discovery_and_monitoring/single/too_new.json new file mode 100644 index 0000000000..8dd57d3348 --- /dev/null +++ b/test/discovery_and_monitoring/single/too_new.json @@ -0,0 +1,32 @@ +{ + "description": "Standalone with large minWireVersion", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 999, + "maxWireVersion": 1000 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/too_old.json b/test/discovery_and_monitoring/single/too_old.json new file mode 100644 index 0000000000..8c027e01db --- /dev/null +++ b/test/discovery_and_monitoring/single/too_old.json @@ -0,0 +1,30 @@ +{ + "description": "Standalone with default maxWireVersion of 0", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json new file mode 100644 index 0000000000..c3dd98cf62 --- /dev/null +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -0,0 +1,56 @@ +{ + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 21", + "uri": "mongodb://a", + "phases": [ + { + 
"responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-error.json b/test/discovery_and_monitoring/unified/auth-error.json new file mode 100644 index 0000000000..62d26494c7 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-misc-command-error.json b/test/discovery_and_monitoring/unified/auth-misc-command-error.json new file mode 100644 index 0000000000..fd62fe604e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-misc-command-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-misc-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-misc-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json new file mode 100644 index 0000000000..84763af32e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + 
"commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json new file mode 100644 index 0000000000..3cf9576eba --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -0,0 +1,233 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { 
+ "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-shutdown-error.json b/test/discovery_and_monitoring/unified/auth-shutdown-error.json new file mode 100644 index 0000000000..b9e503af66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-shutdown-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-shutdown-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": 
"sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/cancel-server-check.json b/test/discovery_and_monitoring/unified/cancel-server-check.json new file mode 100644 index 0000000000..a60ccfcb41 --- /dev/null +++ b/test/discovery_and_monitoring/unified/cancel-server-check.json @@ -0,0 +1,201 @@ +{ + "description": "cancel-server-check", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Cancel server check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + "serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "cancel-server-check" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectResult": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/connectTimeoutMS.json 
b/test/discovery_and_monitoring/unified/connectTimeoutMS.json new file mode 100644 index 0000000000..d3e860a9cb --- /dev/null +++ b/test/discovery_and_monitoring/unified/connectTimeoutMS.json @@ -0,0 +1,221 @@ +{ + "description": "connectTimeoutMS", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "connectTimeoutMS=0", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "connectTimeoutMS" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + }, + "client": "setupClient" + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-error.json b/test/discovery_and_monitoring/unified/find-network-error.json new file mode 100644 index 0000000000..c1b6db40ca --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-error.json @@ -0,0 +1,234 @@ +{ + "description": "find-network-error", + 
"schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-timeout-error.json b/test/discovery_and_monitoring/unified/find-network-timeout-error.json new file mode 100644 index 0000000000..e5ac9f21aa --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-timeout-error.json @@ -0,0 +1,199 @@ +{ + "description": 
"find-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-timeout-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-timeout-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-shutdown-error.json b/test/discovery_and_monitoring/unified/find-shutdown-error.json new file mode 100644 index 0000000000..6e5a2cac05 --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-shutdown-error.json @@ -0,0 +1,251 @@ +{ + "description": "find-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": 
"find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json new file mode 100644 index 0000000000..87958cb2c0 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -0,0 +1,376 @@ +{ + "description": 
"hello-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Command error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Command error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + 
"blockTimeMS": 750, + "errorCode": 91 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json new file mode 100644 index 0000000000..15ed2b605e --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -0,0 +1,346 @@ +{ + "description": "hello-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } 
+ }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-timeout.json b/test/discovery_and_monitoring/unified/hello-timeout.json new file mode 100644 index 0000000000..fe7cf4e78d --- /dev/null +++ 
b/test/discovery_and_monitoring/unified/hello-timeout.json @@ -0,0 +1,514 @@ +{ + "description": "hello-timeout", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network timeout on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": 
"timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "Driver extends timeout while streaming", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git 
a/test/discovery_and_monitoring/unified/insert-network-error.json b/test/discovery_and_monitoring/unified/insert-network-error.json new file mode 100644 index 0000000000..bfe41a4cb6 --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-network-error.json @@ -0,0 +1,246 @@ +{ + "description": "insert-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, 
+ { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-shutdown-error.json b/test/discovery_and_monitoring/unified/insert-shutdown-error.json new file mode 100644 index 0000000000..af7c6c987a --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-shutdown-error.json @@ -0,0 +1,250 @@ +{ + "description": "insert-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json new file mode 100644 index 0000000000..d9329646d4 --- /dev/null +++ b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json @@ -0,0 +1,591 @@ +{ + "description": "interruptInUse", + "schemaVersion": "1.11", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Connection pool clear uses interruptInUseConnections=true after monitor timeout", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUse", + "retryReads": false, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUse" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + 
"poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandFailedEvent", + "commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryable", + "retryReads": true, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + } + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryable" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable for write", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandFailedEvent", + 
"commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryableWrite", + "retryWrites": true, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + }, + "update": { + "$set": { + "a": "bar" + } + } + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryableWrite" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1, + "a": "bar" + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..30c0657630 --- /dev/null +++ b/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json @@ -0,0 +1,88 @@ +{ + "description": "loadbalanced-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": {} + } + }, + { + "topologyDescriptionChangedEvent": { + "newDescription": { + "type": "LoadBalanced" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-loadbalanced.json b/test/discovery_and_monitoring/unified/logging-loadbalanced.json new file mode 100644 index 0000000000..0ad3b0ceaa --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-loadbalanced.json @@ -0,0 +1,166 @@ +{ + "description": "loadbalanced-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-replicaset.json b/test/discovery_and_monitoring/unified/logging-replicaset.json new file mode 100644 index 0000000000..fe6ac60b68 --- /dev/null 
+++ b/test/discovery_and_monitoring/unified/logging-replicaset.json @@ -0,0 +1,610 @@ +{ + "description": "replicaset-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + 
"previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + 
] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-sharded.json b/test/discovery_and_monitoring/unified/logging-sharded.json new file mode 100644 index 0000000000..3788708ab0 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-sharded.json @@ -0,0 +1,494 @@ +{ + "description": "sharded-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + 
"component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + 
"$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-standalone.json b/test/discovery_and_monitoring/unified/logging-standalone.json new file mode 100644 index 0000000000..0682a1a4fb --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-standalone.json @@ -0,0 +1,519 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + 
"data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started", + "awaited": { + "$$exists": true + }, + "topologyId": { + 
"$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json new file mode 100644 index 0000000000..bd9e9fcdec --- /dev/null +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ -0,0 +1,177 @@ +{ + "description": "minPoolSize-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": 
"4.4.7", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "sdam-minPoolSize-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "poolReadyEvent" + ], + "uriOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "sdam-minPoolSize-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": {} + }, + "commandName": "ping" + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + }, + "client": "setupClient" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 2 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/pool-cleared-error.json b/test/discovery_and_monitoring/unified/pool-cleared-error.json new file mode 100644 index 0000000000..b7f6924f2b --- /dev/null +++ b/test/discovery_and_monitoring/unified/pool-cleared-error.json @@ -0,0 +1,373 @@ +{ + "description": "pool-cleared-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "PoolClearedError does not mark server unknown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": 
"client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "pool-cleared-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100, + "closeConnection": true, + "appName": "poolClearedErrorTest" + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + }, + { + "thread": { + "id": "thread2" + } + }, + { + "thread": { + "id": "thread3" + } + }, + { + "thread": { + "id": "thread4" + } + }, + { + "thread": { + "id": "thread5" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread3", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 5 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread4", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread5", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 7 + } + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread2" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread3" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread4" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread5" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + 
"object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 8 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..3147a07a1e --- /dev/null +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -0,0 +1,242 @@ +{ + "description": "rediscover-quickly-after-step-down", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "setupClient", + "databaseName": "admin" + } + } + ], + "initialData": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test-replSetStepDown" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordTopologyDescription", + "object": "testRunner", + "arguments": { + "client": "client", + "id": "topologyDescription" + } + }, + { + "name": "assertTopologyType", + "object": "testRunner", + "arguments": { + "topologyDescription": "topologyDescription", + "topologyType": "ReplicaSetWithPrimary" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "replSetFreeze" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, + "force": false + }, + "commandName": "replSetStepDown" + } + }, + { + "name": "waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "client": "client", + "priorTopologyDescription": "topologyDescription", + 
"timeoutMS": 15000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..066a4ffee5 --- /dev/null +++ b/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json @@ -0,0 +1,89 @@ +{ + "description": "replicaset-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "ReplicaSetWithPrimary" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json new file mode 100644 index 0000000000..e44fad1bcd --- /dev/null +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json @@ -0,0 +1,511 @@ +{ + "description": "serverMonitoringMode", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "connect with serverMonitoringMode=auto >=4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + 
"entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=auto <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=stream >=4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } 
+ }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=stream <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=poll", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "poll waits after successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 500 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"serverHeartbeatStartedEvent": {} + }, + "count": 1 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..98fb585531 --- /dev/null +++ b/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json @@ -0,0 +1,108 @@ +{ + "description": "sharded-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Sharded" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Sharded" + }, + "newDescription": { + "type": "Sharded" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Sharded" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..27b5444d54 --- /dev/null +++ b/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json @@ -0,0 +1,97 @@ +{ + "description": "standalone-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { 
+ "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Single" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json new file mode 100644 index 0000000000..9a9b22fc1e --- /dev/null +++ b/test/gridfs/delete.json @@ -0,0 +1,739 @@ +{ + "description": "gridfs-delete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "delete when length is 0", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + 
"filename": "length-2", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when length is 0 and there is one extra empty chunk", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when length is 8", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": 
{ + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when files entry does not exist", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when files entry does not exist and there are orphaned chunks", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_files_collection", + "arguments": { + "filter": { + "_id": { + "$oid": "000000000000000000000004" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + 
"filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/deleteByName.json b/test/gridfs/deleteByName.json new file mode 100644 index 0000000000..884d0300ce --- /dev/null +++ b/test/gridfs/deleteByName.json @@ -0,0 +1,230 @@ +{ + "description": "gridfs-deleteByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], 
+ "tests": [ + { + "description": "delete when multiple revisions of the file exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "filename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when file name does not exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/download.json b/test/gridfs/download.json new file mode 100644 index 0000000000..67658ac512 --- /dev/null +++ b/test/gridfs/download.json @@ -0,0 +1,540 @@ +{ + "description": "gridfs-download", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-0-with-empty-chunk", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-2", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-8", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "length-10", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + 
"$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000008" + }, + "files_id": { + "$oid": "000000000000000000000006" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "download when length is zero", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "" + } + } + ] + }, + { + "description": "download when length is zero and there is one empty chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } + }, + "expectResult": { + "$$matchesHexBytes": "" + } + } + ] + }, + { + "description": "download when there is one chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + } + }, + "expectResult": { + "$$matchesHexBytes": "1122" + } + } + ] + }, + { + "description": "download when there are two chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + }, + "expectResult": { + "$$matchesHexBytes": "1122334455667788" + } + } + ] + }, + { + "description": "download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } + } + ] + }, + { + "description": "download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "download when final chunk is missing", + "operations": [ + { + "name": 
"deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "download when an intermediate chunk is the wrong size", + "operations": [ + { + "name": "bulkWrite", + "object": "bucket0_chunks_collection", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "VWZ3", + "subType": "00" + } + } + } + } + } + }, + { + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "iJmq", + "subType": "00" + } + } + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "download when final chunk is the wrong size", + "operations": [ + { + "name": "updateOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "mQ==", + "subType": "00" + } + } + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "download legacy file with no name", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000006" + } + }, + "expectResult": { + "$$matchesHexBytes": "1122" + } + } + ] + } + ] +} diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json new file mode 100644 index 0000000000..45abaf7b42 --- /dev/null +++ b/test/gridfs/downloadByName.json @@ -0,0 +1,315 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-02T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 1, + "chunkSize": 4, + 
"uploadDate": { + "$date": "1970-01-03T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-04T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-05T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Ig==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Mw==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "RA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "downloadByName defaults to latest revision (-1)", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when revision is 0", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 0 + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ] + }, + { + "description": "downloadByName when revision is 1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 1 + }, + "expectResult": { + "$$matchesHexBytes": "22" + } + } + ] + }, + { + "description": "downloadByName when revision is 2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 2 + }, + "expectResult": { + "$$matchesHexBytes": "33" + } + } + ] + }, + { + "description": "downloadByName when revision is -2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -2 + }, + "expectResult": { + "$$matchesHexBytes": "44" + } + } + ] + }, + { + "description": "downloadByName when revision is -1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -1 + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when files entry does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "xyz" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "downloadByName when revision does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + 
"arguments": { + "filename": "abc", + "revision": 999 + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/rename.json b/test/gridfs/rename.json new file mode 100644 index 0000000000..08064d4a5c --- /dev/null +++ b/test/gridfs/rename.json @@ -0,0 +1,179 @@ +{ + "description": "gridfs-rename", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename by id", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + }, + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file id does not exist", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + }, + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/renameByName.json b/test/gridfs/renameByName.json new file mode 100644 index 0000000000..26f04fb9e0 --- /dev/null +++ b/test/gridfs/renameByName.json @@ -0,0 +1,313 @@ +{ + "description": "gridfs-renameByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": 
"bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename when multiple revisions of the file exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "filename", + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + 
"files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file name does not exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file", + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/upload-disableMD5.json b/test/gridfs/upload-disableMD5.json new file mode 100644 index 0000000000..d5a9d6f4ab --- /dev/null +++ b/test/gridfs/upload-disableMD5.json @@ -0,0 +1,172 @@ +{ + "description": "gridfs-upload-disableMD5", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "upload when length is 0 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ] + }, + { + "description": "upload when length is 1 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + 
"n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json new file mode 100644 index 0000000000..3c1644653a --- /dev/null +++ b/test/gridfs/upload.json @@ -0,0 +1,547 @@ +{ + "description": "gridfs-upload", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "upload when length is 0", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "d41d8cd98f00b204e9800998ecf8427e" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ] + }, + { + "description": "upload when length is 1", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "upload when length is 3", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "112233" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 3, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "bafae3a174ab91fc70db7a6aa50f4f52" + }, + 
"filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIz", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "upload when length is 4", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11223344" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 4, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "7e7c77cff5705d1f7574a25ef6662117" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "upload when length is 8", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455667788" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "dd254cdc958e53abaa67da9f797125f5" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": 
"uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "upload when metadata is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4, + "metadata": { + "x": 1 + } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "metadata": { + "x": 1 + } + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/handshake/unified/metadata-not-propagated.json b/test/handshake/unified/metadata-not-propagated.json new file mode 100644 index 0000000000..500b579b89 --- /dev/null +++ b/test/handshake/unified/metadata-not-propagated.json @@ -0,0 +1,100 @@ +{ + "description": "client metadata is not propagated to the server", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandSucceededEvent", + "commandFailedEvent", + "connectionClosedEvent", + "connectionCreatedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "metadata append does not create new connections or close existing ones and no hello command is sent", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "appendMetadata", + "object": "client", + "arguments": { + "driverInfoOptions": { + "name": "framework", + "version": "2.0", + "platform": "Framework Platform" + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + } + ] + }, + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandSucceededEvent": { + "commandName": "ping" + } + }, + { + "commandSucceededEvent": { + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/helpers.py b/test/helpers.py new file mode 100644 index 0000000000..163bf01c12 --- /dev/null +++ b/test/helpers.py @@ -0,0 +1,176 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" +from __future__ import annotations + +import asyncio +import threading +import traceback +from functools import wraps +from typing import Optional, no_type_check + +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = True + + +def repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. + client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + client.admin.command(cmd) + + +class client_knobs: + def __init__( + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): + self.heartbeat_frequency = heartbeat_frequency + self.min_heartbeat_interval = min_heartbeat_interval + self.kill_cursor_frequency = kill_cursor_frequency + self.events_queue_frequency = events_queue_frequency + + self.old_heartbeat_frequency = None + self.old_min_heartbeat_interval = None + self.old_kill_cursor_frequency = None + self.old_events_queue_frequency = None + self._enabled = False + self._stack = None + + def enable(self): + self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY + self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL + self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY + self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY + + if self.heartbeat_frequency is not None: + common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency + + if self.min_heartbeat_interval is not None: + common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval + + if self.kill_cursor_frequency is not None: + common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency + + if self.events_queue_frequency is not None: + common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = "".join(traceback.format_stack()) + + def __enter__(self): + self.enable() + + @no_type_check + def disable(self): + common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency + common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval + common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency + common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + def wrap(*args, **kwargs): + with self: + return f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! 
HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + + +# Global knobs to speed up the test suite. +global_knobs = client_knobs(events_queue_frequency=0.05) + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, **kwargs): + if _IS_SYNC: + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") + self.stopped = False + self.task = None + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) + + if not _IS_SYNC: + + def start(self): + self.task = create_task(self.run(), name=self.name) + + def join(self, timeout: Optional[float] = None): # type: ignore[override] + if self.task is not None: + asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + def run(self): + try: + self.target(*self.args) + finally: + self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + def run(self): + try: + super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/helpers_shared.py b/test/helpers_shared.py new file mode 100644 index 0000000000..49cf131808 --- /dev/null +++ b/test/helpers_shared.py @@ -0,0 +1,271 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import base64 +import gc +import os +import socket +import subprocess +import sys +import traceback +import unittest +from pathlib import Path + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from functools import wraps +from typing import no_type_check +from unittest import SkipTest + +from bson.son import SON +from pymongo import message +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous.uri_parser import parse_uri + +if HAVE_SSL: + import ssl + + +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, "set_debug"): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) + +# The host and port of a single mongod or mongos, or the seed host +# for a replica set. 
+host = os.environ.get("DB_IP", "localhost") +port = int(os.environ.get("DB_PORT", 27017)) +IS_SRV = "mongodb+srv" in host + +db_user = os.environ.get("DB_USER", "user") +db_pwd = os.environ.get("DB_PASSWORD", "password") + +HERE = Path(__file__).absolute() +CERT_PATH = str(HERE.parent / "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) + +TLS_OPTIONS: dict = {"tls": True} +if CLIENT_PEM: + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM +if CA_PEM: + TLS_OPTIONS["tlsCAFile"] = CA_PEM + +COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") + +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + + +# Shared KMS data. +LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AWS_CREDS_2 = { + "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} +AWS_TEMP_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), +} + +ALL_KMS_PROVIDERS = dict( + aws=AWS_CREDS, + azure=AZURE_CREDS, + gcp=GCP_CREDS, + local=dict(key=LOCAL_MASTER_KEY), + kmip=KMIP_CREDS, +) +DEFAULT_KMS_TLS = dict(kmip=dict(tlsCAFile=CA_PEM, tlsCertificateKeyFile=CLIENT_PEM)) + +# Ensure Evergreen metadata doesn't result in truncation +os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") + + +def is_server_resolvable(): + """Returns True if 'server' is resolvable.""" + socket_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(1) + try: + try: + socket.gethostbyname("server") + return True + except OSError: + return False + finally: + socket.setdefaulttimeout(socket_timeout) + + +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) + + +def _all_users(db): + return {u["user"] for u in db.command("usersInfo").get("users", [])} + + +def sanitize_cmd(cmd): + cp = cmd.copy() + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) + if MONGODB_API_VERSION: + # Stable API parameters + cp.pop("apiVersion", None) + # OP_MSG encoding may move the payload type one field to the + # end of the command. Do the same here. 
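+ # Popping and re-inserting the payload field relies on dict insertion
+ # order: e.g. for {"insert": ..., "documents": [...]}, "documents" is
+ # moved to the end of the sanitized copy, mirroring where OP_MSG
+ # encoding places the payload section.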
+ name = next(iter(cp)) + try: + identifier = message._FIELD_MAP[name] + docs = cp.pop(identifier) + cp[identifier] = docs + except KeyError: + pass + return cp + + +def sanitize_reply(reply): + cp = reply.copy() + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) + return cp + + +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + +def _get_executors(topology): + executors = [] + for server in topology._servers.values(): + # Some MockMonitor do not have an _executor. + if hasattr(server._monitor, "_executor"): + executors.append(server._monitor._executor) + if hasattr(server._monitor, "_rtt_monitor"): + executors.append(server._monitor._rtt_monitor._executor) + executors.append(topology._Topology__events_executor) + if topology._srv_monitor: + executors.append(topology._srv_monitor._executor) + + return [e for e in executors if e is not None] + + +def print_running_topology(topology): + running = [e for e in _get_executors(topology) if not e._stopped] + if running: + print( + "WARNING: found Topology with running threads:\n" + f" Threads: {running}\n" + f" Topology: {topology}\n" + f" Creation traceback:\n{topology._settings._stack}" + ) + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +# Helper method to workaround https://bugs.python.org/issue21724 +def clear_warning_registry(): + """Clear the __warningregistry__ for all modules.""" + for _, module in list(sys.modules.items()): + if hasattr(module, "__warningregistry__"): + module.__warningregistry__ = {} # type:ignore[attr-defined] + + +class SystemCertsPatcher: + def __init__(self, ca_certs): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") + # Tell OpenSSL where CA certificates live. + os.environ["SSL_CERT_FILE"] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop("SSL_CERT_FILE") + else: + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/high_availability/ha_tools.py b/test/high_availability/ha_tools.py deleted file mode 100644 index 18a6346996..0000000000 --- a/test/high_availability/ha_tools.py +++ /dev/null @@ -1,498 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for testing high availability in PyMongo.""" - -import os -import random -import shutil -import signal -import socket -import subprocess -import sys -import time - -from stat import S_IRUSR - -import pymongo -import pymongo.errors -from pymongo.read_preferences import ReadPreference - -home = os.environ.get('HOME') -default_dbpath = os.path.join(home, 'data', 'pymongo_high_availability') -dbpath = os.environ.get('DBPATH', default_dbpath) -default_logpath = os.path.join(home, 'log', 'pymongo_high_availability') -logpath = os.environ.get('LOGPATH', default_logpath) -hostname = os.environ.get('HOSTNAME', socket.gethostname()) -port = int(os.environ.get('DBPORT', 27017)) -mongod = os.environ.get('MONGOD', 'mongod') -mongos = os.environ.get('MONGOS', 'mongos') -set_name = os.environ.get('SETNAME', 'repl0') -use_greenlets = bool(os.environ.get('GREENLETS')) -ha_tools_debug = bool(os.environ.get('HA_TOOLS_DEBUG')) - - -nodes = {} -routers = {} -cur_port = port -key_file = None - -try: - from subprocess import DEVNULL # Python 3. -except ImportError: - DEVNULL = open(os.devnull, 'wb') - - -def kill_members(members, sig, hosts=nodes): - for member in sorted(members): - try: - if ha_tools_debug: - print('killing %s' % (member,)), - proc = hosts[member]['proc'] - if 'java' in sys.platform: - # _process is a wrapped java.lang.UNIXProcess. - proc._process.destroy() - # Not sure if cygwin makes sense here... - elif sys.platform in ('win32', 'cygwin'): - os.kill(proc.pid, signal.CTRL_C_EVENT) - else: - os.kill(proc.pid, sig) - except OSError: - if ha_tools_debug: - print('%s already dead?' 
% (member,)) - - -def kill_all_members(): - kill_members(nodes.keys(), 2, nodes) - kill_members(routers.keys(), 2, routers) - - -def wait_for(proc, port_num): - trys = 0 - while proc.poll() is None and trys < 160: - trys += 1 - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - try: - s.connect((hostname, port_num)) - return True - except (IOError, socket.error): - time.sleep(0.25) - finally: - s.close() - - kill_all_members() - return False - - -def start_subprocess(cmd): - """Run cmd (a list of strings) and return a Popen instance.""" - return subprocess.Popen(cmd, stdout=DEVNULL, stderr=DEVNULL) - - -def start_replica_set(members, auth=False, fresh=True): - global cur_port - global key_file - - if fresh: - if os.path.exists(dbpath): - try: - shutil.rmtree(dbpath) - except OSError: - pass - - try: - os.makedirs(dbpath) - except OSError: - exc = sys.exc_info()[1] - print(exc) - print("\tWhile creating %s" % (dbpath,)) - - if auth: - key_file = os.path.join(dbpath, 'key.txt') - if not os.path.exists(key_file): - f = open(key_file, 'w') - try: - f.write("my super secret system password") - finally: - f.close() - os.chmod(key_file, S_IRUSR) - - for i in range(len(members)): - host = '%s:%d' % (hostname, cur_port) - members[i].update({'_id': i, 'host': host}) - path = os.path.join(dbpath, 'db' + str(i)) - if not os.path.exists(path): - os.makedirs(path) - member_logpath = os.path.join(logpath, 'db' + str(i) + '.log') - if not os.path.exists(os.path.dirname(member_logpath)): - os.makedirs(os.path.dirname(member_logpath)) - cmd = [mongod, - '--dbpath', path, - '--port', str(cur_port), - '--replSet', set_name, - '--nojournal', '--oplogSize', '64', - '--logappend', '--logpath', member_logpath] - if auth: - cmd += ['--keyFile', key_file] - - if ha_tools_debug: - print('starting %s' % (' '.join(cmd),)) - - proc = start_subprocess(cmd) - nodes[host] = {'proc': proc, 'cmd': cmd, 'dbpath': path} - res = wait_for(proc, cur_port) - - cur_port += 1 - - if not res: - return None - - config = {'_id': set_name, 'members': members} - primary = members[0]['host'] - c = pymongo.MongoClient(primary, use_greenlets=use_greenlets) - try: - if ha_tools_debug: - print('rs.initiate(%s)' % (config,)) - - c.admin.command('replSetInitiate', config) - except pymongo.errors.OperationFailure: - # Already initialized from a previous run? - if ha_tools_debug: - exc = sys.exc_info()[1] - print(exc) - - expected_arbiters = 0 - for member in members: - if member.get('arbiterOnly'): - expected_arbiters += 1 - expected_secondaries = len(members) - expected_arbiters - 1 - - # Wait a minute for replica set to come up. 
- patience = 1 - for i in range(int(patience * 60 / 2)): - time.sleep(2) - try: - if (get_primary() and - len(get_secondaries()) == expected_secondaries and - len(get_arbiters()) == expected_arbiters): - break - except pymongo.errors.ConnectionFailure: - # Keep waiting - pass - - if ha_tools_debug: - print('waiting for RS %s' % (i,)) - else: - kill_all_members() - raise Exception( - "Replica set still not initalized after %s minutes" % patience) - return primary, set_name - - -def create_sharded_cluster(num_routers=3): - global cur_port - - # Start a config server - configdb_host = '%s:%d' % (hostname, cur_port) - path = os.path.join(dbpath, 'configdb') - if not os.path.exists(path): - os.makedirs(path) - configdb_logpath = os.path.join(logpath, 'configdb.log') - cmd = [mongod, - '--dbpath', path, - '--port', str(cur_port), - '--nojournal', '--logappend', - '--logpath', configdb_logpath] - proc = start_subprocess(cmd) - nodes[configdb_host] = {'proc': proc, 'cmd': cmd, 'dbpath': path} - res = wait_for(proc, cur_port) - if not res: - return None - - # ...and a shard server - cur_port = cur_port + 1 - shard_host = '%s:%d' % (hostname, cur_port) - path = os.path.join(dbpath, 'shard1') - if not os.path.exists(path): - os.makedirs(path) - db_logpath = os.path.join(logpath, 'shard1.log') - cmd = [mongod, - '--dbpath', path, - '--port', str(cur_port), - '--nojournal', '--logappend', - '--logpath', db_logpath] - proc = start_subprocess(cmd) - nodes[shard_host] = {'proc': proc, 'cmd': cmd, 'dbpath': path} - res = wait_for(proc, cur_port) - if not res: - return None - - # ...and a few mongos instances - cur_port = cur_port + 1 - for i in range(num_routers): - cur_port = cur_port + i - host = '%s:%d' % (hostname, cur_port) - mongos_logpath = os.path.join(logpath, 'mongos' + str(i) + '.log') - cmd = [mongos, - '--port', str(cur_port), - '--logappend', - '--logpath', mongos_logpath, - '--configdb', configdb_host] - proc = start_subprocess(cmd) - routers[host] = {'proc': proc, 'cmd': cmd} - res = wait_for(proc, cur_port) - if not res: - return None - - # Add the shard - client = pymongo.MongoClient(host) - try: - client.admin.command({'addshard': shard_host}) - except pymongo.errors.OperationFailure: - # Already configured. - pass - - return get_mongos_seed_list() - - -# Connect to a random member -def get_client(): - # Attempt a direct connection to each node until one succeeds. Using a - # non-PRIMARY read preference allows us to use the node even if it's a - # secondary. 
- for i, node in enumerate(nodes.keys()): - try: - return pymongo.MongoClient( - node, - read_preference=ReadPreference.PRIMARY_PREFERRED, - use_greenlets=use_greenlets) - except pymongo.errors.ConnectionFailure: - if i == len(nodes.keys()) - 1: - raise - - -def get_mongos_seed_list(): - members = routers.keys() - return ','.join(members) - - -def kill_mongos(host): - kill_members([host], 2, hosts=routers) - return host - - -def restart_mongos(host): - restart_members([host], True) - - -def get_members_in_state(state): - status = get_client().admin.command('replSetGetStatus') - members = status['members'] - return [k['name'] for k in members if k['state'] == state] - - -def get_primary(): - try: - primaries = get_members_in_state(1) - assert len(primaries) <= 1 - if primaries: - return primaries[0] - except (pymongo.errors.ConnectionFailure, pymongo.errors.OperationFailure): - pass - - return None - - -def wait_for_primary(): - for _ in range(30): - time.sleep(1) - if get_primary(): - break - else: - raise AssertionError("Primary didn't come back up") - - -def get_random_secondary(): - secondaries = get_members_in_state(2) - if len(secondaries): - return random.choice(secondaries) - return None - - -def get_secondaries(): - return get_members_in_state(2) - - -def get_arbiters(): - return get_members_in_state(7) - - -def get_recovering(): - return get_members_in_state(3) - - -def get_passives(): - return get_client().admin.command('ismaster').get('passives', []) - - -def get_hosts(): - return get_client().admin.command('ismaster').get('hosts', []) - - -def get_hidden_members(): - # Both 'hidden' and 'slaveDelay' - secondaries = get_secondaries() - readers = get_hosts() + get_passives() - for member in readers: - try: - secondaries.remove(member) - except: - # Skip primary - pass - return secondaries - - -def get_tags(member): - config = get_client().local.system.replset.find_one() - for m in config['members']: - if m['host'] == member: - return m.get('tags', {}) - - raise Exception('member %s not in config' % repr(member)) - - -def kill_primary(sig=2): - primary = get_primary() - kill_members([primary], sig) - return primary - - -def kill_secondary(sig=2): - secondary = get_random_secondary() - kill_members([secondary], sig) - return secondary - - -def kill_all_secondaries(sig=2): - secondaries = get_secondaries() - kill_members(secondaries, sig) - return secondaries - - -# TODO: refactor w/ start_replica_set -def add_member(auth=False): - global cur_port - host = '%s:%d' % (hostname, cur_port) - primary = get_primary() - assert primary - c = pymongo.MongoClient(primary, use_greenlets=use_greenlets) - config = c.local.system.replset.find_one() - _id = max([member['_id'] for member in config['members']]) + 1 - member = {'_id': _id, 'host': host} - path = os.path.join(dbpath, 'db' + str(_id)) - if os.path.exists(path): - shutil.rmtree(path) - - os.makedirs(path) - member_logpath = os.path.join(logpath, 'db' + str(_id) + '.log') - if not os.path.exists(os.path.dirname(member_logpath)): - os.makedirs(os.path.dirname(member_logpath)) - cmd = [mongod, - '--dbpath', path, - '--port', str(cur_port), - '--replSet', set_name, - '--nojournal', '--oplogSize', '64', - '--logappend', '--logpath', member_logpath] - if auth: - cmd += ['--keyFile', key_file] - - if ha_tools_debug: - print 'starting', ' '.join(cmd) - - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - nodes[host] = {'proc': proc, 'cmd': cmd, 'dbpath': path} - res = wait_for(proc, cur_port) - - cur_port += 1 
- - config['members'].append(member) - config['version'] += 1 - - if ha_tools_debug: - print {'replSetReconfig': config} - - response = c.admin.command({'replSetReconfig': config}) - if ha_tools_debug: - print response - - if not res: - return None - return host - - -def stepdown_primary(): - primary = get_primary() - if primary: - if ha_tools_debug: - print('stepping down primary: %s' % (primary,)) - c = pymongo.MongoClient(primary, use_greenlets=use_greenlets) - # replSetStepDown causes mongod to close all connections - try: - c.admin.command('replSetStepDown', 20) - except Exception: - if ha_tools_debug: - exc = sys.exc_info()[1] - print('Exception from replSetStepDown: %s' % exc) - if ha_tools_debug: - print('\tcalled replSetStepDown') - elif ha_tools_debug: - print('stepdown_primary() found no primary') - - -def set_maintenance(member, value): - """Put a member into RECOVERING state if value is True, else normal state. - """ - c = pymongo.MongoClient(member, use_greenlets=use_greenlets) - c.admin.command('replSetMaintenance', value) - start = time.time() - while value != (member in get_recovering()): - assert (time.time() - start) <= 10, ( - "Member %s never switched state" % member) - - time.sleep(0.25) - - -def restart_members(members, router=False): - restarted = [] - for member in members: - if router: - cmd = routers[member]['cmd'] - else: - cmd = nodes[member]['cmd'] - lockfile_path = os.path.join(nodes[member]['dbpath'], 'mongod.lock') - if os.path.exists(lockfile_path): - os.remove(lockfile_path) - - proc = start_subprocess(cmd) - if router: - routers[member]['proc'] = proc - else: - nodes[member]['proc'] = proc - res = wait_for(proc, int(member.split(':')[1])) - if res: - restarted.append(member) - return restarted diff --git a/test/high_availability/test_ha.py b/test/high_availability/test_ha.py deleted file mode 100644 index 4a49dfc58d..0000000000 --- a/test/high_availability/test_ha.py +++ /dev/null @@ -1,1177 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test replica set operations and failures.""" - -# These test methods exuberantly violate the "one assert per test" rule, because -# each method requires running setUp, which takes about 30 seconds to bring up -# a replica set. Thus each method asserts everything we want to assert for a -# given replica-set configuration. - -import time -import unittest - -import ha_tools -from ha_tools import use_greenlets - -from nose.plugins.skip import SkipTest -from pymongo.errors import (AutoReconnect, - OperationFailure, - ConnectionFailure, - WTimeoutError) -from pymongo.member import Member -from pymongo.mongo_replica_set_client import Monitor -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.mongo_client import MongoClient, _partition_node -from pymongo.read_preferences import ReadPreference, modes - -from test import utils, version -from test.utils import one - - -# May be imported from gevent, below. 
-sleep = time.sleep - - -# Override default 30-second interval for faster testing -Monitor._refresh_interval = MONITOR_INTERVAL = 0.5 - - -# To make the code terser, copy modes into module scope -PRIMARY = ReadPreference.PRIMARY -PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED -SECONDARY = ReadPreference.SECONDARY -SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED -NEAREST = ReadPreference.NEAREST - - -def partition_nodes(nodes): - """Translate from ['host:port', ...] to [(host, port), ...]""" - return [_partition_node(node) for node in nodes] - - -# Backport permutations to Python 2.4. -# http://docs.python.org/2.7/library/itertools.html#itertools.permutations -def permutations(iterable, r=None): - pool = tuple(iterable) - n = len(pool) - if r is None: - r = n - if r > n: - return - indices = range(n) - cycles = range(n, n-r, -1) - yield tuple(pool[i] for i in indices[:r]) - while n: - for i in reversed(range(r)): - cycles[i] -= 1 - if cycles[i] == 0: - indices[i:] = indices[i+1:] + indices[i:i+1] - cycles[i] = n - i - else: - j = cycles[i] - indices[i], indices[-j] = indices[-j], indices[i] - yield tuple(pool[i] for i in indices[:r]) - break - else: - return - - -class HATestCase(unittest.TestCase): - """A test case for connections to replica sets or mongos.""" - - def tearDown(self): - ha_tools.kill_all_members() - ha_tools.nodes.clear() - ha_tools.routers.clear() - sleep(1) # Let members really die. - - -class TestDirectConnection(HATestCase): - - def setUp(self): - members = [{}, {}, {'arbiterOnly': True}] - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - - def test_secondary_connection(self): - self.c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.assertTrue(bool(len(self.c.secondaries))) - db = self.c.pymongo_test - - # Wait for replication... 
- w = len(self.c.secondaries) + 1 - db.test.remove({}, w=w) - db.test.insert({'foo': 'bar'}, w=w) - - # Test direct connection to a primary or secondary - primary_host, primary_port = ha_tools.get_primary().split(':') - primary_port = int(primary_port) - (secondary_host, - secondary_port) = ha_tools.get_secondaries()[0].split(':') - secondary_port = int(secondary_port) - arbiter_host, arbiter_port = ha_tools.get_arbiters()[0].split(':') - arbiter_port = int(arbiter_port) - - # MongoClient succeeds no matter the read preference - for kwargs in [ - {'read_preference': PRIMARY}, - {'read_preference': PRIMARY_PREFERRED}, - {'read_preference': SECONDARY}, - {'read_preference': SECONDARY_PREFERRED}, - {'read_preference': NEAREST}, - {'slave_okay': True} - ]: - client = MongoClient(primary_host, - primary_port, - use_greenlets=use_greenlets, - **kwargs) - self.assertEqual(primary_host, client.host) - self.assertEqual(primary_port, client.port) - self.assertTrue(client.is_primary) - - # Direct connection to primary can be queried with any read pref - self.assertTrue(client.pymongo_test.test.find_one()) - - client = MongoClient(secondary_host, - secondary_port, - use_greenlets=use_greenlets, - **kwargs) - self.assertEqual(secondary_host, client.host) - self.assertEqual(secondary_port, client.port) - self.assertFalse(client.is_primary) - - # Direct connection to secondary can be queried with any read pref - # but PRIMARY - if kwargs.get('read_preference') != PRIMARY: - self.assertTrue(client.pymongo_test.test.find_one()) - else: - self.assertRaises( - AutoReconnect, client.pymongo_test.test.find_one) - - # Since an attempt at an acknowledged write to a secondary from a - # direct connection raises AutoReconnect('not master'), MongoClient - # should do the same for unacknowledged writes. 
- try: - client.pymongo_test.test.insert({}, w=0) - except AutoReconnect, e: - self.assertEqual('not master', e.args[0]) - else: - self.fail( - 'Unacknowledged insert into secondary client %s should' - 'have raised exception' % (client,)) - - # Test direct connection to an arbiter - client = MongoClient(arbiter_host, arbiter_port, **kwargs) - self.assertEqual(arbiter_host, client.host) - self.assertEqual(arbiter_port, client.port) - self.assertFalse(client.is_primary) - - # See explanation above - try: - client.pymongo_test.test.insert({}, w=0) - except AutoReconnect, e: - self.assertEqual('not master', e.args[0]) - else: - self.fail( - 'Unacknowledged insert into arbiter client %s should' - 'have raised exception' % (client,)) - - def tearDown(self): - self.c.close() - super(TestDirectConnection, self).tearDown() - - -class TestPassiveAndHidden(HATestCase): - - def setUp(self): - members = [{}, - {'priority': 0}, - {'arbiterOnly': True}, - {'priority': 0, 'hidden': True}, - {'priority': 0, 'slaveDelay': 5} - ] - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - - def test_passive_and_hidden(self): - self.c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - passives = ha_tools.get_passives() - passives = partition_nodes(passives) - self.assertEqual(self.c.secondaries, set(passives)) - - for mode in SECONDARY, SECONDARY_PREFERRED: - utils.assertReadFromAll(self, self.c, passives, mode) - - ha_tools.kill_members(ha_tools.get_passives(), 2) - sleep(2 * MONITOR_INTERVAL) - utils.assertReadFrom(self, self.c, self.c.primary, SECONDARY_PREFERRED) - - def tearDown(self): - self.c.close() - super(TestPassiveAndHidden, self).tearDown() - - -class TestMonitorRemovesRecoveringMember(HATestCase): - # Members in STARTUP2 or RECOVERING states are shown in the primary's - # isMaster response, but aren't secondaries and shouldn't be read from. - # Verify that if a secondary goes into RECOVERING mode, the Monitor removes - # it from the set of readers. - - def setUp(self): - members = [{}, {'priority': 0}, {'priority': 0}] - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - - def test_monitor_removes_recovering_member(self): - self.c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - secondaries = ha_tools.get_secondaries() - - for mode in SECONDARY, SECONDARY_PREFERRED: - partitioned_secondaries = partition_nodes(secondaries) - utils.assertReadFromAll(self, self.c, partitioned_secondaries, mode) - - secondary, recovering_secondary = secondaries - ha_tools.set_maintenance(recovering_secondary, True) - sleep(2 * MONITOR_INTERVAL) - - for mode in SECONDARY, SECONDARY_PREFERRED: - # Don't read from recovering member - utils.assertReadFrom(self, self.c, _partition_node(secondary), mode) - - def tearDown(self): - self.c.close() - super(TestMonitorRemovesRecoveringMember, self).tearDown() - - -class TestTriggeredRefresh(HATestCase): - # Verify that if a secondary goes into RECOVERING mode or if the primary - # changes, the next exception triggers an immediate refresh. 
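Several of these cases flip a member in and out of RECOVERING through ha_tools.set_maintenance, which is a thin wrapper over the server's replSetMaintenance command. A minimal sketch of the same round-trip with a modern PyMongo; the host and port are placeholders:

    from pymongo import MongoClient

    member = MongoClient("localhost", 27018, directConnection=True)
    member.admin.command("replSetMaintenance", True)   # member reports RECOVERING
    # Reads routed by SECONDARY preferences must now skip this member.
    member.admin.command("replSetMaintenance", False)  # back to SECONDARY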
- - def setUp(self): - members = [{}, {}] - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - - # Disable periodic refresh - Monitor._refresh_interval = 1e6 - - def test_recovering_member_triggers_refresh(self): - # To test that find_one() and count() trigger immediate refreshes, - # we'll create a separate client for each - self.c_find_one, self.c_count = [ - MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets, - read_preference=SECONDARY) - for _ in xrange(2)] - - # We've started the primary and one secondary - primary = ha_tools.get_primary() - secondary = ha_tools.get_secondaries()[0] - - # Pre-condition: just make sure they all connected OK - for c in self.c_find_one, self.c_count: - self.assertEqual(one(c.secondaries), _partition_node(secondary)) - - ha_tools.set_maintenance(secondary, True) - - # Trigger a refresh in various ways - self.assertRaises(AutoReconnect, self.c_find_one.test.test.find_one) - self.assertRaises(AutoReconnect, self.c_count.test.test.count) - - # Wait for the immediate refresh to complete - we're not waiting for - # the periodic refresh, which has been disabled - sleep(1) - - for c in self.c_find_one, self.c_count: - self.assertFalse(c.secondaries) - self.assertEqual(_partition_node(primary), c.primary) - - def test_stepdown_triggers_refresh(self): - c_find_one = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - # We've started the primary and one secondary - primary = ha_tools.get_primary() - secondary = ha_tools.get_secondaries()[0] - self.assertEqual( - one(c_find_one.secondaries), _partition_node(secondary)) - - ha_tools.stepdown_primary() - - # Make sure the stepdown completes - sleep(1) - - # Trigger a refresh - self.assertRaises(AutoReconnect, c_find_one.test.test.find_one) - - # Wait for the immediate refresh to complete - we're not waiting for - # the periodic refresh, which has been disabled - sleep(1) - - # We've detected the stepdown - self.assertTrue( - not c_find_one.primary - or _partition_node(primary) != c_find_one.primary) - - def tearDown(self): - Monitor._refresh_interval = MONITOR_INTERVAL - super(TestTriggeredRefresh, self).tearDown() - - -class TestHealthMonitor(HATestCase): - - def setUp(self): - res = ha_tools.start_replica_set([{}, {}, {}]) - self.seed, self.name = res - - def test_primary_failure(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.assertTrue(bool(len(c.secondaries))) - primary = c.primary - secondaries = c.secondaries - - # Wait for new primary to be elected - def primary_changed(): - for _ in xrange(30): - if c.primary and c.primary != primary: - return True - sleep(1) - return False - - killed = ha_tools.kill_primary() - self.assertTrue(bool(len(killed))) - self.assertTrue(primary_changed()) - self.assertNotEqual(secondaries, c.secondaries) - - def test_secondary_failure(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.assertTrue(bool(len(c.secondaries))) - primary = c.primary - secondaries = c.secondaries - - def readers_changed(): - for _ in xrange(20): - if c.secondaries != secondaries: - return True - - sleep(1) - return False - - killed = ha_tools.kill_secondary() - sleep(2 * MONITOR_INTERVAL) - self.assertTrue(bool(len(killed))) - self.assertEqual(primary, c.primary) - self.assertTrue(readers_changed()) - secondaries = c.secondaries - - ha_tools.restart_members([killed]) - self.assertEqual(primary, 
c.primary) - self.assertTrue(readers_changed()) - - def test_primary_stepdown(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.assertTrue(bool(len(c.secondaries))) - primary = c.primary - ha_tools.stepdown_primary() - - # Wait for new primary - patience_seconds = 30 - for _ in xrange(patience_seconds): - sleep(1) - rs_state = c._MongoReplicaSetClient__rs_state - if rs_state.writer and rs_state.writer != primary: - if ha_tools.get_primary(): - # New primary stepped up - new_primary = _partition_node(ha_tools.get_primary()) - self.assertEqual(new_primary, rs_state.writer) - new_secondaries = partition_nodes(ha_tools.get_secondaries()) - self.assertEqual(set(new_secondaries), rs_state.secondaries) - break - else: - self.fail( - "No new primary after %s seconds. Old primary was %s, current" - " is %s" % (patience_seconds, primary, ha_tools.get_primary())) - - -class TestWritesWithFailover(HATestCase): - - def setUp(self): - res = ha_tools.start_replica_set([{}, {}, {}]) - self.seed, self.name = res - - # Disable periodic refresh. - Monitor._refresh_interval = 1e6 - - def test_writes_with_failover(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - primary = c.primary - db = c.pymongo_test - w = len(c.secondaries) + 1 - db.test.remove({}, w=w) - db.test.insert({'foo': 'bar'}, w=w) - self.assertEqual('bar', db.test.find_one()['foo']) - - killed = ha_tools.kill_primary(9) - self.assertTrue(bool(len(killed))) - - # Wait past pool's check interval, so it throws an error from - # get_socket(). - sleep(1) - - # Verify that we only raise AutoReconnect, not some other error, - # while we wait for new primary. - for _ in xrange(10000): - try: - db.test.insert({'bar': 'baz'}) - - # No error, found primary. - break - except AutoReconnect: - sleep(.01) - else: - self.fail("Couldn't connect to new primary") - - # Found new primary. 
- self.assertTrue(c.primary) - self.assertTrue(primary != c.primary) - self.assertEqual('baz', db.test.find_one({'bar': 'baz'})['bar']) - - def tearDown(self): - Monitor._refresh_interval = MONITOR_INTERVAL - super(TestWritesWithFailover, self).tearDown() - - -class TestReadWithFailover(HATestCase): - - def setUp(self): - res = ha_tools.start_replica_set([{}, {}, {}]) - self.seed, self.name = res - - def test_read_with_failover(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.assertTrue(bool(len(c.secondaries))) - - def iter_cursor(cursor): - for _ in cursor: - pass - return True - - db = c.pymongo_test - w = len(c.secondaries) + 1 - db.test.remove({}, w=w) - # Force replication - db.test.insert([{'foo': i} for i in xrange(10)], w=w) - self.assertEqual(10, db.test.count()) - - db.read_preference = SECONDARY_PREFERRED - cursor = db.test.find().batch_size(5) - cursor.next() - self.assertEqual(5, cursor._Cursor__retrieved) - self.assertTrue(cursor._Cursor__connection_id in c.secondaries) - ha_tools.kill_primary() - # Primary failure shouldn't interrupt the cursor - self.assertTrue(iter_cursor(cursor)) - self.assertEqual(10, cursor._Cursor__retrieved) - - -class TestReadPreference(HATestCase): - def setUp(self): - members = [ - # primary - {'tags': {'dc': 'ny', 'name': 'primary'}}, - - # secondary - {'tags': {'dc': 'la', 'name': 'secondary'}, 'priority': 0}, - - # other_secondary - {'tags': {'dc': 'ny', 'name': 'other_secondary'}, 'priority': 0}, - ] - - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - - primary = ha_tools.get_primary() - self.primary = _partition_node(primary) - self.primary_tags = ha_tools.get_tags(primary) - # Make sure priority worked - self.assertEqual('primary', self.primary_tags['name']) - - self.primary_dc = {'dc': self.primary_tags['dc']} - - secondaries = ha_tools.get_secondaries() - - (secondary, ) = [ - s for s in secondaries - if ha_tools.get_tags(s)['name'] == 'secondary'] - - self.secondary = _partition_node(secondary) - self.secondary_tags = ha_tools.get_tags(secondary) - self.secondary_dc = {'dc': self.secondary_tags['dc']} - - (other_secondary, ) = [ - s for s in secondaries - if ha_tools.get_tags(s)['name'] == 'other_secondary'] - - self.other_secondary = _partition_node(other_secondary) - self.other_secondary_tags = ha_tools.get_tags(other_secondary) - self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']} - - self.c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - self.db = self.c.pymongo_test - self.w = len(self.c.secondaries) + 1 - self.db.test.remove({}, w=self.w) - self.db.test.insert( - [{'foo': i} for i in xrange(10)], w=self.w) - - self.clear_ping_times() - - def set_ping_time(self, host, ping_time_seconds): - Member._host_to_ping_time[host] = ping_time_seconds - - def clear_ping_times(self): - Member._host_to_ping_time.clear() - - def test_read_preference(self): - # We pass through four states: - # - # 1. A primary and two secondaries - # 2. Primary down - # 3. Primary up, one secondary down - # 4. 
Primary up, all secondaries down - # - # For each state, we verify the behavior of PRIMARY, - # PRIMARY_PREFERRED, SECONDARY, SECONDARY_PREFERRED, and NEAREST - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - def assertReadFrom(member, *args, **kwargs): - utils.assertReadFrom(self, c, member, *args, **kwargs) - - def assertReadFromAll(members, *args, **kwargs): - utils.assertReadFromAll(self, c, members, *args, **kwargs) - - def unpartition_node(node): - host, port = node - return '%s:%s' % (host, port) - - # To make the code terser, copy hosts into local scope - primary = self.primary - secondary = self.secondary - other_secondary = self.other_secondary - - bad_tag = {'bad': 'tag'} - - # 1. THREE MEMBERS UP ------------------------------------------------- - # PRIMARY - assertReadFrom(primary, PRIMARY) - - # PRIMARY_PREFERRED - # Trivial: mode and tags both match - assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc) - - # Secondary matches but not primary, choose primary - assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc) - - # Chooses primary, ignoring tag sets - assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc) - - # Chooses primary, ignoring tag sets - assertReadFrom(primary, PRIMARY_PREFERRED, bad_tag) - assertReadFrom(primary, PRIMARY_PREFERRED, [bad_tag, {}]) - - # SECONDARY - assertReadFromAll([secondary, other_secondary], SECONDARY) - - # SECONDARY_PREFERRED - assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED) - - # Multiple tags - assertReadFrom(secondary, SECONDARY_PREFERRED, self.secondary_tags) - - # Fall back to primary if it's the only one matching the tags - assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'primary'}) - - # No matching secondaries - assertReadFrom(primary, SECONDARY_PREFERRED, bad_tag) - - # Fall back from non-matching tag set to matching set - assertReadFromAll([secondary, other_secondary], - SECONDARY_PREFERRED, [bad_tag, {}]) - - assertReadFrom(other_secondary, - SECONDARY_PREFERRED, [bad_tag, {'dc': 'ny'}]) - - # NEAREST - self.clear_ping_times() - - assertReadFromAll([primary, secondary, other_secondary], NEAREST) - - assertReadFromAll([primary, other_secondary], - NEAREST, [bad_tag, {'dc': 'ny'}]) - - self.set_ping_time(primary, 0) - self.set_ping_time(secondary, .03) # 30 ms - self.set_ping_time(other_secondary, 10) - - # Nearest member, no tags - assertReadFrom(primary, NEAREST) - - # Tags override nearness - assertReadFrom(primary, NEAREST, {'name': 'primary'}) - assertReadFrom(secondary, NEAREST, self.secondary_dc) - - # Make secondary fast - self.set_ping_time(primary, .03) # 30 ms - self.set_ping_time(secondary, 0) - - assertReadFrom(secondary, NEAREST) - - # Other secondary fast - self.set_ping_time(secondary, 10) - self.set_ping_time(other_secondary, 0) - - assertReadFrom(other_secondary, NEAREST) - - # High secondaryAcceptableLatencyMS, should read from all members - assertReadFromAll( - [primary, secondary, other_secondary], - NEAREST, secondary_acceptable_latency_ms=1000*1000) - - self.clear_ping_times() - - assertReadFromAll([primary, other_secondary], NEAREST, [{'dc': 'ny'}]) - - # 2. 
PRIMARY DOWN ----------------------------------------------------- - killed = ha_tools.kill_primary() - - # Let monitor notice primary's gone - sleep(2 * MONITOR_INTERVAL) - - # PRIMARY - assertReadFrom(None, PRIMARY) - - # PRIMARY_PREFERRED - # No primary, choose matching secondary - assertReadFromAll([secondary, other_secondary], PRIMARY_PREFERRED) - assertReadFrom(secondary, PRIMARY_PREFERRED, {'name': 'secondary'}) - - # No primary or matching secondary - assertReadFrom(None, PRIMARY_PREFERRED, bad_tag) - - # SECONDARY - assertReadFromAll([secondary, other_secondary], SECONDARY) - - # Only primary matches - assertReadFrom(None, SECONDARY, {'name': 'primary'}) - - # No matching secondaries - assertReadFrom(None, SECONDARY, bad_tag) - - # SECONDARY_PREFERRED - assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED) - - # Mode and tags both match - assertReadFrom(secondary, SECONDARY_PREFERRED, {'name': 'secondary'}) - - # NEAREST - self.clear_ping_times() - - assertReadFromAll([secondary, other_secondary], NEAREST) - - # 3. PRIMARY UP, ONE SECONDARY DOWN ----------------------------------- - ha_tools.restart_members([killed]) - ha_tools.wait_for_primary() - - ha_tools.kill_members([unpartition_node(secondary)], 2) - sleep(5) - ha_tools.wait_for_primary() - self.assertTrue(MongoClient( - unpartition_node(primary), use_greenlets=use_greenlets, - read_preference=PRIMARY_PREFERRED - ).admin.command('ismaster')['ismaster']) - - sleep(2 * MONITOR_INTERVAL) - - # PRIMARY - assertReadFrom(primary, PRIMARY) - - # PRIMARY_PREFERRED - assertReadFrom(primary, PRIMARY_PREFERRED) - - # SECONDARY - assertReadFrom(other_secondary, SECONDARY) - assertReadFrom(other_secondary, SECONDARY, self.other_secondary_dc) - - # Only the down secondary matches - assertReadFrom(None, SECONDARY, {'name': 'secondary'}) - - # SECONDARY_PREFERRED - assertReadFrom(other_secondary, SECONDARY_PREFERRED) - assertReadFrom( - other_secondary, SECONDARY_PREFERRED, self.other_secondary_dc) - - # The secondary matching the tag is down, use primary - assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'}) - - # NEAREST - assertReadFromAll([primary, other_secondary], NEAREST) - assertReadFrom(other_secondary, NEAREST, {'name': 'other_secondary'}) - assertReadFrom(primary, NEAREST, {'name': 'primary'}) - - # 4. 
PRIMARY UP, ALL SECONDARIES DOWN --------------------------------- - ha_tools.kill_members([unpartition_node(other_secondary)], 2) - self.assertTrue(MongoClient( - unpartition_node(primary), use_greenlets=use_greenlets, - read_preference=PRIMARY_PREFERRED - ).admin.command('ismaster')['ismaster']) - - # PRIMARY - assertReadFrom(primary, PRIMARY) - - # PRIMARY_PREFERRED - assertReadFrom(primary, PRIMARY_PREFERRED) - assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc) - - # SECONDARY - assertReadFrom(None, SECONDARY) - assertReadFrom(None, SECONDARY, self.other_secondary_dc) - assertReadFrom(None, SECONDARY, {'dc': 'ny'}) - - # SECONDARY_PREFERRED - assertReadFrom(primary, SECONDARY_PREFERRED) - assertReadFrom(primary, SECONDARY_PREFERRED, self.secondary_dc) - assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'}) - assertReadFrom(primary, SECONDARY_PREFERRED, {'dc': 'ny'}) - - # NEAREST - assertReadFrom(primary, NEAREST) - assertReadFrom(None, NEAREST, self.secondary_dc) - assertReadFrom(None, NEAREST, {'name': 'secondary'}) - - # Even if primary's slow, still read from it - self.set_ping_time(primary, 100) - assertReadFrom(primary, NEAREST) - assertReadFrom(None, NEAREST, self.secondary_dc) - - self.clear_ping_times() - - def test_pinning(self): - # To make the code terser, copy modes into local scope - PRIMARY = ReadPreference.PRIMARY - PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED - SECONDARY = ReadPreference.SECONDARY - SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED - NEAREST = ReadPreference.NEAREST - - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets, - auto_start_request=True) - - # Verify that changing the mode unpins the member. We'll try it for - # every relevant change of mode. - for mode0, mode1 in permutations( - (PRIMARY, SECONDARY, SECONDARY_PREFERRED, NEAREST), 2 - ): - # Try reading and then changing modes and reading again, see if we - # read from a different host - for _ in range(1000): - # pin to this host - host = utils.read_from_which_host(c, mode0) - # unpin? - new_host = utils.read_from_which_host(c, mode1) - if host != new_host: - # Reading with a different mode unpinned, hooray! - break - else: - self.fail( - "Changing from mode %s to mode %s never unpinned" % ( - modes[mode0], modes[mode1])) - - # Now verify changing the tag_sets unpins the member. - tags0 = [{'a': 'a'}, {}] - tags1 = [{'a': 'x'}, {}] - for _ in range(1000): - host = utils.read_from_which_host(c, NEAREST, tags0) - new_host = utils.read_from_which_host(c, NEAREST, tags1) - if host != new_host: - break - else: - self.fail( - "Changing from tags %s to tags %s never unpinned" % ( - tags0, tags1)) - - # Finally, verify changing the secondary_acceptable_latency_ms unpins - # the member. 
- for _ in range(1000): - host = utils.read_from_which_host(c, SECONDARY, None, 15) - new_host = utils.read_from_which_host(c, SECONDARY, None, 20) - if host != new_host: - break - else: - self.fail( - "Changing secondary_acceptable_latency_ms from 15 to 20" - " never unpinned") - - def tearDown(self): - self.c.close() - super(TestReadPreference, self).tearDown() - - -class TestReplicaSetAuth(HATestCase): - def setUp(self): - members = [ - {}, - {'priority': 0}, - {'priority': 0}, - ] - - res = ha_tools.start_replica_set(members, auth=True) - self.c = MongoReplicaSetClient(res[0], replicaSet=res[1], - use_greenlets=use_greenlets) - - # Add an admin user to enable auth - self.c.admin.add_user('admin', 'adminpass') - self.c.admin.authenticate('admin', 'adminpass') - - self.db = self.c.pymongo_ha_auth - self.db.add_user('user', 'userpass') - self.c.admin.logout() - - def test_auth_during_failover(self): - self.assertTrue(self.db.authenticate('user', 'userpass')) - self.assertTrue(self.db.foo.insert({'foo': 'bar'}, - safe=True, w=3, wtimeout=3000)) - self.db.logout() - self.assertRaises(OperationFailure, self.db.foo.find_one) - - primary = self.c.primary - ha_tools.kill_members(['%s:%d' % primary], 2) - - # Let monitor notice primary's gone - sleep(2 * MONITOR_INTERVAL) - self.assertFalse(primary == self.c.primary) - - # Make sure we can still authenticate - self.assertTrue(self.db.authenticate('user', 'userpass')) - # And still query. - self.db.read_preference = PRIMARY_PREFERRED - self.assertEqual('bar', self.db.foo.find_one()['foo']) - - def tearDown(self): - self.c.close() - super(TestReplicaSetAuth, self).tearDown() - - -class TestAlive(HATestCase): - def setUp(self): - members = [{}, {}] - self.seed, self.name = ha_tools.start_replica_set(members) - - def test_alive(self): - primary = ha_tools.get_primary() - secondary = ha_tools.get_random_secondary() - primary_cx = MongoClient(primary, use_greenlets=use_greenlets) - secondary_cx = MongoClient(secondary, use_greenlets=use_greenlets) - rsc = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - try: - self.assertTrue(primary_cx.alive()) - self.assertTrue(secondary_cx.alive()) - self.assertTrue(rsc.alive()) - - ha_tools.kill_primary() - time.sleep(0.5) - - self.assertFalse(primary_cx.alive()) - self.assertTrue(secondary_cx.alive()) - self.assertFalse(rsc.alive()) - - ha_tools.kill_members([secondary], 2) - time.sleep(0.5) - - self.assertFalse(primary_cx.alive()) - self.assertFalse(secondary_cx.alive()) - self.assertFalse(rsc.alive()) - finally: - rsc.close() - - -class TestMongosHighAvailability(HATestCase): - def setUp(self): - seed_list = ha_tools.create_sharded_cluster() - self.dbname = 'pymongo_mongos_ha' - self.client = MongoClient(seed_list) - self.client.drop_database(self.dbname) - - def test_mongos_ha(self): - coll = self.client[self.dbname].test - self.assertTrue(coll.insert({'foo': 'bar'})) - - first = '%s:%d' % (self.client.host, self.client.port) - ha_tools.kill_mongos(first) - # Fail first attempt - self.assertRaises(AutoReconnect, coll.count) - # Find new mongos - self.assertEqual(1, coll.count()) - - second = '%s:%d' % (self.client.host, self.client.port) - self.assertNotEqual(first, second) - ha_tools.kill_mongos(second) - # Fail first attempt - self.assertRaises(AutoReconnect, coll.count) - # Find new mongos - self.assertEqual(1, coll.count()) - - third = '%s:%d' % (self.client.host, self.client.port) - self.assertNotEqual(second, third) - ha_tools.kill_mongos(third) - # Fail first 
attempt - self.assertRaises(AutoReconnect, coll.count) - - # We've killed all three, restart one. - ha_tools.restart_mongos(first) - - # Find new mongos - self.assertEqual(1, coll.count()) - - def tearDown(self): - self.client.drop_database(self.dbname) - super(TestMongosHighAvailability, self).tearDown() - - -class TestReplicaSetRequest(HATestCase): - def setUp(self): - members = [{}, {}, {'arbiterOnly': True}] - res = ha_tools.start_replica_set(members) - self.c = MongoReplicaSetClient(res[0], replicaSet=res[1], - use_greenlets=use_greenlets, - auto_start_request=True) - - def test_request_during_failover(self): - primary = _partition_node(ha_tools.get_primary()) - secondary = _partition_node(ha_tools.get_random_secondary()) - - self.assertTrue(self.c.auto_start_request) - self.assertTrue(self.c.in_request()) - - rs_state = self.c._MongoReplicaSetClient__rs_state - primary_pool = rs_state.get(primary).pool - secondary_pool = rs_state.get(secondary).pool - - # Trigger start_request on primary pool - utils.assertReadFrom(self, self.c, primary, PRIMARY) - self.assertTrue(primary_pool.in_request()) - - # Fail over - ha_tools.kill_primary() - sleep(5) - - patience_seconds = 60 - for _ in range(patience_seconds): - try: - if ha_tools.ha_tools_debug: - print 'Waiting for failover' - if ha_tools.get_primary(): - # We have a new primary - break - except ConnectionFailure: - pass - - sleep(1) - else: - self.fail("Problem with test: No new primary after %s seconds" - % patience_seconds) - - try: - # Trigger start_request on secondary_pool, which is becoming new - # primary - self.c.test.test.find_one() - except AutoReconnect: - # We've noticed the failover now - pass - - # The old secondary is now primary - utils.assertReadFrom(self, self.c, secondary, PRIMARY) - self.assertTrue(self.c.in_request()) - self.assertTrue(secondary_pool.in_request()) - - def tearDown(self): - self.c.close() - super(TestReplicaSetRequest, self).tearDown() - - -class TestLastErrorDefaults(HATestCase): - - def setUp(self): - members = [{}, {}] - res = ha_tools.start_replica_set(members) - self.seed, self.name = res - self.c = MongoReplicaSetClient(self.seed, replicaSet=self.name, - use_greenlets=use_greenlets) - - def test_get_last_error_defaults(self): - if not version.at_least(self.c, (1, 9, 0)): - raise SkipTest("Need MongoDB >= 1.9.0 to test getLastErrorDefaults") - - replset = self.c.local.system.replset.find_one() - settings = replset.get('settings', {}) - # This should cause a WTimeoutError for every write command - settings['getLastErrorDefaults'] = { - 'w': 3, - 'wtimeout': 1 - } - replset['settings'] = settings - replset['version'] = replset.get("version", 1) + 1 - - self.c.admin.command("replSetReconfig", replset) - - self.assertRaises(WTimeoutError, self.c.pymongo_test.test.insert, - {'_id': 0}) - self.assertRaises(WTimeoutError, self.c.pymongo_test.test.save, - {'_id': 0, "a": 5}) - self.assertRaises(WTimeoutError, self.c.pymongo_test.test.update, - {'_id': 0}, {"$set": {"a": 10}}) - self.assertRaises(WTimeoutError, self.c.pymongo_test.test.remove, - {'_id': 0}) - - def tearDown(self): - self.c.close() - super(TestLastErrorDefaults, self).tearDown() - - -class TestShipOfTheseus(HATestCase): - # If all of a replica set's members are replaced with new ones, is it still - # the same replica set, or a different one? 
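The getLastErrorDefaults test above follows the usual reconfig recipe: read the current config document, edit its settings, bump the version, and submit it back. Roughly the same flow, sketched against a server new enough to expose the document through the replSetGetConfig command rather than the local.system.replset collection, yet old enough to still honor getLastErrorDefaults; the connection string is a placeholder and the client is assumed to reach the primary:

    from pymongo import MongoClient

    client = MongoClient("localhost", 27017)
    config = client.admin.command("replSetGetConfig")["config"]
    config.setdefault("settings", {})["getLastErrorDefaults"] = {"w": 3, "wtimeout": 1}
    config["version"] += 1  # the server rejects a reconfig unless the version increases
    client.admin.command("replSetReconfig", config)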
- def setUp(self): - super(TestShipOfTheseus, self).setUp() - res = ha_tools.start_replica_set([{}, {}]) - self.seed, self.name = res - - def test_ship_of_theseus(self): - c = MongoReplicaSetClient( - self.seed, replicaSet=self.name, use_greenlets=use_greenlets) - - db = c.pymongo_test - db.test.insert({}, w=len(c.secondaries) + 1) - find_one = db.test.find_one - - primary = ha_tools.get_primary() - secondary1 = ha_tools.get_random_secondary() - - new_hosts = [] - for i in range(3): - new_hosts.append(ha_tools.add_member()) - - # RS closes all connections after reconfig. - for j in xrange(30): - try: - if ha_tools.get_primary(): - break - except (ConnectionFailure, OperationFailure): - pass - - sleep(1) - else: - self.fail("Couldn't recover from reconfig") - - # Wait for new members to join. - for _ in xrange(120): - if ha_tools.get_primary() and len(ha_tools.get_secondaries()) == 4: - break - - sleep(1) - else: - self.fail("New secondaries didn't join") - - ha_tools.kill_members([primary, secondary1], 9) - sleep(5) - - # Wait for primary. - for _ in xrange(30): - if ha_tools.get_primary() and len(ha_tools.get_secondaries()) == 2: - break - - sleep(1) - else: - self.fail("No failover") - - sleep(2 * MONITOR_INTERVAL) - - # No error. - find_one() - find_one(read_preference=SECONDARY) - - # All members down. - ha_tools.kill_members(new_hosts, 9) - self.assertRaises( - ConnectionFailure, - find_one, read_preference=SECONDARY) - - ha_tools.restart_members(new_hosts) - - # Should be able to reconnect to set even though original seed - # list is useless. Use SECONDARY so we don't have to wait for - # the election, merely for the client to detect members are up. - sleep(2 * MONITOR_INTERVAL) - find_one(read_preference=SECONDARY) - - # Kill new members and switch back to original two members. - ha_tools.kill_members(new_hosts, 9) - self.assertRaises( - ConnectionFailure, - find_one, read_preference=SECONDARY) - - ha_tools.restart_members([primary, secondary1]) - - # Wait for members to figure out they're secondaries. - for _ in xrange(30): - try: - if len(ha_tools.get_secondaries()) == 2: - break - except ConnectionFailure: - pass - - sleep(1) - else: - self.fail("Original members didn't become secondaries") - - # Should be able to reconnect to set again. 
- sleep(2 * MONITOR_INTERVAL) - find_one(read_preference=SECONDARY) - - -if __name__ == '__main__': - if use_greenlets: - print('Using Gevent') - import gevent - print('gevent version %s' % gevent.__version__) - - from gevent import monkey - monkey.patch_socket() - sleep = gevent.sleep - - unittest.main() diff --git a/test/index_management/createSearchIndex.json b/test/index_management/createSearchIndex.json new file mode 100644 index 0000000000..f9c4e44d3e --- /dev/null +++ b/test/index_management/createSearchIndex.json @@ -0,0 +1,136 @@ +{ + "description": "createSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/createSearchIndexes.json b/test/index_management/createSearchIndexes.json new file mode 100644 index 0000000000..3cf56ce12e --- /dev/null +++ b/test/index_management/createSearchIndexes.json @@ -0,0 +1,172 @@ +{ + "description": "createSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "empty index definition array", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/dropSearchIndex.json b/test/index_management/dropSearchIndex.json new file mode 100644 index 0000000000..d8957a2227 --- /dev/null +++ b/test/index_management/dropSearchIndex.json @@ -0,0 +1,74 @@ +{ + "description": "dropSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/listSearchIndexes.json b/test/index_management/listSearchIndexes.json new file mode 100644 index 0000000000..a8cef42f7a --- /dev/null +++ b/test/index_management/listSearchIndexes.json @@ -0,0 +1,156 @@ +{ + "description": "listSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ 
+ "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "when no name is provided, it does not populate the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ] + } + } + } + ] + } + ] + }, + { + "description": "when a name is provided, it is present in the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "aggregation cursor options are supported", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index", + "aggregationOptions": { + "batchSize": 10 + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 10 + }, + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/searchIndexIgnoresReadWriteConcern.json b/test/index_management/searchIndexIgnoresReadWriteConcern.json new file mode 100644 index 0000000000..edf71b7b7e --- /dev/null +++ b/test/index_management/searchIndexIgnoresReadWriteConcern.json @@ -0,0 +1,252 @@ +{ + "description": "search index operations ignore read and write concern", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "createSearchIndex ignores read and write concern", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createSearchIndexes ignores read and write concern", + "operations": [ + { + "name": 
"createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "dropSearchIndex ignores read and write concern", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "listSearchIndexes ignores read and write concern", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ], + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateSearchIndex ignores the read and write concern", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/updateSearchIndex.json b/test/index_management/updateSearchIndex.json new file mode 100644 index 0000000000..76a5962146 --- /dev/null +++ b/test/index_management/updateSearchIndex.json @@ -0,0 +1,76 @@ +{ + "description": "updateSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": 
"database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/lambda/README.md b/test/lambda/README.md new file mode 100644 index 0000000000..2727a2cee9 --- /dev/null +++ b/test/lambda/README.md @@ -0,0 +1,17 @@ +AWS Lambda Testing +------------------ + +Running locally +=============== + +Prerequisites: + +- AWS SAM CLI +- Docker daemon running + +Usage +===== + +- Start a local mongodb instance on port 27017 +- Run ``build.sh`` +- Run ``test.sh`` diff --git a/test/lambda/build_internal.sh b/test/lambda/build_internal.sh new file mode 100755 index 0000000000..84423db4d1 --- /dev/null +++ b/test/lambda/build_internal.sh @@ -0,0 +1,5 @@ +#!/bin/bash -ex + +cd /src +PYTHON=/opt/python/cp310-cp310/bin/python +$PYTHON -m pip install -v -e . diff --git a/test/lambda/events/event.json b/test/lambda/events/event.json new file mode 100644 index 0000000000..a6197dea6c --- /dev/null +++ b/test/lambda/events/event.json @@ -0,0 +1,62 @@ +{ + "body": "{\"message\": \"hello world\"}", + "resource": "/hello", + "path": "/hello", + "httpMethod": "GET", + "isBase64Encoded": false, + "queryStringParameters": { + "foo": "bar" + }, + "pathParameters": { + "proxy": "/path/to/resource" + }, + "stageVariables": { + "baz": "qux" + }, + "headers": { + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + "Accept-Encoding": "gzip, deflate, sdch", + "Accept-Language": "en-US,en;q=0.8", + "Cache-Control": "max-age=0", + "CloudFront-Forwarded-Proto": "https", + "CloudFront-Is-Desktop-Viewer": "true", + "CloudFront-Is-Mobile-Viewer": "false", + "CloudFront-Is-SmartTV-Viewer": "false", + "CloudFront-Is-Tablet-Viewer": "false", + "CloudFront-Viewer-Country": "US", + "Host": "1234567890.execute-api.us-east-1.amazonaws.com", + "Upgrade-Insecure-Requests": "1", + "User-Agent": "Custom User Agent String", + "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)", + "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==", + "X-Forwarded-For": "127.0.0.1, 127.0.0.2", + "X-Forwarded-Port": "443", + "X-Forwarded-Proto": "https" + }, + "requestContext": { + "accountId": "123456789012", + "resourceId": "123456", + "stage": "prod", + "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef", + "requestTime": "09/Apr/2015:12:34:56 +0000", + "requestTimeEpoch": 1428582896000, + "identity": { + "cognitoIdentityPoolId": null, + "accountId": null, + "cognitoIdentityId": null, + "caller": null, + "accessKey": null, + "sourceIp": "127.0.0.1", + "cognitoAuthenticationType": null, + "cognitoAuthenticationProvider": null, + "userArn": null, + "userAgent": "Custom User Agent String", + "user": null + }, + "path": "/prod/hello", + "resourcePath": "/hello", + "httpMethod": "POST", + "apiId": "1234567890", + "protocol": "HTTP/1.1" + } +} diff --git a/test/lambda/mongodb/Makefile b/test/lambda/mongodb/Makefile new file mode 100644 index 0000000000..3632dfb161 --- /dev/null +++ b/test/lambda/mongodb/Makefile @@ -0,0 +1,4 @@ + +build-MongoDBFunction: + cp -r . 
$(ARTIFACTS_DIR) + python -m pip install -t $(ARTIFACTS_DIR) dnspython diff --git a/test/lambda/mongodb/__init__.py b/test/lambda/mongodb/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py new file mode 100644 index 0000000000..274990d3bc --- /dev/null +++ b/test/lambda/mongodb/app.py @@ -0,0 +1,168 @@ +""" +Lambda function for Python Driver testing + +Creates the client that is cached for all requests, subscribes to +relevant events, and forces the connection pool to get populated. +""" +from __future__ import annotations + +import json +import os +import warnings + +from bson import has_c as has_bson_c +from pymongo import MongoClient +from pymongo import has_c as has_pymongo_c +from pymongo.monitoring import ( + CommandListener, + ConnectionPoolListener, + ServerHeartbeatListener, +) + +# Ensure there are no warnings raised in normal operation. +warnings.simplefilter("error") + +open_connections = 0 +heartbeat_count = 0 +streaming_heartbeat_count = 0 +total_heartbeat_duration = 0 +total_commands = 0 +total_command_duration = 0 + +# Ensure we are using C extensions +assert has_bson_c() +assert has_pymongo_c() + + +class CommandHandler(CommandListener): + def started(self, event): + print("command started", event) + + def succeeded(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command succeeded", event) + + def failed(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command failed", event) + + +class ServerHeartbeatHandler(ServerHeartbeatListener): + def started(self, event): + print("server heartbeat started", event) + + def succeeded(self, event): + global heartbeat_count, total_heartbeat_duration, streaming_heartbeat_count + heartbeat_count += 1 + total_heartbeat_duration += event.duration + if event.awaited: + streaming_heartbeat_count += 1 + print("server heartbeat succeeded", event) + + def failed(self, event): + global heartbeat_count, total_heartbeat_duration + heartbeat_count += 1 + total_heartbeat_duration += event.duration + print("server heartbeat failed", event) + + +class ConnectionHandler(ConnectionPoolListener): + def connection_created(self, event): + global open_connections + open_connections += 1 + print("connection created") + + def connection_ready(self, event): + pass + + def connection_closed(self, event): + global open_connections + open_connections -= 1 + print("connection closed") + + def connection_check_out_started(self, event): + pass + + def connection_check_out_failed(self, event): + pass + + def connection_checked_out(self, event): + pass + + def connection_checked_in(self, event): + pass + + def pool_created(self, event): + pass + + def pool_ready(self, event): + pass + + def pool_cleared(self, event): + pass + + def pool_closed(self, event): + pass + + +listeners = [CommandHandler(), ServerHeartbeatHandler(), ConnectionHandler()] +print("Creating client") +client = MongoClient(os.environ["MONGODB_URI"], event_listeners=listeners) + + +# Populate the connection pool. +print("Connecting") +client.lambdaTest.list_collections() +print("Connected") + + +# Create the response to send back. 
+def create_response(): + return dict( + averageCommandDuration=total_command_duration / total_commands, + averageHeartbeatDuration=total_heartbeat_duration / heartbeat_count + if heartbeat_count + else 0, + openConnections=open_connections, + heartbeatCount=heartbeat_count, + ) + + +# Reset the numbers. +def reset(): + global \ + open_connections, \ + heartbeat_count, \ + total_heartbeat_duration, \ + total_commands, \ + total_command_duration + open_connections = 0 + heartbeat_count = 0 + total_heartbeat_duration = 0 + total_commands = 0 + total_command_duration = 0 + + +def lambda_handler(event, context): + """ + The handler function itself performs an insert/delete and returns the + id of the document in play. + """ + print("initializing") + db = client.lambdaTest + collection = db.test + result = collection.insert_one({"n": 1}) + collection.delete_one({"_id": result.inserted_id}) + # Create the response and then reset the numbers. + response = json.dumps(create_response()) + reset() + print("finished!") + assert ( + streaming_heartbeat_count == 0 + ), f"streaming_heartbeat_count was {streaming_heartbeat_count} not 0" + + return dict(statusCode=200, body=response) diff --git a/test/lambda/run.sh b/test/lambda/run.sh new file mode 100755 index 0000000000..5f1980a5f9 --- /dev/null +++ b/test/lambda/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +sam build +sam local invoke --docker-network host --parameter-overrides "MongoDbUri=mongodb://host.docker.internal:27017" diff --git a/test/lambda/template.yaml b/test/lambda/template.yaml new file mode 100644 index 0000000000..11052f88dd --- /dev/null +++ b/test/lambda/template.yaml @@ -0,0 +1,49 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: > + Python driver lambda function test + +# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst +Globals: + Function: + Timeout: 30 + MemorySize: 128 + +Parameters: + MongoDbUri: + Type: String + Description: The MongoDB connection string. + +Resources: + MongoDBFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: mongodb/ + Environment: + Variables: + MONGODB_URI: !Ref MongoDbUri + Handler: app.lambda_handler + Runtime: python3.10 + Architectures: + - x86_64 + Events: + MongoDB: + Type: Api + Properties: + Path: /mongodb + Method: get + # Use a custom build method to make sure *.so files are copied. 
+ # https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/building-custom-runtimes.html + Metadata: + BuildMethod: makefile + +Outputs: + MongoDBApi: + Description: "API Gateway endpoint URL for Prod stage for Python driver lambda function" + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/" + MongoDBFunction: + Description: "Python driver lambda Function ARN" + Value: !GetAtt MongoDBFunction.Arn + MongoDBFunctionIamRole: + Description: "Implicit IAM Role created for Python driver lambda function" + Value: !GetAtt MongoDBFunctionRole.Arn diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json new file mode 100644 index 0000000000..27aaddd5b6 --- /dev/null +++ b/test/load_balancer/cursors.json @@ -0,0 +1,1271 @@ +{ + "description": "cursors are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database0", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "database0Name", + "documents": [] + }, + { + "collectionName": "coll2", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "no connection is pinned if all documents are returned in the initial batch", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {} + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned when the cursor is drained", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned to the pool when the cursor is closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": 
"client0", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } + ] + }, + { + "description": "pinned connections are returned after a network error during a killCursors request", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandFailedEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } 
+ ] + }, + { + "description": "pinned connections are returned to the pool after a non-network error on getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "errorCode": 7 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "aggregate pins the cursor to a connection", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": 
"listCollections pins the cursor to a connection", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": {}, + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listCollections", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": { + "$$type": "string" + } + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "listIndexes pins the cursor to a connection", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "y": 1 + }, + "name": "y_1" + } + }, + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "y_1", + "key": { + "y": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listIndexes", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "change streams pin to a connection", + 
"runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/event-monitoring.json b/test/load_balancer/event-monitoring.json new file mode 100644 index 0000000000..938c70bf38 --- /dev/null +++ b/test/load_balancer/event-monitoring.json @@ -0,0 +1,184 @@ +{ + "description": "monitoring events include correct fields", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "command started and succeeded events include serviceId", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServiceId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "command failed events include serviceId", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "poolClearedEvent events include serviceId", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + 
"isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/lb-connection-establishment.json b/test/load_balancer/lb-connection-establishment.json new file mode 100644 index 0000000000..0eaadf30c2 --- /dev/null +++ b/test/load_balancer/lb-connection-establishment.json @@ -0,0 +1,58 @@ +{ + "description": "connection establishment for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "loadBalanced": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + "tests": [ + { + "description": "operations against load balancers fail if URI contains loadBalanced=false", + "skipReason": "servers have not implemented LB support yet so they will not fail the connection handshake in this case", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/load_balancer/non-lb-connection-establishment.json b/test/load_balancer/non-lb-connection-establishment.json new file mode 100644 index 0000000000..6aaa7bdf98 --- /dev/null +++ b/test/load_balancer/non-lb-connection-establishment.json @@ -0,0 +1,92 @@ +{ + "description": "connection establishment if loadBalanced is specified for non-load balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "lbTrueClient", + "useMultipleMongoses": false, + "uriOptions": { + "loadBalanced": true + } + } + }, + { + "database": { + "id": "lbTrueDatabase", + "client": "lbTrueClient", + "databaseName": "lbTrueDb" + } + }, + { + "client": { + "id": "lbFalseClient", + "uriOptions": { + "loadBalanced": false + } + } + }, + { + "database": { + "id": "lbFalseDatabase", + "client": "lbFalseClient", + "databaseName": "lbFalseDb" + } + } + ], + "_yamlAnchors": { + "runCommandArguments": [ + { + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + }, + "tests": [ + { + "description": "operations against non-load balanced clusters fail if URI contains loadBalanced=true", + "operations": [ + { + "name": "runCommand", + "object": "lbTrueDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "errorContains": "Driver attempted to initialize in load balancing mode, but the server does not support this mode" + } + } + ] + }, + { + "description": "operations against non-load balanced clusters succeed if URI contains loadBalanced=false", + "operations": [ + { + "name": "runCommand", + "object": "lbFalseDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + } + ] +} diff --git a/test/load_balancer/sdam-error-handling.json 
b/test/load_balancer/sdam-error-handling.json new file mode 100644 index 0000000000..5892dcacd6 --- /dev/null +++ b/test/load_balancer/sdam-error-handling.json @@ -0,0 +1,514 @@ +{ + "description": "state change errors are correctly handled", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "observedEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + }, + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "singleClient", + "useMultipleMongoses": false, + "uriOptions": { + "appname": "lbSDAMErrorTestClient", + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "singleDB", + "client": "singleClient", + "databaseName": "singleDB" + } + }, + { + "collection": { + "id": "singleColl", + "database": "singleDB", + "collectionName": "singleColl" + } + }, + { + "client": { + "id": "multiClient", + "useMultipleMongoses": true, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "multiDB", + "client": "multiClient", + "databaseName": "multiDB" + } + }, + { + "collection": { + "id": "multiColl", + "database": "multiDB", + "collectionName": "multiColl" + } + } + ], + "initialData": [ + { + "collectionName": "singleColl", + "databaseName": "singleDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "multiColl", + "databaseName": "multiDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "only connections for a specific serviceId are closed when pools are cleared", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "close", + "object": "cursor1" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "multiClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 11600 + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "multiClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + 
"connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "stale" + } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "errors during the initial connection hello are ignored", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + } + ] + } + ] + }, + { + "description": "errors during authentication are processed", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + }, + { + "poolClearedEvent": {} + } + ] + } + ] + }, + { + "description": "stale errors are ignored", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" 
+ }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor1" + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/server-selection.json b/test/load_balancer/server-selection.json new file mode 100644 index 0000000000..00c7e4c95b --- /dev/null +++ b/test/load_balancer/server-selection.json @@ -0,0 +1,82 @@ +{ + "description": "server selection for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "$readPreference is sent for load-balanced clusters", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "$readPreference": { + "mode": "secondaryPreferred" + } + }, + "commandName": "find", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json new file mode 100644 index 0000000000..ca9c145217 --- /dev/null +++ b/test/load_balancer/transactions.json @@ -0,0 +1,1665 @@ +{ + "description": "transactions are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "_yamlAnchors": { + "documents": [ + { + "_id": 4 + } + ] + }, + "tests": [ + { + "description": "sessions are reused in LB mode", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": 
{ + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "all operations go to the same mongos", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "transaction can be committed multiple times", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + 
"commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient CRUD error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a non-transient abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": 
"abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network commit error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "ignoreResultAndError": true + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + 
} + ] + }, + { + "description": "pinned connection is released after a transient non-network abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + 
} + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a non-transaction operation uses the session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "a connection can be shared by a transaction and a cursor", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + 
{ + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2, + "session": "session0" + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released when session ended", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "endSession", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/wait-queue-timeouts.json b/test/load_balancer/wait-queue-timeouts.json new file mode 100644 index 0000000000..3dc6e46cff --- /dev/null +++ b/test/load_balancer/wait-queue-timeouts.json @@ -0,0 +1,153 @@ +{ + "description": "wait queue timeout errors include details about checked out connections", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 50 + }, + "observeEvents": [ + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "wait queue timeout errors include cursor statistics", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { 
+ "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 1, connections in use by transactions: 0, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + }, + { + "description": "wait queue timeout errors include transaction statistics", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 0, connections in use by transactions: 1, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json new file mode 100644 index 0000000000..db8b061b30 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -0,0 +1,74 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json new file mode 100644 index 0000000000..10b6f28786 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 
5, + "lastUpdateTime": 25001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json new file mode 100644 index 0000000000..28e5e2aa4a --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json @@ -0,0 +1,20 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json new file mode 100644 index 0000000000..38b9986500 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json new file mode 100644 index 0000000000..586b47ccd2 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + 
"avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json new file mode 100644 index 0000000000..5905fcbc60 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json new file mode 100644 index 0000000000..15a62090e3 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "PossiblePrimary", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "b:27017", + "type": "Unknown", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 21, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 21, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 21, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json new file mode 100644 index 0000000000..7c036f725c --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -0,0 +1,64 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + 
"lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json new file mode 100644 index 0000000000..56fcb156bb --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -0,0 +1,84 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "nyc" + } + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json new file mode 100644 index 0000000000..5a4b0c8226 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -0,0 +1,111 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": 
"d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json new file mode 100644 index 0000000000..19a948e928 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -0,0 +1,63 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json new file mode 100644 index 0000000000..b4633d88f3 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -0,0 +1,111 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + 
"read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json new file mode 100644 index 0000000000..ccb916f107 --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -0,0 +1,36 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json new file mode 100644 index 0000000000..00137cf69e --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -0,0 +1,74 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json new file mode 100644 index 0000000000..9d1db2de65 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + 
"maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json new file mode 100644 index 0000000000..b0636236cc --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -0,0 +1,76 @@ +{ + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 130 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json new file mode 100644 index 0000000000..76edfcb836 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -0,0 +1,37 @@ +{ + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 129 + }, + "error": true +} diff --git 
a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json new file mode 100644 index 0000000000..aa936e3c67 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -0,0 +1,37 @@ +{ + "heartbeatFrequencyMS": 500, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 89 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json new file mode 100644 index 0000000000..c24752a7f1 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "maxStalenessSeconds": 120 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json new file mode 100644 index 0000000000..d3a9535b09 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json 
b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json new file mode 100644 index 0000000000..f91706e804 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -0,0 +1,88 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21 + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json new file mode 100644 index 0000000000..4ed0b9ed2e --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -0,0 +1,84 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "nyc" + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json new file mode 100644 index 0000000000..7945530e6a --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -0,0 +1,64 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": 
"ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json new file mode 100644 index 0000000000..b433d6a430 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -0,0 +1,63 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json new file mode 100644 index 0000000000..e594af7832 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -0,0 +1,138 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + 
"data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json new file mode 100644 index 0000000000..bc0953c657 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -0,0 +1,96 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "nyc" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json new file mode 100644 index 0000000000..2817cf9225 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -0,0 +1,138 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + } 
+ }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json new file mode 100644 index 0000000000..7aa487a078 --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -0,0 +1,96 @@ +{ + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 21 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "nyc" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 21, + "tags": 
{ + "data_center": "tokyo" + } + } + ] +} diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json new file mode 100644 index 0000000000..fff5609fcc --- /dev/null +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -0,0 +1,36 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true +} diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json new file mode 100644 index 0000000000..98e05be363 --- /dev/null +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -0,0 +1,76 @@ +{ + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json new file mode 100644 index 0000000000..d948739855 --- /dev/null +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -0,0 +1,52 @@ +{ + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 21, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json new file mode 100644 index 0000000000..0e609bcf94 --- 
/dev/null +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -0,0 +1,19 @@ +{ + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "type": "Unknown", + "maxWireVersion": 21 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py new file mode 100644 index 0000000000..d4d8f53ff8 --- /dev/null +++ b/test/mockupdb/operations.py @@ -0,0 +1,127 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from collections import namedtuple + +try: + from mockupdb import OpMsgReply, OpReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from pymongo import ReadPreference + +__all__ = ["operations", "upgrades"] + + +Operation = namedtuple("Operation", ["name", "function", "reply", "op_type", "not_master"]) +"""Client operations on MongoDB. + +Each has a human-readable name, a function that actually executes a test, and +a type that maps to one of the types in the Server Selection Spec: +'may-use-secondary', 'must-use-primary', etc. + +The special type 'always-use-secondary' applies to an operation with an explicit +read mode, like the operation "command('c', read_preference=SECONDARY)". + +The not-master response is how a secondary responds to a must-use-primary op, +or how a recovering member responds to a may-use-secondary op. + +Example uses: + +We can use "find_one" to validate that the SlaveOk bit is set when querying a +standalone, even with mode PRIMARY, but that it isn't set when sent to a mongos +with mode PRIMARY. Or it can validate that "$readPreference" is included in +mongos queries except with mode PRIMARY or SECONDARY_PREFERRED (PYTHON-865). + +We can use "options" to test that the driver reads collection options with the +listCollections command (PYTHON-857). + +"secondary command" is good to test that the client can direct reads to +secondaries in a replica set, or select a mongos for secondary reads in a +sharded cluster (PYTHON-868).
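As a rough illustration only (this harness is hypothetical and not part of
this module; it assumes mockupdb is installed and this module is importable
as ``operations``), a test might pair an Operation's function with its canned
reply against a MockupDB server like so::

    from mockupdb import MockupDB, going
    from operations import operations  # assumed import path
    from pymongo import MongoClient

    server = MockupDB(auto_ismaster={"maxWireVersion": 8})
    server.run()
    client = MongoClient(server.uri)
    op = next(op for op in operations if op.name == "find_one")
    with going(op.function, client):   # run the operation in a background thread
        server.receives().reply(op.reply)  # feed the canned reply
    client.close()
    server.stop()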
+""" + +if _HAVE_MOCKUPDB: + not_master_reply = OpMsgReply(ok=0, errmsg="not master") + + operations = [ + Operation( + "find_one", + lambda client: client.db.collection.find_one(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "count_documents", + lambda client: client.db.collection.count_documents({}), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "aggregate", + lambda client: client.db.collection.aggregate([]), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "options", + lambda client: client.db.collection.options(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), + Operation( + "command", + lambda client: client.db.command("foo"), + reply={"ok": 1}, + op_type="must-use-primary", # Ignores client's read preference. + not_master=not_master_reply, + ), + Operation( + "secondary command", + lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), + reply={"ok": 1}, + op_type="always-use-secondary", + not_master=OpReply(ok=0, errmsg="node is recovering"), + ), + Operation( + "listIndexes", + lambda client: client.db.collection.index_information(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), + ] +else: + operations = [] + +_ops_by_name = {op.name: op for op in operations} + +Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) + +upgrades = [] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py new file mode 100644 index 0000000000..046d8d4b0f --- /dev/null +++ b/test/mockupdb/test_auth_recovering_member.py @@ -0,0 +1,64 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from pymongo import common +from pymongo.errors import ServerSelectionTimeoutError + +pytestmark = pytest.mark.mockupdb + + +class TestAuthRecoveringMember(PyMongoTestCase): + def test_auth_recovering_member(self): + # Test that we don't attempt auth against a recovering RS member. 
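        # A replica-set member in a non-selectable state such as RECOVERING
        # answers the handshake with "ismaster": False and "secondary": False
        # while still reporting its setName -- which is exactly what the
        # autoresponder below returns. Server selection with replicaSet="rs"
        # can never choose such a member, so no application connection is
        # checked out and no authentication is ever attempted against it.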
+ server = MockupDB() + server.autoresponds( + "ismaster", + { + "minWireVersion": 2, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "ismaster": False, + "secondary": False, + "setName": "rs", + }, + ) + + server.run() + self.addCleanup(server.stop) + + client = self.simple_client( + server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 + ) + + # Should see there's no primary or secondary and raise selection timeout + # error. If it raises AutoReconnect we know it actually tried the + # server, and that's wrong. + with self.assertRaises(ServerSelectionTimeoutError): + client.db.command("ping") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py new file mode 100644 index 0000000000..42ca916971 --- /dev/null +++ b/test/mockupdb/test_cluster_time.py @@ -0,0 +1,159 @@ +# Copyright 2017-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test $clusterTime handling.""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson import Timestamp +from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import OperationFailure + +pytestmark = pytest.mark.mockupdb + + +class TestClusterTime(PyMongoTestCase): + def cluster_time_conversation( + self, callback, replies, max_wire_version=MIN_SUPPORTED_WIRE_VERSION + ): + cluster_time = Timestamp(0, 0) + server = MockupDB() + + # First test all commands include $clusterTime with max_wire_version. 
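        # The $clusterTime gossip, in brief: the client remembers the highest
        # {"clusterTime": <Timestamp>} it has seen in any server reply
        # (including this handshake) and attaches it to every subsequent
        # command. The conversation loop below depends on that: each canned
        # reply advances the timestamp, and the next request must echo the
        # advanced value back.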
+ _ = server.autoresponds( + "ismaster", + { + "minWireVersion": 0, + "maxWireVersion": max_wire_version, + "$clusterTime": {"clusterTime": cluster_time}, + }, + ) + + server.run() + self.addCleanup(server.stop) + + client = self.simple_client(server.uri) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} + request.reply(reply) + + def test_command(self): + def callback(client): + client.db.command("ping") + client.db.command("ping") + + self.cluster_time_conversation(callback, [{"ok": 1}] * 2) + + def test_bulk(self): + def callback(client: MongoClient[dict]) -> None: + client.db.collection.bulk_write( + [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] + ) + + self.cluster_time_conversation( + callback, + [{"ok": 1, "nInserted": 2}, {"ok": 1, "nModified": 1}, {"ok": 1, "nDeleted": 2}], + ) + + batches = [ + {"cursor": {"id": 123, "firstBatch": [{"a": 1}]}}, + {"cursor": {"id": 123, "nextBatch": [{"a": 2}]}}, + {"cursor": {"id": 0, "nextBatch": [{"a": 3}]}}, + ] + + def test_cursor(self): + def callback(client): + list(client.db.collection.find()) + + self.cluster_time_conversation(callback, self.batches) + + def test_aggregate(self): + def callback(client): + list(client.db.collection.aggregate([])) + + self.cluster_time_conversation(callback, self.batches) + + def test_explain(self): + def callback(client): + client.db.collection.find().explain() + + self.cluster_time_conversation(callback, [{"ok": 1}]) + + def test_monitor(self): + cluster_time = Timestamp(0, 0) + reply = { + "minWireVersion": 0, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": {"clusterTime": cluster_time}, + } + + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + client = self.simple_client(server.uri, heartbeatFrequencyMS=500) + + for _ in range(3): + request = server.receives("ismaster") + # No $clusterTime in heartbeats or handshakes. + self.assertNotIn("$clusterTime", request) + request.ok(reply) + client.close() + + def test_collection_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.db.collection.bulk_write([InsertOne({}), InsertOne({})]) + + self.cluster_time_conversation( + callback, + [{"ok": 0, "errmsg": "mock error"}], + ) + + def test_client_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.bulk_write( + [ + InsertOne({}, namespace="db.collection"), + InsertOne({}, namespace="db.collection"), + ] + ) + + self.cluster_time_conversation( + callback, [{"ok": 0, "errmsg": "mock error"}], max_wire_version=25 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cursor.py b/test/mockupdb/test_cursor.py new file mode 100644 index 0000000000..e61f220d5b --- /dev/null +++ b/test/mockupdb/test_cursor.py @@ -0,0 +1,99 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor does not set exhaustAllowed automatically (PYTHON-4007).""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson.objectid import ObjectId +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import OperationFailure + +pytestmark = pytest.mark.mockupdb + + +class TestCursor(PyMongoTestCase): + def test_getmore_load_balanced(self): + server = MockupDB() + server.autoresponds( + "hello", + isWritablePrimary=True, + msg="isdbgrid", + minWireVersion=0, + maxWireVersion=20, + helloOk=True, + serviceId=ObjectId(), + ) + server.run() + self.addCleanup(server.stop) + + client = self.simple_client(server.uri, loadBalanced=True) + self.addCleanup(client.close) + collection = client.db.coll + cursor = collection.find() + with going(next, cursor): + request = server.receives(OpMsg({"find": "coll"})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + # Respond with a different namespace. + request.reply({"cursor": {"id": 123, "firstBatch": [{}]}}) + + # 3 batches, check exhaustAllowed on all getMores. + for i in range(1, 3): + with going(next, cursor): + request = server.receives(OpMsg({"getMore": 123})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + cursor_id = 123 if i < 2 else 0 + request.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) + + +class TestRetryableErrorCodeCatch(PyMongoTestCase): + def _test_fail_on_operation_failure_with_code(self, code): + """Test reads on error codes that should not be retried""" + server = MockupDB() + server.run() + self.addCleanup(server.stop) + server.autoresponds("ismaster", maxWireVersion=MIN_SUPPORTED_WIRE_VERSION) + + client = self.simple_client(server.uri) + + with going(lambda: server.receives(OpMsg({"find": "collection"})).command_err(code=code)): + cursor = client.db.collection.find() + with self.assertRaises(OperationFailure) as ctx: + cursor.next() + self.assertEqual(ctx.exception.code, code) + + def test_fail_on_operation_failure_none(self): + self._test_fail_on_operation_failure_with_code(None) + + def test_fail_on_operation_failure_zero(self): + self._test_fail_on_operation_failure_with_code(0) + + def test_fail_on_operation_failure_one(self): + self._test_fail_on_operation_failure_with_code(1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py new file mode 100644 index 0000000000..7538540bda --- /dev/null +++ b/test/mockupdb/test_cursor_namespace.py @@ -0,0 +1,153 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test that cursor getMore and killCursors use the namespace returned by the server."""
+from __future__ import annotations
+
+import unittest
+from test import PyMongoTestCase
+
+import pytest
+
+try:
+    from mockupdb import MockupDB, going
+
+    _HAVE_MOCKUPDB = True
+except ImportError:
+    _HAVE_MOCKUPDB = False
+
+
+from pymongo import MongoClient
+from pymongo.common import MIN_SUPPORTED_WIRE_VERSION
+
+pytestmark = pytest.mark.mockupdb
+
+
+class TestCursorNamespace(PyMongoTestCase):
+    server: MockupDB
+    client: MongoClient
+
+    @classmethod
+    def setUpClass(cls):
+        cls.server = MockupDB(auto_ismaster={"maxWireVersion": 8})
+        cls.server.run()
+        cls.client = cls.unmanaged_simple_client(cls.server.uri)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.client.close()
+        cls.server.stop()
+
+    def _test_cursor_namespace(self, cursor_op, command):
+        with going(cursor_op) as docs:
+            request = self.server.receives(**{command: "collection", "namespace": "test"})
+            # Respond with a different namespace.
+            request.reply(
+                {
+                    "cursor": {
+                        "firstBatch": [{"doc": 1}],
+                        "id": 123,
+                        "ns": "different_db.different.coll",
+                    }
+                }
+            )
+            # Client uses the namespace we returned.
+            request = self.server.receives(
+                getMore=123, namespace="different_db", collection="different.coll"
+            )
+
+            request.reply({"cursor": {"nextBatch": [{"doc": 2}], "id": 0}})
+
+        self.assertEqual([{"doc": 1}, {"doc": 2}], docs())
+
+    def test_aggregate_cursor(self):
+        def op():
+            return list(self.client.test.collection.aggregate([]))
+
+        self._test_cursor_namespace(op, "aggregate")
+
+    def test_find_cursor(self):
+        def op():
+            return list(self.client.test.collection.find())
+
+        self._test_cursor_namespace(op, "find")
+
+    def test_list_indexes(self):
+        def op():
+            return list(self.client.test.collection.list_indexes())
+
+        self._test_cursor_namespace(op, "listIndexes")
+
+
+class TestKillCursorsNamespace(PyMongoTestCase):
+    server: MockupDB
+    client: MongoClient
+
+    @classmethod
+    def setUpClass(cls):
+        cls.server = MockupDB(auto_ismaster={"maxWireVersion": MIN_SUPPORTED_WIRE_VERSION})
+        cls.server.run()
+        cls.client = cls.unmanaged_simple_client(cls.server.uri)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.client.close()
+        cls.server.stop()
+
+    def _test_killCursors_namespace(self, cursor_op, command):
+        with going(cursor_op):
+            request = self.server.receives(**{command: "collection", "namespace": "test"})
+            # Respond with a different namespace.
+            request.reply(
+                {
+                    "cursor": {
+                        "firstBatch": [{"doc": 1}],
+                        "id": 123,
+                        "ns": "different_db.different.coll",
+                    }
+                }
+            )
+            # Client uses the namespace we returned for killCursors.
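+            # (The database and collection come from the server-provided "ns",
+            # not from the namespace the cursor was originally opened on.)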
+ request = self.server.receives( + **{"killCursors": "different.coll", "cursors": [123], "$db": "different_db"} + ) + request.reply( + { + "ok": 1, + "cursorsKilled": [123], + "cursorsNotFound": [], + "cursorsAlive": [], + "cursorsUnknown": [], + } + ) + + def test_aggregate_killCursor(self): + def op(): + cursor = self.client.test.collection.aggregate([], batchSize=1) + next(cursor) + cursor.close() + + self._test_killCursors_namespace(op, "aggregate") + + def test_find_killCursor(self): + def op(): + cursor = self.client.test.collection.find(batch_size=1) + next(cursor) + cursor.close() + + self._test_killCursors_namespace(op, "find") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py new file mode 100644 index 0000000000..d24c8aa10a --- /dev/null +++ b/test/mockupdb/test_getmore_sharded.py @@ -0,0 +1,75 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor with a sharded cluster.""" +from __future__ import annotations + +import unittest +from queue import Queue +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestGetmoreSharded(PyMongoTestCase): + def test_getmore_sharded(self): + servers = [MockupDB(), MockupDB()] + + # Collect queries to either server in one queue. + q: Queue = Queue() + for server in servers: + server.subscribe(q.put) + server.autoresponds( + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + server.run() + self.addCleanup(server.stop) + + client = self.simple_client( + "mongodb://%s:%d,%s:%d" + % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) + ) + collection = client.db.collection + cursor = collection.find() + with going(next, cursor): + query = q.get(timeout=1) + query.replies({"cursor": {"id": 123, "firstBatch": [{}]}}) + + # 10 batches, all getMores go to same server. + for i in range(1, 10): + with going(next, cursor): + getmore = q.get(timeout=1) + self.assertEqual(query.server, getmore.server) + cursor_id = 123 if i < 9 else 0 + getmore.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py new file mode 100644 index 0000000000..c2c978c4ad --- /dev/null +++ b/test/mockupdb/test_handshake.py @@ -0,0 +1,297 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +import pytest + +try: + from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson.objectid import ObjectId +from pymongo import MongoClient, has_c +from pymongo import version as pymongo_version +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import OperationFailure +from pymongo.server_api import ServerApi, ServerApiVersion + +pytestmark = pytest.mark.mockupdb + + +def _check_handshake_data(request): + assert "client" in request + data = request["client"] + + assert data["application"] == {"name": "my app"} + if has_c(): + name = "PyMongo|c" + else: + name = "PyMongo" + assert data["driver"] == {"name": name, "version": pymongo_version} + + # Keep it simple, just check these fields exist. + assert "os" in data + assert "platform" in data + + +class TestHandshake(unittest.TestCase): + def hello_with_option_helper(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req is None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]), # type: ignore[arg-type] + ) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). 
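+        # assert_matches() raises AssertionError if the saved handshake does not
+        # match the expected protocol (OpMsg vs. Command/OpQuery) and options.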
+ self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + + def test_client_handshake_data(self): + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply( + "ismaster", + True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + error_response = OpReply(0, errmsg="Cache Reader No keys found for HMAC ...", code=211) + + secondary_response = OpReply( + "ismaster", + False, + setName="rs", + hosts=hosts, + secondary=True, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + + client = MongoClient( + primary.uri, replicaSet="rs", appname="my app", heartbeatFrequencyMS=500 + ) # Speed up the test. + + self.addCleanup(client.close) + + # New monitoring connections send data during handshake. + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Subsequent heartbeats have no client data. + primary.receives("ismaster", 1, client=absent).ok(error_response) + secondary.receives("ismaster", 1, client=absent).ok(error_response) + + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Still no client data. + primary.receives("ismaster", 1, client=absent).ok(primary_response) + secondary.receives("ismaster", 1, client=absent).ok(secondary_response) + + # After a disconnect, next ismaster has client data again. + primary.receives("ismaster", 1, client=absent).hangup() + heartbeat = primary.receives("ismaster") + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + secondary.autoresponds("ismaster", secondary_response) + + # Start a command, so the client opens an application socket. + future = go(client.db.command, "whatever") + + for request in primary: + if request.matches(Command("ismaster")): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket. + _check_handshake_data(request) + request.ok(primary_response) + else: + # Command succeeds. + request.assert_matches(OpMsg("whatever")) + request.ok() + assert future() + return + + def test_client_handshake_saslSupportedMechs(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + primary_response = OpReply( + "ismaster", True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) + client = MongoClient(server.uri, username="username", password="password") + + self.addCleanup(client.close) + + # New monitoring connections send data during handshake. + heartbeat = server.receives("ismaster") + heartbeat.ok(primary_response) + + future = go(client.db.command, "whatever") + for request in server: + if request.matches("ismaster"): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket should send + # saslSupportedMechs and speculativeAuthenticate. 
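+                    # saslSupportedMechs is "<auth database>.<username>"; the
+                    # auth database defaults to "admin" when the URI names none.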
+ self.assertEqual(request["saslSupportedMechs"], "admin.username") + self.assertIn("saslStart", request["speculativeAuthenticate"]) + auth = { + "conversationId": 1, + "done": False, + "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + } + request.ok( + "ismaster", + True, + # Unsupported auth mech should be ignored. + saslSupportedMechs=["SCRAM-SHA-256", "does_not_exist"], + speculativeAuthenticate=auth, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + # Authentication should immediately fail with: + # OperationFailure: Server returned an invalid nonce. + with self.assertRaises(OperationFailure) as cm: + future() + self.assertEqual(str(cm.exception), "Server returned an invalid nonce.") + return + + def test_handshake_load_balanced(self): + self.hello_with_option_helper(OpMsg, loadBalanced=True) + with self.assertRaisesRegex(AssertionError, "does not match"): + self.hello_with_option_helper(Command, loadBalanced=True) + + def test_handshake_versioned_api(self): + self.hello_with_option_helper(OpMsg, apiVersion="1") + with self.assertRaisesRegex(AssertionError, "does not match"): + self.hello_with_option_helper(Command, apiVersion="1") + + def test_handshake_not_either(self): + # If we don't specify either option then it should be using + # OP_QUERY for the initial step of the handshake. + self.hello_with_option_helper(Command) + with self.assertRaisesRegex(AssertionError, "does not match"): + self.hello_with_option_helper(OpMsg) + + def test_handshake_max_wire(self): + server = MockupDB() + primary_response = { + "hello": 1, + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + } + self.found_auth_msg = False + + def responder(request): + if request.matches(OpMsg, saslStart=1): + self.found_auth_msg = True + # Immediately closes the connection with + # OperationFailure: Server returned an invalid nonce. + request.reply( + OpMsgReply( + **primary_response, + payload=b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r," + b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + saslSupportedMechs=["SCRAM-SHA-1"], + ) + ) + return None + else: + return request.reply(**primary_response) + + server.autoresponds(responder) + self.addCleanup(server.stop) + server.run() + client = MongoClient( + server.uri, + username="username", + password="password", + ) + self.addCleanup(client.close) + self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) + self.assertTrue( + self.found_auth_msg, "Could not find authentication command with correct protocol" + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py new file mode 100644 index 0000000000..a52930c742 --- /dev/null +++ b/test/mockupdb/test_initial_ismaster.py @@ -0,0 +1,59 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import time +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, wait_until + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestInitialIsMaster(PyMongoTestCase): + def test_initial_ismaster(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + start = time.time() + client = self.simple_client(server.uri) + + # A single ismaster is enough for the client to be connected. + self.assertFalse(client.nodes) + server.receives("ismaster").ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) + wait_until(lambda: client.nodes, "update nodes", timeout=1) + + # At least 10 seconds before next heartbeat. + server.receives("ismaster").ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) + self.assertGreaterEqual(time.time() - start, 10) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py new file mode 100644 index 0000000000..71cad43aa2 --- /dev/null +++ b/test/mockupdb/test_list_indexes.py @@ -0,0 +1,58 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test list_indexes with more than one batch.""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson import SON +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestListIndexes(PyMongoTestCase): + def test_list_indexes_command(self): + server = MockupDB(auto_ismaster={"maxWireVersion": MIN_SUPPORTED_WIRE_VERSION}) + server.run() + self.addCleanup(server.stop) + client = self.simple_client(server.uri) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives(listIndexes="collection", namespace="test") + request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(getMore=123, namespace="test", collection="collection") + + request.reply({"cursor": {"nextBatch": [{"name": "index_1"}], "id": 0}}) + + self.assertEqual([{"name": "index_0"}, {"name": "index_1"}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py new file mode 100644 index 0000000000..7168bd2954 --- /dev/null +++ b/test/mockupdb/test_max_staleness.py @@ -0,0 +1,77 @@ +# Copyright 2016 MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestMaxStalenessMongos(PyMongoTestCase): + def test_mongos(self): + mongos = MockupDB() + mongos.autoresponds( + "ismaster", maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ismaster=True, msg="isdbgrid" + ) + mongos.run() + self.addCleanup(mongos.stop) + + # No maxStalenessSeconds. + uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port + + client = self.simple_client(uri) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) + + self.assertTrue(request.slave_okay) + request.ok(cursor={"firstBatch": [], "id": 0}) + + # find_one succeeds with no result. + self.assertIsNone(future()) + + # Set maxStalenessSeconds to 1. Client has no minimum with mongos, + # we let mongos enforce the 90-second minimum and return an error: + # SERVER-27146. + uri = ( + "mongodb://localhost:%d/?readPreference=secondary" + "&maxStalenessSeconds=1" % mongos.port + ) + + client = self.simple_client(uri) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) + + self.assertTrue(request.slave_okay) + request.ok(cursor={"firstBatch": [], "id": 0}) + + self.assertIsNone(future()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py new file mode 100644 index 0000000000..adbe61204b --- /dev/null +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -0,0 +1,100 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test PyMongo with a mixed-version cluster.""" +from __future__ import annotations + +import time +import unittest +from queue import Queue +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, go + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import upgrades # type: ignore[import] + +pytestmark = pytest.mark.mockupdb + + +class TestMixedVersionSharded(PyMongoTestCase): + def setup_server(self, upgrade): + self.mongos_old, self.mongos_new = MockupDB(), MockupDB() + + # Collect queries to either server in one queue. + self.q: Queue = Queue() + for server in self.mongos_old, self.mongos_new: + server.subscribe(self.q.put) + server.autoresponds("getlasterror") + server.run() + self.addCleanup(server.stop) + + # Max wire version is too old for the upgraded operation. + self.mongos_old.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version - 1 + ) + + # Up-to-date max wire version. + self.mongos_new.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version + ) + + self.mongoses_uri = "mongodb://{},{}".format( + self.mongos_old.address_string, + self.mongos_new.address_string, + ) + + self.client = self.simple_client(self.mongoses_uri) + + def tearDown(self): + if hasattr(self, "client") and self.client: + self.client.close() + + +def create_mixed_version_sharded_test(upgrade): + def test(self): + self.setup_server(upgrade) + start = time.time() + servers_used: set = set() + while len(servers_used) < 2: + go(upgrade.function, self.client) + request = self.q.get(timeout=1) + servers_used.add(request.server) + request.assert_matches( + upgrade.old if request.server is self.mongos_old else upgrade.new + ) + if time.time() > start + 10: + self.fail("never used both mongoses") + + return test + + +def generate_mixed_version_sharded_tests(): + for upgrade in upgrades: + test = create_mixed_version_sharded_test(upgrade) + test_name = "test_%s" % upgrade.name.replace(" ", "_") + test.__name__ = test_name + setattr(TestMixedVersionSharded, test_name, test) + + +generate_mixed_version_sharded_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py new file mode 100644 index 0000000000..61744e184d --- /dev/null +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -0,0 +1,141 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import itertools +import unittest + +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] + +from pymongo import MongoClient, ReadPreference +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + +pytestmark = pytest.mark.mockupdb + + +class TestMongosCommandReadMode(unittest.TestCase): + def test_aggregate(self): + server = MockupDB() + server.autoresponds( + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + self.addCleanup(server.stop) + server.run() + + client = MongoClient(server.uri) + self.addCleanup(client.close) + collection = client.test.collection + with going(collection.aggregate, []): + command = server.receives(aggregate="collection", pipeline=[]) + self.assertFalse(command.slave_ok, "SlaveOkay set") + command.ok(result=[{}]) + + secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) + + with going(secondary_collection.aggregate, []): + command = server.receives( + OpMsg( + { + "aggregate": "collection", + "pipeline": [], + "$readPreference": {"mode": "secondary"}, + } + ) + ) + command.ok(result=[{}]) + self.assertTrue(command.slave_ok, "SlaveOkay not set") + + +def create_mongos_read_mode_test(mode, operation): + def test(self): + server = MockupDB() + self.addCleanup(server.stop) + server.run() + server.autoresponds( + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = MongoClient(server.uri, read_preference=pref) + self.addCleanup(client.close) + + with going(operation.function, client): + request = server.receive() + request.reply(operation.reply) + + if operation.op_type == "always-use-secondary": + self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get("$readPreference")) + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": + slave_ok = False + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + actual_pref = request.doc.get("$readPreference") + if mode == "primary": + self.assertIsNone(actual_pref) + else: + self.assertEqual(pref.document, actual_pref) + else: + self.fail("unrecognized op_type %r" % operation.op_type) + + if slave_ok: + self.assertTrue(request.slave_ok, "SlaveOkay not set") + else: + self.assertFalse(request.slave_ok, "SlaveOkay set") + + return test + + +def generate_mongos_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + if mode == "primary" and operation.op_type == "always-use-secondary": + # Skip something like command('foo', read_preference=SECONDARY). 
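+            # (A client in mode "primary" never sends reads to a secondary, so
+            # the combination is contradictory and no test is generated for it.)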
+ continue + test = create_mongos_read_mode_test(mode, operation) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) + test.__name__ = test_name + setattr(TestMongosCommandReadMode, test_name, test) + + +generate_mongos_read_mode_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py new file mode 100644 index 0000000000..b5ccd5276f --- /dev/null +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -0,0 +1,100 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +import pytest + +try: + from mockupdb import Future, MockupDB, OpReply, going, wait_until + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import ConnectionFailure +from pymongo.topology_description import TOPOLOGY_TYPE + +pytestmark = pytest.mark.mockupdb + + +class TestNetworkDisconnectPrimary(unittest.TestCase): + def test_network_disconnect_primary(self): + # Application operation fails against primary. Test that topology + # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. + # http://bit.ly/1B5ttuL + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply( + ismaster=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + primary.autoresponds("ismaster", primary_response) + secondary.autoresponds( + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + + client = MongoClient(primary.uri, replicaSet="rs") + self.addCleanup(client.close) + wait_until(lambda: client.primary == primary.address, "discover primary") + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) + + # Open a socket in the application pool (calls ismaster). + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").ok() + + # The primary hangs replying to ismaster. + ismaster_future = Future() + primary.autoresponds("ismaster", lambda r: r.ok(ismaster_future.result())) + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").hangup() + + # Topology type is updated. + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, topology.description.topology_type) + + # Let ismasters through again. + ismaster_future.set_result(primary_response) + + # Demand a primary. 
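+        # (Server selection for the next operation blocks until the monitor's
+        # ismaster succeeds and the topology regains a primary.)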
+ with going(client.db.command, "buildinfo"): + wait_until(lambda: client.primary == primary.address, "rediscover primary") + primary.receives("buildinfo").ok() + + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py new file mode 100644 index 0000000000..4b85c5a48a --- /dev/null +++ b/test/mockupdb/test_op_msg.py @@ -0,0 +1,333 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest +from collections import namedtuple +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from pymongo import MongoClient, WriteConcern +from pymongo.cursor_shared import CursorType +from pymongo.operations import DeleteOne, InsertOne, UpdateOne + +pytestmark = pytest.mark.mockupdb + +Operation = namedtuple("Operation", ["name", "function", "request", "reply"]) + +if _HAVE_MOCKUPDB: + operations = [ + Operation( + "find_one", + lambda coll: coll.find_one({}), + request=OpMsg({"find": "coll"}, flags=0), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), + Operation( + "aggregate", + lambda coll: coll.aggregate([]), + request=OpMsg({"aggregate": "coll"}, flags=0), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), + Operation( + "insert_one", + lambda coll: coll.insert_one({}), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 1}, + ), + Operation( + "insert_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "insert_many", + lambda coll: coll.insert_many([{}, {}, {}]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 3}, + ), + Operation( + "insert_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}] + ), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 3}, + ), + Operation( + "insert_many-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "replace_one", + lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), + Operation( + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one( + {"_id": 1}, {"new": 1} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "update_one", + lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), 
+            reply={"ok": 1, "n": 1, "nModified": 1},
+        ),
+        Operation(
+            "update_one-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one(
+                {"_id": 1}, {"$set": {"new": 1}}
+            ),
+            request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        Operation(
+            "update_many",
+            lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}),
+            request=OpMsg({"update": "coll"}, flags=0),
+            reply={"ok": 1, "n": 1, "nModified": 1},
+        ),
+        Operation(
+            "update_many-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many(
+                {"_id": 1}, {"$set": {"new": 1}}
+            ),
+            request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        Operation(
+            "delete_one",
+            lambda coll: coll.delete_one({"a": 1}),
+            request=OpMsg({"delete": "coll"}, flags=0),
+            reply={"ok": 1, "n": 1},
+        ),
+        Operation(
+            "delete_one-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}),
+            request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        Operation(
+            "delete_many",
+            lambda coll: coll.delete_many({"a": 1}),
+            request=OpMsg({"delete": "coll"}, flags=0),
+            reply={"ok": 1, "n": 1},
+        ),
+        Operation(
+            "delete_many-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}),
+            request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        # Legacy methods
+        Operation(
+            "bulk_write_insert",
+            lambda coll: coll.bulk_write([InsertOne[dict]({}), InsertOne[dict]({})]),
+            request=OpMsg({"insert": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2},
+        ),
+        Operation(
+            "bulk_write_insert-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [InsertOne[dict]({}), InsertOne[dict]({})]
+            ),
+            request=OpMsg({"insert": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2},
+        ),
+        Operation(
+            "bulk_write_insert-w0-unordered",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [InsertOne[dict]({}), InsertOne[dict]({})], ordered=False
+            ),
+            request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        Operation(
+            "bulk_write_update",
+            lambda coll: coll.bulk_write(
+                [
+                    UpdateOne({"_id": 1}, {"$set": {"new": 1}}),
+                    UpdateOne({"_id": 2}, {"$set": {"new": 1}}),
+                ]
+            ),
+            request=OpMsg({"update": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2, "nModified": 2},
+        ),
+        Operation(
+            "bulk_write_update-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [
+                    UpdateOne({"_id": 1}, {"$set": {"new": 1}}),
+                    UpdateOne({"_id": 2}, {"$set": {"new": 1}}),
+                ]
+            ),
+            request=OpMsg({"update": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2, "nModified": 2},
+        ),
+        Operation(
+            "bulk_write_update-w0-unordered",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [
+                    UpdateOne({"_id": 1}, {"$set": {"new": 1}}),
+                    UpdateOne({"_id": 2}, {"$set": {"new": 1}}),
+                ],
+                ordered=False,
+            ),
+            request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+        Operation(
+            "bulk_write_delete",
+            lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]),
+            request=OpMsg({"delete": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2},
+        ),
+        Operation(
+            "bulk_write_delete-w0",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]
+            ),
+            request=OpMsg({"delete": "coll"}, flags=0),
+            reply={"ok": 1, "n": 2},
+        ),
+        Operation(
+            "bulk_write_delete-w0-unordered",
+            lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write(
+                [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False
+            ),
+            request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]),
+            reply=None,
+        ),
+    ]
+
+    operations_312 = [
+        Operation(
+            "find_raw_batches",
+            lambda coll: list(coll.find_raw_batches({})),
+            request=[
+                OpMsg({"find": "coll"}, flags=0),
+                OpMsg({"getMore": 7}, flags=0),
+            ],
+            reply=[
+                {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}},
+                {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}},
+            ],
+        ),
+        Operation(
+            "aggregate_raw_batches",
+            lambda coll: list(coll.aggregate_raw_batches([])),
+            request=[
+                OpMsg({"aggregate": "coll"}, flags=0),
+                OpMsg({"getMore": 7}, flags=0),
+            ],
+            reply=[
+                {"ok": 1, "cursor": {"firstBatch": [], "id": 7}},
+                {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}},
+            ],
+        ),
+        Operation(
+            "find_exhaust_cursor",
+            lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)),
+            request=[
+                OpMsg({"find": "coll"}, flags=0),
+                OpMsg({"getMore": 7}, flags=1 << 16),
+            ],
+            reply=[
+                OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0),
+                OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2),
+                OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2),
+                OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0),
+            ],
+        ),
+    ]
+else:
+    operations = []
+    operations_312 = []
+
+
+class TestOpMsg(PyMongoTestCase):
+    server: MockupDB
+    client: MongoClient
+
+    @classmethod
+    def setUpClass(cls):
+        cls.server = MockupDB(auto_ismaster=True, max_wire_version=8)
+        cls.server.run()
+        cls.client = cls.unmanaged_simple_client(cls.server.uri)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.server.stop()
+        cls.client.close()
+
+    def _test_operation(self, op):
+        coll = self.client.db.coll
+        with going(op.function, coll) as future:
+            expected_requests = op.request
+            replies = op.reply
+            if not isinstance(op.request, list):
+                expected_requests = [op.request]
+                replies = [op.reply]
+
+            for expected_request in expected_requests:
+                request = self.server.receives(expected_request)
+                reply = None
+                if replies:
+                    reply = replies.pop(0)
+                if reply is not None:
+                    request.reply(reply)
+            for reply in replies:
+                if reply is not None:
+                    request.reply(reply)
+
+        future()  # No error.
+
+
+def operation_test(op):
+    def test(self):
+        self._test_operation(op)
+
+    return test
+
+
+def create_tests(ops):
+    for op in ops:
+        test_name = f"test_op_msg_{op.name}"
+        setattr(TestOpMsg, test_name, operation_test(op))


+create_tests(operations)
+
+create_tests(operations_312)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py
new file mode 100644
index 0000000000..4cf82c760e
--- /dev/null
+++ b/test/mockupdb/test_op_msg_read_preference.py
@@ -0,0 +1,207 @@
+# Copyright 2018-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations + +import copy +import itertools +import unittest +from test import PyMongoTestCase +from typing import Any + +import pytest + +try: + from mockupdb import CommandBase, MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] + +from pymongo import ReadPreference +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + +pytestmark = pytest.mark.mockupdb + + +class OpMsgReadPrefBase(PyMongoTestCase): + single_mongod = False + primary: MockupDB + secondary: MockupDB + + @classmethod + def setUpClass(cls): + super().setUpClass() + + @classmethod + def add_test(cls, mode, test_name, test): + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = self.simple_client(self.primary.uri, read_preference=read_preference) + return client + + +class TestOpMsgMongos(OpMsgReadPrefBase): + @classmethod + def setUpClass(cls): + super().setUpClass() + auto_ismaster = { + "ismaster": True, + "msg": "isdbgrid", # Mongos. + "minWireVersion": 2, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super().tearDownClass() + + +class TestOpMsgReplicaSet(OpMsgReadPrefBase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.primary, cls.secondary = MockupDB(), MockupDB() + for server in cls.primary, cls.secondary: + server.run() + + hosts = [server.address_string for server in (cls.primary, cls.secondary)] + + primary_ismaster = { + "ismaster": True, + "setName": "rs", + "hosts": hosts, + "minWireVersion": 2, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + } + cls.primary.autoresponds(CommandBase("ismaster"), primary_ismaster) + secondary_ismaster = copy.copy(primary_ismaster) + secondary_ismaster["ismaster"] = False + secondary_ismaster["secondary"] = True + cls.secondary.autoresponds(CommandBase("ismaster"), secondary_ismaster) + + @classmethod + def tearDownClass(cls): + for server in cls.primary, cls.secondary: + server.stop() + super().tearDownClass() + + @classmethod + def add_test(cls, mode, test_name, test): + # Skip nearest tests since we don't know if we will select the primary + # or secondary. + if mode != "nearest": + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = self.simple_client( + self.primary.uri, replicaSet="rs", read_preference=read_preference + ) + + # Run a command on a secondary to discover the topology. This ensures + # that secondaryPreferred commands will select the secondary. 
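+        # (Otherwise the client may only know about the primary and route
+        # secondaryPreferred reads there.)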
+ client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) + return client + + +class TestOpMsgSingle(OpMsgReadPrefBase): + single_mongod = True + + @classmethod + def setUpClass(cls): + super().setUpClass() + auto_ismaster = { + "ismaster": True, + "minWireVersion": 2, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super().tearDownClass() + + +def create_op_msg_read_mode_test(mode, operation): + def test(self): + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = self.setup_client(read_preference=pref) + expected_pref: Any + if operation.op_type == "always-use-secondary": + expected_server = self.secondary + expected_pref = ReadPreference.SECONDARY + elif operation.op_type == "must-use-primary": + expected_server = self.primary + expected_pref = None + elif operation.op_type == "may-use-secondary": + if mode == "primary": + expected_server = self.primary + expected_pref = None + elif mode == "primaryPreferred": + expected_server = self.primary + expected_pref = pref + else: + expected_server = self.secondary + expected_pref = pref + else: + self.fail("unrecognized op_type %r" % operation.op_type) + # For single mongod we omit the read preference. + if self.single_mongod: + expected_pref = None + with going(operation.function, client): + request = expected_server.receive() + request.reply(operation.reply) + + actual_pref = request.doc.get("$readPreference") + if expected_pref: + self.assertEqual(expected_pref.document, actual_pref) + else: + self.assertIsNone(actual_pref) + self.assertNotIn("$query", request.doc) + + return test + + +def generate_op_msg_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + test = create_op_msg_read_mode_test(mode, operation) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) + test.__name__ = test_name + for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: + cls.add_test(mode, test_name, test) + + +generate_op_msg_read_mode_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py new file mode 100644 index 0000000000..2fae46be76 --- /dev/null +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -0,0 +1,88 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test PyMongo query and read preference with a sharded cluster.""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson import SON +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + Secondary, + SecondaryPreferred, +) + +pytestmark = pytest.mark.mockupdb + + +class TestQueryAndReadModeSharded(PyMongoTestCase): + def test_query_and_read_mode_sharded_op_msg(self): + """Test OP_MSG sends non-primary $readPreference and never $query.""" + server = MockupDB() + server.autoresponds( + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + server.run() + self.addCleanup(server.stop) + + client = self.simple_client(server.uri) + + read_prefs = ( + Primary(), + SecondaryPreferred(), + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{"tag": "value"}]), + ) + + for query in ( + {"a": 1}, + {"$query": {"a": 1}}, + ): + for pref in read_prefs: + collection = client.db.get_collection("test", read_preference=pref) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + # Command is not nested in $query. + expected_cmd = SON([("find", "test"), ("filter", {"a": 1})]) + if pref.mode: + expected_cmd["$readPreference"] = pref.document + request.assert_matches(OpMsg(expected_cmd)) + + request.replies({"cursor": {"id": 0, "firstBatch": [{}]}}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py new file mode 100644 index 0000000000..b438afe894 --- /dev/null +++ b/test/mockupdb/test_reset_and_request_check.py @@ -0,0 +1,170 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import itertools +import time +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going, wait_until + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from operations import operations # type: ignore[import] + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import ConnectionFailure +from pymongo.operations import _Op +from pymongo.server_type import SERVER_TYPE + +pytestmark = pytest.mark.mockupdb + + +class TestResetAndRequestCheck(PyMongoTestCase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.ismaster_time = 0.0 + self.client = None + self.server = None + + def setup_server(self): + self.server = MockupDB() + + def responder(request): + self.ismaster_time = time.time() + return request.ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) + + self.server.autoresponds("ismaster", responder) + self.server.run() + self.addCleanup(self.server.stop) + + kwargs = {"socketTimeoutMS": 100} + # Disable retryable reads when pymongo supports it. + kwargs["retryReads"] = False + self.client = self.simple_client(self.server.uri, **kwargs) # type: ignore + wait_until(lambda: self.client.nodes, "connect to standalone") + + def tearDown(self): + if hasattr(self, "client") and self.client: + self.client.close() + + def _test_disconnect(self, operation): + # Application operation fails. Test that client resets server + # description and does *not* schedule immediate check. + self.setup_server() + assert self.server is not None + assert self.client is not None + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives().hangup() + + # Server is Unknown. + topology = self.client._topology + with self.assertRaises(ConnectionFailure): + topology.select_server_by_address(self.server.address, _Op.TEST, 0) + + time.sleep(0.5) + after = time.time() + + # Demand a reconnect. + with going(self.client.db.command, "buildinfo"): + self.server.receives("buildinfo").ok() + + last = self.ismaster_time + self.assertGreaterEqual(last, after, "called ismaster before needed") + + def _test_timeout(self, operation): + # Application operation times out. Test that client does *not* reset + # server description and does *not* schedule immediate check. + self.setup_server() + assert self.server is not None + assert self.client is not None + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives() + before = self.ismaster_time + time.sleep(0.5) + + # Server is *not* Unknown. + topology = self.client._topology + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) + assert server is not None + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertEqual(after, before, "unneeded ismaster call") + + def _test_not_master(self, operation): + # Application operation gets a "not master" error. + self.setup_server() + assert self.server is not None + assert self.client is not None + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + request = self.server.receives() + before = self.ismaster_time + request.replies(operation.not_master) + time.sleep(1) + + # Server is rediscovered. 
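+        # (A "not master" reply marks the server Unknown and requests an
+        # immediate re-check, so the monitor should have run ismaster again.)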
+ topology = self.client._topology + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) + assert server is not None + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertGreater(after, before, "ismaster not called") + + +def create_reset_test(operation, test_method): + def test(self): + test_method(self, operation) + + return test + + +def generate_reset_tests(): + test_methods = [ + (TestResetAndRequestCheck._test_disconnect, "test_disconnect"), + (TestResetAndRequestCheck._test_timeout, "test_timeout"), + (TestResetAndRequestCheck._test_not_master, "test_not_master"), + ] + + matrix = itertools.product(operations, test_methods) + + for entry in matrix: + operation, (test_method, name) = entry + test = create_reset_test(operation, test_method) + test_name = "{}_{}".format(name, operation.name.replace(" ", "_")) + test.__name__ = test_name + setattr(TestResetAndRequestCheck, test_name, test) + + +generate_reset_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py new file mode 100644 index 0000000000..84d52170db --- /dev/null +++ b/test/mockupdb/test_rsghost.py @@ -0,0 +1,70 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test connections to RSGhost nodes.""" +from __future__ import annotations + +import datetime +import unittest + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + +pytestmark = pytest.mark.mockupdb + + +class TestRSGhost(unittest.TestCase): + def test_rsghost(self): + rsother_response = { + "ok": 1.0, + "ismaster": False, + "secondary": False, + "info": "Does not have a valid replica set config", + "isreplicaset": True, + "maxBsonObjectSize": 16777216, + "maxMessageSizeBytes": 48000000, + "maxWriteBatchSize": 100000, + "localTime": datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + "logicalSessionTimeoutMinutes": 30, + "connectionId": 3, + "minWireVersion": 0, + "maxWireVersion": 15, + "readOnly": False, + } + server = MockupDB(auto_ismaster=rsother_response) + server.run() + self.addCleanup(server.stop) + # Default auto discovery yields a server selection timeout. + with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: + with self.assertRaises(ServerSelectionTimeoutError): + client.test.command("ping") + # Direct connection succeeds. 
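+        # (directConnection=True uses a single-server topology, so the RSGhost
+        # node is selectable even though it reports no valid replica set config.)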
+ with MongoClient(server.uri, directConnection=True) as client: + with going(client.test.command, "ping"): + request = server.receives(ping=1) + request.reply() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py new file mode 100644 index 0000000000..728e4e2ce0 --- /dev/null +++ b/test/mockupdb/test_slave_okay_rs.py @@ -0,0 +1,100 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with a replica set connection. + +Just make sure SlaveOkay is *not* set on primary reads. +""" +from __future__ import annotations + +import unittest + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] + +from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestSlaveOkayRS(unittest.TestCase): + def setup_server(self): + self.primary, self.secondary = MockupDB(), MockupDB() + for server in self.primary, self.secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (self.primary, self.secondary)] + self.primary.autoresponds( + "ismaster", + ismaster=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + self.secondary.autoresponds( + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ) + + +def create_slave_ok_rs_test(operation): + def test(self): + self.setup_server() + assert operation.op_type != "always-use-secondary" + + client = MongoClient(self.primary.uri, replicaSet="rs") + self.addCleanup(client.close) + with going(operation.function, client): + request = self.primary.receive() + request.reply(operation.reply) + + self.assertFalse(request.slave_ok, 'SlaveOkay set read mode "primary"') + + return test + + +def generate_slave_ok_rs_tests(): + for operation in operations: + # Don't test secondary operations with MockupDB, the server enforces the + # SlaveOkay bit so integration tests prove we set it. + if operation.op_type == "always-use-secondary": + continue + test = create_slave_ok_rs_test(operation) + + test_name = "test_%s" % operation.name.replace(" ", "_") + test.__name__ = test_name + setattr(TestSlaveOkayRS, test_name, test) + + +generate_slave_ok_rs_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py new file mode 100644 index 0000000000..6efbff6583 --- /dev/null +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -0,0 +1,110 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test PyMongo's SlaveOkay with a sharded cluster:
+
+- Queries are distributed across two mongoses.
+- Each read preference mode is exercised.
+- SlaveOkay must be set if and only if the mode and op_type allow a secondary.
+"""
+from __future__ import annotations
+
+import itertools
+import unittest
+from queue import Queue
+
+import pytest
+
+try:
+    from mockupdb import MockupDB, going
+
+    _HAVE_MOCKUPDB = True
+except ImportError:
+    _HAVE_MOCKUPDB = False
+
+from operations import operations  # type: ignore[import]
+
+from pymongo import MongoClient
+from pymongo.common import MIN_SUPPORTED_WIRE_VERSION
+from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name
+
+pytestmark = pytest.mark.mockupdb
+
+
+class TestSlaveOkaySharded(unittest.TestCase):
+    def setup_server(self):
+        self.mongos1, self.mongos2 = MockupDB(), MockupDB()
+
+        # Collect queries to either server in one queue.
+        self.q: Queue = Queue()
+        for server in self.mongos1, self.mongos2:
+            server.subscribe(self.q.put)
+            server.run()
+            self.addCleanup(server.stop)
+            server.autoresponds(
+                "ismaster",
+                minWireVersion=2,
+                maxWireVersion=MIN_SUPPORTED_WIRE_VERSION,
+                ismaster=True,
+                msg="isdbgrid",
+            )
+
+        self.mongoses_uri = f"mongodb://{self.mongos1.address_string},{self.mongos2.address_string}"
+
+
+def create_slave_ok_sharded_test(mode, operation):
+    def test(self):
+        self.setup_server()
+        if operation.op_type == "always-use-secondary":
+            slave_ok = True
+        elif operation.op_type == "may-use-secondary":
+            slave_ok = mode != "primary"
+        elif operation.op_type == "must-use-primary":
+            slave_ok = False
+        else:
+            raise AssertionError("unrecognized op_type %r" % operation.op_type)
+
+        pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None)
+
+        client = MongoClient(self.mongoses_uri, read_preference=pref)
+        self.addCleanup(client.close)
+        with going(operation.function, client):
+            request = self.q.get(timeout=1)
+            request.reply(operation.reply)
+
+        if slave_ok:
+            self.assertTrue(request.slave_ok, "SlaveOkay not set")
+        else:
+            self.assertFalse(request.slave_ok, "SlaveOkay set")
+
+    return test
+
+
+def generate_slave_ok_sharded_tests():
+    modes = "primary", "secondary", "nearest"
+    matrix = itertools.product(modes, operations)
+
+    for entry in matrix:
+        mode, operation = entry
+        test = create_slave_ok_sharded_test(mode, operation)
+        test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode)
+
+        test.__name__ = test_name
+        setattr(TestSlaveOkaySharded, test_name, test)
+
+
+generate_slave_ok_sharded_tests()
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py
new file mode 100644
index 0000000000..88cb1a48a5
--- /dev/null
+++ b/test/mockupdb/test_slave_okay_single.py
@@ -0,0 +1,107 @@
+# Copyright 2015 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with: + +- A direct connection to a standalone. +- A direct connection to a slave. +- A direct connection to a mongos. +""" +from __future__ import annotations + +import itertools +import unittest + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] + +from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.topology_description import TOPOLOGY_TYPE + +pytestmark = pytest.mark.mockupdb + + +def topology_type_name(client): + topology_type = client._topology._description.topology_type + return TOPOLOGY_TYPE._fields[topology_type] + + +class TestSlaveOkaySingle(unittest.TestCase): + def setUp(self): + self.server = MockupDB() + self.server.run() + self.addCleanup(self.server.stop) + + +def create_slave_ok_single_test(mode, server_type, ismaster, operation): + def test(self): + ismaster_with_version = ismaster.copy() + ismaster_with_version["minWireVersion"] = 2 + ismaster_with_version["maxWireVersion"] = MIN_SUPPORTED_WIRE_VERSION + self.server.autoresponds("ismaster", **ismaster_with_version) + self.assertIn( + operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary") + ) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) + + client = MongoClient(self.server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.server.receive() + request.reply(operation.reply) + + self.assertIn(topology_type_name(client), ["Sharded", "Single"]) + + return test + + +def generate_slave_ok_single_tests(): + modes = "primary", "secondary", "nearest" + server_types = [ + ("standalone", {"ismaster": True}), + ("slave", {"ismaster": False}), + ("mongos", {"ismaster": True, "msg": "isdbgrid"}), + ] + + matrix = itertools.product(modes, server_types, operations) + + for entry in matrix: + mode, (server_type, ismaster), operation = entry + test = create_slave_ok_single_test(mode, server_type, ismaster, operation) + + test_name = "test_{}_{}_with_mode_{}".format( + operation.name.replace(" ", "_"), + server_type, + mode, + ) + + test.__name__ = test_name + setattr(TestSlaveOkaySingle, test_name, test) + + +generate_slave_ok_single_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_standalone_shard.py b/test/mockupdb/test_standalone_shard.py new file mode 100644 index 0000000000..28a582071c --- /dev/null +++ b/test/mockupdb/test_standalone_shard.py @@ -0,0 +1,67 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test errors that come from a standalone shard."""
+from __future__ import annotations
+
+import unittest
+
+import pytest
+
+try:
+    from mockupdb import MockupDB, going
+
+    _HAVE_MOCKUPDB = True
+except ImportError:
+    _HAVE_MOCKUPDB = False
+
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+
+pytestmark = pytest.mark.mockupdb
+
+
+class TestStandaloneShard(unittest.TestCase):
+    # See PYTHON-2048 and SERVER-44591.
+    def test_bulk_txn_error_message(self):
+        server = MockupDB(auto_ismaster={"maxWireVersion": 8})
+        server.run()
+        self.addCleanup(server.stop)
+        client = MongoClient(server.uri)
+        self.addCleanup(client.close)
+
+        with self.assertRaisesRegex(
+            OperationFailure, "This MongoDB deployment does not support retryable writes"
+        ):
+            with going(client.db.collection.insert_many, [{}, {}]):
+                request = server.receives()
+                request.reply(
+                    {
+                        "n": 0,
+                        "ok": 1.0,
+                        "writeErrors": [
+                            {
+                                "code": 20,
+                                "codeName": "IllegalOperation",
+                                "errmsg": "Transaction numbers are only allowed on a replica set member or mongos",
+                                "index": 0,
+                            }
+                        ],
+                    }
+                )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/mod_wsgi_test/README.rst b/test/mod_wsgi_test/README.rst
new file mode 100644
index 0000000000..e96db9406c
--- /dev/null
+++ b/test/mod_wsgi_test/README.rst
@@ -0,0 +1,110 @@
+Testing PyMongo with mod_wsgi
+=============================
+
+These tests verify that PyMongo works with Apache and mod_wsgi. They are
+primarily intended to prevent regression of
+`PYTHON-353 <https://jira.mongodb.org/browse/PYTHON-353>`_, a connection leak
+when PyMongo 2.2 was used with Python 2.6 and mod_wsgi 2.8. However, the tests
+may also catch concurrency bugs or incompatibilities between PyMongo's C
+extensions and the way mod_wsgi manages Python sub interpreters. It is
+generally useful to test PyMongo in the unconventional environment that
+mod_wsgi creates.
+
+Test Matrix
+-----------
+
+PyMongo should be tested with several versions of mod_wsgi and a selection
+of Python versions. Each combination of mod_wsgi and Python version should
+be tested with a standalone and a replica set. ``mod_wsgi_test.py``
+detects whether the deployment is a replica set and connects to the whole set.
+
+Setup
+-----
+
+Compile Python
+..............
+
+We need a Python interpreter built as a shared library. Download the
+source tarball for each Python version tested, untar it, and run::
+
+    ./configure --prefix=/some/directory --enable-shared LDFLAGS="-Wl,--rpath=/some/directory/lib"
+    make
+    make install
+
+This results in an executable named "python" or "python3" and a shared
+library named something like "libpython2.7.so.1.0" or "libpython3.3m.so.1.0".
+
+Compile mod_wsgi
+................
+
+Compile mod_wsgi for each combination of Python and mod_wsgi versions in the
+test matrix. For example, to compile mod_wsgi 3.4 for Python 2.7 on a
+RedHat-like Linux::
+
+    sudo yum install -y httpd httpd-devel
+    wget https://modwsgi.googlecode.com/files/mod_wsgi-3.4.tar.gz
+    tar xzf mod_wsgi-3.4.tar.gz
+    cd mod_wsgi-3.4
+    ./configure --with-python=/some/directory/bin/python LDFLAGS="-Wl,--rpath=/some/directory/lib"
+    make
+    make install
+
+To ease testing of several matrix combinations, copy the resulting
+``mod_wsgi.so`` to a safe place.
+
+Start mongod
+............
+
+Start a standalone listening on port 27017, or a replica set with a member
+listening on port 27017.
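+
+For example, one way to start a throwaway standalone (the ``--dbpath`` and
+``--logpath`` below are only placeholders; any empty directory works)::
+
+    mkdir -p /tmp/mod_wsgi_test_db
+    mongod --port 27017 --dbpath /tmp/mod_wsgi_test_db \
+        --logpath /tmp/mod_wsgi_test_db/mongod.log --fork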
+
+Configure Apache
+................
+
+Set a MOD_WSGI_SO environment variable so our ``mod_wsgi_test.conf``
+can find and load mod_wsgi.so::
+
+    export MOD_WSGI_SO=/path/to/mod_wsgi.so
+
+Start Apache with one of the config files in this directory.
+
+Run the test
+------------
+
+Run the included ``test_client.py`` script::
+
+    python test/mod_wsgi_test/test_client.py -n 2500 -t 100 parallel \
+        http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE}
+
+...where the "n" argument is the total number of requests to make to Apache,
+and "t" specifies the number of threads. ``WORKSPACE`` is the location of
+the PyMongo checkout. Note that multiple URLs are passed; each one corresponds
+to a different sub interpreter.
+
+Run this script again with different arguments to make serial requests::
+
+    python test/mod_wsgi_test/test_client.py -n 25000 serial \
+        http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE}
+
+The ``test_client.py`` script merely makes HTTP requests to Apache. Its
+exit code is non-zero if any of its requests fails, for example with an
+HTTP 500.
+
+The core of the test is in the WSGI script, ``mod_wsgi_test.py``.
+This script inserts some documents into MongoDB at startup, then queries
+documents for each HTTP request.
+
+If PyMongo is leaking connections and "n" is much greater than the ulimit,
+the test will fail when PyMongo exhausts its file descriptors.
+
+The script also encodes and decodes all BSON types to ensure that
+multiple sub interpreters in the same process are supported. This tests
+the workaround added in
+`PYTHON-569 <https://jira.mongodb.org/browse/PYTHON-569>`_.
+
+Automation
+----------
+
+At MongoDB, Inc. we use a continuous integration job that tests each
+combination in the matrix. The job starts up Apache, starts a single server
+or replica set, and runs ``test_client.py`` with the proper arguments.
+See ``run-mod-wsgi-tests.sh``.
diff --git a/test/mod_wsgi_test/apache22amazon.conf b/test/mod_wsgi_test/apache22amazon.conf
new file mode 100644
index 0000000000..7755336b07
--- /dev/null
+++ b/test/mod_wsgi_test/apache22amazon.conf
@@ -0,0 +1,34 @@
+# This is a minimal httpd.conf file written for Apache 2.2 on Amazon Linux
+
+# The modules directory is here by default.
+# ServerRoot "/etc/httpd"
+DocumentRoot ${PWD}
+PidFile ${PWD}/apache2.pid
+
+User nobody
+Group nobody
+
+# Bind to localhost only, don't require sudo.
+Listen 127.0.0.1:8080
+
+# Required modules.
+LoadModule authz_host_module modules/mod_authz_host.so
+# Needed so we can set a custom log location.
+LoadModule log_config_module modules/mod_log_config.so
+
+ErrorLog ${PWD}/error_log
+CustomLog ${PWD}/access_log combined
+
+<Directory />
+    AllowOverride None
+    Order Deny,Allow
+    Deny from All
+</Directory>
+
+<Directory ${PWD}>
+    AllowOverride None
+    Order Allow,Deny
+    Allow from All
+</Directory>
+
+Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF}
diff --git a/test/mod_wsgi_test/apache22ubuntu1204.conf b/test/mod_wsgi_test/apache22ubuntu1204.conf
new file mode 100644
index 0000000000..9fa4b2060b
--- /dev/null
+++ b/test/mod_wsgi_test/apache22ubuntu1204.conf
@@ -0,0 +1,29 @@
+# This is a minimal httpd.conf file written for Apache 2.2 on Ubuntu 12.04
+
+# The modules directory is here on Ubuntu.
+ServerRoot "/usr/lib/apache2"
+DocumentRoot ${PWD}
+PidFile ${PWD}/apache2.pid
+
+# Bind to localhost only, don't require sudo.
+Listen 127.0.0.1:8080
+
+# Required modules.
+LoadModule authz_host_module modules/mod_authz_host.so
+
+ErrorLog ${PWD}/error_log
+CustomLog ${PWD}/access_log combined
+
+<Directory />
+    AllowOverride None
+    Order Deny,Allow
+    Deny from All
+</Directory>
+
+<Directory ${PWD}>
+    AllowOverride None
+    Order Allow,Deny
+    Allow from All
+</Directory>
+
+Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF}
diff --git a/test/mod_wsgi_test/apache24ubuntu161404.conf b/test/mod_wsgi_test/apache24ubuntu161404.conf
new file mode 100644
index 0000000000..eb5414f0f7
--- /dev/null
+++ b/test/mod_wsgi_test/apache24ubuntu161404.conf
@@ -0,0 +1,28 @@
+# This is a minimal httpd.conf file written for Apache 2.4 on Ubuntu 14.04/16.04
+
+# The modules directory is here on Ubuntu.
+ServerRoot "/usr/lib/apache2"
+DocumentRoot ${PWD}
+PidFile ${PWD}/apache2.pid
+
+# Bind to localhost only, don't require sudo.
+Listen 127.0.0.1:8080
+
+# Required modules.
+LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
+LoadModule authz_core_module modules/mod_authz_core.so
+
+ErrorLog ${PWD}/error_log
+CustomLog ${PWD}/access_log combined
+
+<Directory />
+    AllowOverride None
+    Require all denied
+</Directory>
+
+<Directory ${PWD}>
+    AllowOverride None
+    Require all granted
+</Directory>
+
+Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF}
diff --git a/test/mod_wsgi_test/mod_wsgi_test.conf b/test/mod_wsgi_test/mod_wsgi_test.conf
index 0b8c714af5..a5b09e437f 100644
--- a/test/mod_wsgi_test/mod_wsgi_test.conf
+++ b/test/mod_wsgi_test/mod_wsgi_test.conf
@@ -1,4 +1,4 @@
-# Copyright 2012-2014 MongoDB, Inc.
+# Copyright 2012-present MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,26 +14,19 @@
 # Minimal test of PyMongo in a WSGI application, see bug PYTHON-353
 
-LoadModule wsgi_module modules/mod_wsgi.so
+LoadModule wsgi_module ${MOD_WSGI_SO}
 
 # Avoid permissions issues
 WSGISocketPrefix /tmp/
 
 <VirtualHost *>
     ServerName localhost
     WSGIDaemonProcess mod_wsgi_test processes=1 threads=15 display-name=mod_wsgi_test
     WSGIProcessGroup mod_wsgi_test
-
-    # For the convienience of unittests, rather than hard-code the location of
-    # mod_wsgi_test_single_server.wsgi and mod_wsgi_test_replica_set.wsgi,
-    # include it in the URL, so
-    # http://localhost/single_server/location-of-pymongo-checkout will work:
-
-    WSGIScriptAliasMatch ^/single_server(.+) $1/test/mod_wsgi_test/mod_wsgi_test_single_server.wsgi
-
-    WSGIScriptAliasMatch ^/replica_set(.+) $1/test/mod_wsgi_test/mod_wsgi_test_replica_set.wsgi
-
+    # Mount the script twice so that multiple interpreters are used.
+    # For the convenience of unittests, rather than hard-code the location of
+    # mod_wsgi_test.py, include it in the URL, so
+    # http://localhost/interpreter1/location-of-pymongo-checkout will work:
+    WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
+    WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
 </VirtualHost>
diff --git a/test/mod_wsgi_test/mod_wsgi_test.py b/test/mod_wsgi_test/mod_wsgi_test.py
new file mode 100644
index 0000000000..4ab2df2442
--- /dev/null
+++ b/test/mod_wsgi_test/mod_wsgi_test.py
@@ -0,0 +1,114 @@
+# Copyright 2012-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353."""
+from __future__ import annotations
+
+import datetime
+import os
+import re
+import sys
+import uuid
+
+this_path = os.path.dirname(os.path.join(os.getcwd(), __file__))
+
+# Location of PyMongo checkout
+repository_path = os.path.normpath(os.path.join(this_path, "..", ".."))
+sys.path.insert(0, repository_path)
+
+import bson
+import pymongo
+from bson.binary import STANDARD, Binary
+from bson.code import Code
+from bson.codec_options import CodecOptions
+from bson.datetime_ms import DatetimeConversion, DatetimeMS
+from bson.dbref import DBRef
+from bson.objectid import ObjectId
+from bson.regex import Regex
+from pymongo.synchronous.mongo_client import MongoClient
+
+try:
+    from mod_wsgi import version as mod_wsgi_version  # type: ignore[import]
+
+    _HAVE_MOD_WSGI = True
+except Exception:
+    mod_wsgi_version = None
+    _HAVE_MOD_WSGI = False
+
+if _HAVE_MOD_WSGI:
+    # Ensure the C extensions are installed.
+    assert bson.has_c()
+    assert pymongo.has_c()
+
+    OPTS: CodecOptions[dict] = CodecOptions(
+        uuid_representation=STANDARD, datetime_conversion=DatetimeConversion.DATETIME_AUTO
+    )
+    client: MongoClient[dict] = MongoClient()
+    # Use a unique collection name for each process:
+    coll_name = f"test-{uuid.uuid4()}"
+    collection = client.test.get_collection(coll_name, codec_options=OPTS)
+    ndocs = 20
+    collection.drop()
+    doc = {
+        "int32": 2 << 15,
+        "int64": 2 << 50,
+        "null": None,
+        "bool": True,
+        "float": 1.5,
+        "str": "string",
+        "list": [1, 2, 3],
+        "dict": {"a": 1, "b": 2, "c": 3},
+        "datetime": datetime.datetime.fromtimestamp(1690328577.446),
+        "datetime_ms_out_of_range": DatetimeMS(-2 << 60),
+        "regex_native": re.compile("regex*"),
+        "regex_pymongo": Regex("regex*"),
+        "binary": Binary(b"bytes", 128),
+        "oid": ObjectId(),
+        "dbref": DBRef("test", 1),
+        "code": Code("function(){ return true; }"),
+        "code_w_scope": Code("return function(){ return x; }", scope={"x": False}),
+        "bytes": b"bytes",
+        "uuid": uuid.uuid4(),
+    }
+    collection.insert_many([dict(i=i, **doc) for i in range(ndocs)])
+    client.close()  # Discard main thread's request socket.
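+    # A fresh client is created after the setup inserts so that each sub
+    # interpreter serves requests from its own newly-opened connection pool.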
+    client = MongoClient()
+    collection = client.test.get_collection(coll_name, codec_options=OPTS)
+
+    def application(environ, start_response):
+        results = list(collection.find().batch_size(10))
+        assert len(results) == ndocs, f"n_actual={len(results)} n_expected={ndocs}"
+        # Test encoding and decoding works (for sub interpreter support).
+        decoded = bson.decode(bson.encode(doc, codec_options=OPTS), codec_options=OPTS)
+        for key, value in doc.items():
+            # Native regex objects are decoded as bson Regex.
+            if isinstance(value, re.Pattern):
+                value = Regex.from_native(value)
+            assert decoded[key] == value, f"failed on doc[{key!r}]: {decoded[key]!r} != {value!r}"
+            assert isinstance(
+                decoded[key], type(value)
+            ), f"failed on doc[{key}]: {decoded[key]!r} is not an instance of {type(value)}"
+
+        output = (
+            f" python {sys.version}, mod_wsgi {mod_wsgi_version},"
+            f" pymongo {pymongo.version},"
+            f' mod_wsgi.process_group = {environ["mod_wsgi.process_group"]!r}'
+            f' mod_wsgi.application_group = {environ["mod_wsgi.application_group"]!r}'
+            f' wsgi.multithread = {environ["wsgi.multithread"]!r}'
+            "\n"
+        )
+        response_headers = [("Content-Length", str(len(output)))]
+        start_response("200 OK", response_headers)
+        return [output.encode("ascii")]
diff --git a/test/mod_wsgi_test/mod_wsgi_test_embedded.conf b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf
new file mode 100644
index 0000000000..306dab4ab6
--- /dev/null
+++ b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf
@@ -0,0 +1,30 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Minimal test of PyMongo in an *Embedded mode* WSGI application.
+
+LoadModule wsgi_module ${MOD_WSGI_SO}
+
+# Avoid permissions issues
+WSGISocketPrefix /tmp/
+
+<VirtualHost *>
+    ServerName localhost
+    # Mount the script twice so that multiple interpreters are used.
+    # For the convenience of unittests, rather than hard-code the location of
+    # mod_wsgi_test.py, include it in the URL, so
+    # http://localhost/interpreter1/location-of-pymongo-checkout will work:
+    WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
+    WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py
+</VirtualHost>
diff --git a/test/mod_wsgi_test/mod_wsgi_test_replica_set.wsgi b/test/mod_wsgi_test/mod_wsgi_test_replica_set.wsgi
deleted file mode 100644
index fef57611a1..0000000000
--- a/test/mod_wsgi_test/mod_wsgi_test_replica_set.wsgi
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2012-2014 MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Minimal test of PyMongo in a WSGI application with MongoReplicaSetClient, - see bug PYTHON-353. -""" - -import os -import sys - -this_path = os.path.dirname(os.path.join(os.getcwd(), __file__)) - -# Location of PyMongo checkout -repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) -sys.path.insert(0, repository_path) - -import pymongo -from pymongo.mongo_replica_set_client import MongoReplicaSetClient - -# auto_start_request is part of the PYTHON-353 pathology -client = MongoReplicaSetClient(replicaSet='repl0', auto_start_request=True) -collection = client.test.test - -ndocs = 20 - -collection.drop() -collection.insert([{'i': i} for i in range(ndocs)]) -client.disconnect() # Discard main thread's request socket. - -try: - from mod_wsgi import version as mod_wsgi_version -except: - mod_wsgi_version = None - - -def application(environ, start_response): - results = list(collection.find().batch_size(10)) - assert len(results) == ndocs - output = 'python %s, mod_wsgi %s, pymongo %s' % ( - sys.version, mod_wsgi_version, pymongo.version) - response_headers = [('Content-Length', str(len(output)))] - start_response('200 OK', response_headers) - return [output] diff --git a/test/mod_wsgi_test/mod_wsgi_test_single_server.wsgi b/test/mod_wsgi_test/mod_wsgi_test_single_server.wsgi deleted file mode 100644 index 995bc668a1..0000000000 --- a/test/mod_wsgi_test/mod_wsgi_test_single_server.wsgi +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2012-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353 -""" - -import os -import sys - -this_path = os.path.dirname(os.path.join(os.getcwd(), __file__)) - -# Location of PyMongo checkout -repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) -sys.path.insert(0, repository_path) - -import pymongo -from pymongo.mongo_client import MongoClient - -# auto_start_request is part of the PYTHON-353 pathology -client = MongoClient(auto_start_request=True) -collection = client.test.test - -ndocs = 20 - -collection.drop() -collection.insert([{'i': i} for i in range(ndocs)]) -client.disconnect() # Discard main thread's request socket. - -try: - from mod_wsgi import version as mod_wsgi_version -except: - mod_wsgi_version = None - - -def application(environ, start_response): - results = list(collection.find().batch_size(10)) - assert len(results) == ndocs - output = 'python %s, mod_wsgi %s, pymongo %s' % ( - sys.version, mod_wsgi_version, pymongo.version) - response_headers = [('Content-Length', str(len(output)))] - start_response('200 OK', response_headers) - return [output] diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index a73689a05a..c122863bfa 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -1,4 +1,4 @@ -# Copyright 2012-2014 MongoDB, Inc. 
+# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,60 +12,80 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test client for mod_wsgi application, see bug PYTHON-353. -""" +"""Test client for mod_wsgi application, see bug PYTHON-353.""" +from __future__ import annotations +import _thread as thread +import random import sys -import urllib2 -import thread import threading import time - from optparse import OptionParser +from urllib.request import urlopen -def parse_args(): - parser = OptionParser("""usage: %prog [options] mode url +def parse_args(args=None): + parser = OptionParser( + """usage: %prog [options] mode url [...] - mode:\tparallel or serial""") + mode:\tparallel or serial""" + ) # Should be enough that any connection leak will exhaust available file # descriptors. parser.add_option( - "-n", "--nrequests", type="int", - dest="nrequests", default=50 * 1000, - help="Number of times to GET the URL, in total") + "-n", + "--nrequests", + type="int", + dest="nrequests", + default=50 * 1000, + help="Number of times to GET the URLs, in total", + ) parser.add_option( - "-t", "--nthreads", type="int", - dest="nthreads", default=100, - help="Number of threads with mode 'parallel'") + "-t", + "--nthreads", + type="int", + dest="nthreads", + default=100, + help="Number of threads with mode 'parallel'", + ) parser.add_option( - "-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="Don't print status messages to stdout") + "-q", + "--quiet", + action="store_false", + dest="verbose", + default=True, + help="Don't print status messages to stdout", + ) parser.add_option( - "-c", "--continue", - action="store_true", dest="continue_", default=False, - help="Continue after HTTP errors") + "-c", + "--continue", + action="store_true", + dest="continue_", + default=False, + help="Continue after HTTP errors", + ) try: - options, (mode, url) = parser.parse_args() - except ValueError: + options, args = parser.parse_args(args or sys.argv[1:]) + mode, urls = args[0], args[1:] + except (ValueError, IndexError): parser.print_usage() sys.exit(1) - if mode not in ('parallel', 'serial'): + if mode not in ("parallel", "serial"): parser.print_usage() sys.exit(1) - return options, mode, url + return options, mode, urls -def get(url): - urllib2.urlopen(url).read().strip() +def get(urls): + url = random.choice(urls) + urlopen(url).read().strip() class URLGetterThread(threading.Thread): @@ -73,52 +93,53 @@ class URLGetterThread(threading.Thread): counter_lock = threading.Lock() counter = 0 - def __init__(self, options, url, nrequests_per_thread): - super(URLGetterThread, self).__init__() + def __init__(self, options, urls, nrequests_per_thread): + super().__init__() self.options = options - self.url = url + self.urls = urls self.nrequests_per_thread = nrequests_per_thread self.errors = 0 def run(self): - for i in range(self.nrequests_per_thread): + for _i in range(self.nrequests_per_thread): try: - get(url) - except Exception, e: - print e + get(self.urls) + except Exception as e: + print(e) - if not options.continue_: + if not self.options.continue_: thread.interrupt_main() thread.exit() self.errors += 1 - URLGetterThread.counter_lock.acquire() - URLGetterThread.counter += 1 - counter = URLGetterThread.counter - URLGetterThread.counter_lock.release() + with URLGetterThread.counter_lock: + 
URLGetterThread.counter += 1 + counter = URLGetterThread.counter - should_print = options.verbose and not counter % 1000 + should_print = self.options.verbose and not counter % 1000 if should_print: - print counter + print(counter) -def main(options, mode, url): +def main(options, mode, urls): start_time = time.time() errors = 0 - if mode == 'parallel': - nrequests_per_thread = options.nrequests / options.nthreads + if mode == "parallel": + nrequests_per_thread = options.nrequests // options.nthreads if options.verbose: - print ( - 'Getting %s %s times total in %s threads, ' - '%s times per thread' % ( - url, nrequests_per_thread * options.nthreads, - options.nthreads, nrequests_per_thread)) + print( + "Getting {} {} times total in {} threads, " "{} times per thread".format( + urls, + nrequests_per_thread * options.nthreads, + options.nthreads, + nrequests_per_thread, + ) + ) threads = [ - URLGetterThread(options, url, nrequests_per_thread) - for _ in range(options.nthreads) + URLGetterThread(options, urls, nrequests_per_thread) for _ in range(options.nthreads) ] for t in threads: @@ -130,39 +151,36 @@ def main(options, mode, url): errors = sum([t.errors for t in threads]) nthreads_with_errors = len([t for t in threads if t.errors]) if nthreads_with_errors: - print '%d threads had errors! %d errors in total' % ( - nthreads_with_errors, errors) + print("%d threads had errors! %d errors in total" % (nthreads_with_errors, errors)) else: - assert mode == 'serial' + assert mode == "serial" if options.verbose: - print 'Getting %s %s times in one thread' % ( - url, options.nrequests - ) + print(f"Getting {urls} {options.nrequests} times in one thread") for i in range(1, options.nrequests + 1): try: - get(url) - except Exception, e: - print e + get(urls) + except Exception as e: + print(e) if not options.continue_: sys.exit(1) errors += 1 if options.verbose and not i % 1000: - print i + print(i) if errors: - print '%d errors!' % errors + print("%d errors!" 
% errors) if options.verbose: - print 'Completed in %.2f seconds' % (time.time() - start_time) + print("Completed in %.2f seconds" % (time.time() - start_time)) if errors: # Failure sys.exit(1) -if __name__ == '__main__': - options, mode, url = parse_args() - main(options, mode, url) +if __name__ == "__main__": + options, mode, urls = parse_args() + main(options, mode, urls) diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py new file mode 100644 index 0000000000..5f9a2d45a9 --- /dev/null +++ b/test/mypy_fails/insert_many_dict.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from pymongo import MongoClient + +client: MongoClient = MongoClient() +client.test.test.insert_many( + {"a": 1} +) # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int" diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py new file mode 100644 index 0000000000..7c27d5cac9 --- /dev/null +++ b/test/mypy_fails/insert_one_list.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from pymongo import MongoClient + +client: MongoClient = MongoClient() +client.test.test.insert_one( + [{}] +) # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[, ]]"; expected "Mapping[str, Any]" diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py new file mode 100644 index 0000000000..49f3659e90 --- /dev/null +++ b/test/mypy_fails/raw_bson_document.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from bson.raw_bson import RawBSONDocument +from pymongo import MongoClient + +client = MongoClient(document_class=RawBSONDocument) +coll = client.test.test +doc = {"my": "doc"} +coll.insert_one(doc) +retrieved = coll.find_one({"_id": doc["_id"]}) +assert retrieved is not None +assert len(retrieved.raw) > 0 +retrieved[ + "foo" +] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py new file mode 100644 index 0000000000..37c3f0bfcc --- /dev/null +++ b/test/mypy_fails/typedict_client.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from typing import TypedDict + +from pymongo import MongoClient + + +class Movie(TypedDict): + name: str + year: int + + +client: MongoClient[Movie] = MongoClient() +coll = client.test.test +retrieved = coll.find_one({"_id": "foo"}) +assert retrieved is not None +assert retrieved["year"] == 1 +assert ( + retrieved["name"] == 2 +) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py new file mode 100644 index 0000000000..b20eaa35d6 --- /dev/null +++ b/test/ocsp/test_ocsp.py @@ -0,0 +1,76 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Test OCSP."""
+from __future__ import annotations
+
+import logging
+import os
+import sys
+import unittest
+from pathlib import Path
+
+import pytest
+
+sys.path[0:0] = [""]
+
+import pymongo
+from pymongo.errors import ServerSelectionTimeoutError
+
+pytestmark = pytest.mark.ocsp
+
+
+CA_FILE = os.environ.get("CA_FILE")
+OCSP_TLS_SHOULD_SUCCEED = os.environ.get("OCSP_TLS_SHOULD_SUCCEED") == "true"
+
+# Enable logs in this format:
+# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response
+FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s"
+logging.basicConfig(format=FORMAT, level=logging.DEBUG)
+
+
+def _connect(options):
+    assert CA_FILE is not None
+    uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS=10000&tlsCAFile={Path(CA_FILE).as_posix()}&{options}"
+    print(uri)
+    # Create the client outside the try block so the finally clause never
+    # sees an unbound name if the constructor raises.
+    client = pymongo.MongoClient(uri)
+    try:
+        client.admin.command("ping")
+    finally:
+        client.close()
+
+
+class TestOCSP(unittest.TestCase):
+    def test_tls_insecure(self):
+        # Should always succeed
+        options = "tls=true&tlsInsecure=true"
+        _connect(options)
+
+    def test_allow_invalid_certificates(self):
+        # Should always succeed
+        options = "tls=true&tlsAllowInvalidCertificates=true"
+        _connect(options)
+
+    def test_tls(self):
+        options = "tls=true"
+        if not OCSP_TLS_SHOULD_SUCCEED:
+            self.assertRaisesRegex(
+                ServerSelectionTimeoutError, "invalid status response", _connect, options
+            )
+        else:
+            _connect(options)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/performance/async_perf_test.py b/test/performance/async_perf_test.py
new file mode 100644
index 0000000000..6eb31ea4fe
--- /dev/null
+++ b/test/performance/async_perf_test.py
@@ -0,0 +1,496 @@
+# Copyright 2015-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Asynchronous Tests for the MongoDB Driver Performance Benchmarking Spec.
+
+See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md
+
+
+To set up the benchmarks locally::
+
+    python -m pip install simplejson
+    git clone --depth 1 https://github.com/mongodb/specifications.git
+    pushd specifications/source/benchmarking/data
+    tar xf extended_bson.tgz
+    tar xf parallel.tgz
+    tar xf single_and_multi_document.tgz
+    popd
+    export TEST_PATH="specifications/source/benchmarking/data"
+    export OUTPUT_FILE="results.json"
+
+Then to run all benchmarks quickly::
+
+    FASTBENCH=1 python test/performance/async_perf_test.py -v
+
+To run individual benchmarks quickly::
+
+    FASTBENCH=1 python test/performance/async_perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor
+"""
+from __future__ import annotations
+
+import asyncio
+import os
+import sys
+import tempfile
+import time
+import warnings
+from typing import Any, List, Optional, Union
+
+import pytest
+
+try:
+    import simplejson as json
+except ImportError:
+    import json  # type: ignore[no-redef]
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest
+
+from bson import encode
+from gridfs import AsyncGridFSBucket
+from pymongo import (
+    DeleteOne,
+    InsertOne,
+    ReplaceOne,
+)
+
+pytestmark = pytest.mark.perf
+
+# The spec calls for at least 1 minute of cumulative execution time and at most
+# 100 iterations or 5 minutes, but that makes the benchmarks too slow. Instead,
+# we use at least 30 seconds and at most 120 seconds.
+NUM_ITERATIONS = 100
+MIN_ITERATION_TIME = 30
+MAX_ITERATION_TIME = 120
+NUM_DOCS = 10000
+# When debugging or prototyping it's often useful to run the benchmarks locally; set FASTBENCH=1 to run quickly.
+if bool(os.getenv("FASTBENCH")):
+    NUM_ITERATIONS = 2
+    MIN_ITERATION_TIME = 1
+    MAX_ITERATION_TIME = 30
+    NUM_DOCS = 1000
+
+TEST_PATH = os.environ.get(
+    "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data"))
+)
+
+OUTPUT_FILE = os.environ.get("OUTPUT_FILE")
+
+result_data: List = []
+
+
+def tearDownModule():
+    output = json.dumps(result_data, indent=4)
+    if OUTPUT_FILE:
+        with open(OUTPUT_FILE, "w") as opf:
+            opf.write(output)
+    else:
+        print(output)
+
+
+class Timer:
+    def __enter__(self):
+        self.start = time.monotonic()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.monotonic()
+        self.interval = self.end - self.start
+
+
+async def concurrent(n_tasks, func):
+    tasks = [func() for _ in range(n_tasks)]
+    await asyncio.gather(*tasks)
+
+
+class PerformanceTest:
+    dataset: str
+    data_size: int
+    fail: Any
+    n_tasks: int = 1
+    did_init: bool = False
+
+    async def asyncSetUp(self):
+        await async_client_context.init()
+        self.setup_time = time.monotonic()
+
+    async def asyncTearDown(self):
+        duration = time.monotonic() - self.setup_time
+        # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding".
+ name = self.__class__.__name__[4:] + median = self.percentile(50) + megabytes_per_sec = (self.data_size * self.n_tasks) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "tasks": self.n_tasks, + }, + }, + "metrics": [ + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, + ], + } + ) + + async def before(self): + pass + + async def do_task(self): + raise NotImplementedError + + async def after(self): + pass + + def percentile(self, percentile): + if hasattr(self, "results"): + sorted_results = sorted(self.results) + percentile_index = int(len(sorted_results) * percentile / 100) - 1 + return sorted_results[percentile_index] + else: + self.fail("Test execution failed") + return None + + async def runTest(self): + results = [] + start = time.monotonic() + i = 0 + while True: + i += 1 + await self.before() + with Timer() as timer: + if self.n_tasks == 1: + await self.do_task() + else: + await concurrent(self.n_tasks, self.do_task) + await self.after() + results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." + ) + + break + + self.results = results + + +# SINGLE-DOC BENCHMARKS +class TestRunCommand(PerformanceTest, AsyncPyMongoTestCase): + data_size = len(encode({"hello": True})) * NUM_DOCS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def do_task(self): + command = self.client.perftest.command + for _ in range(NUM_DOCS): + await command("hello", True) + + +class TestRunCommand8Tasks(TestRunCommand): + n_tasks = 8 + + +class TestRunCommand80Tasks(TestRunCommand): + n_tasks = 80 + + +class TestRunCommandUnlimitedTasks(TestRunCommand): + async def do_task(self): + command = self.client.perftest.command + await asyncio.gather(*[command("hello", True) for _ in range(NUM_DOCS)]) + + +class TestDocument(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + # Location of test data. 
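+        # Reading the file synchronously is acceptable here (hence the noqa):
+        # setup runs once per test and is not part of the timed interval.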
+ with open( # noqa: ASYNC101 + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) + ) as data: + self.document = json.loads(data.read()) + + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + async def before(self): + self.corpus = await self.client.perftest.create_collection("corpus") + + async def after(self): + await self.client.perftest.drop_collection("corpus") + + +class FindTest(TestDocument): + dataset = "tweet.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + documents = [self.document.copy() for _ in range(NUM_DOCS)] + self.corpus = self.client.perftest.corpus + result = await self.corpus.insert_many(documents) + self.inserted_ids = result.inserted_ids + + async def before(self): + pass + + async def after(self): + pass + + +class TestFindOneByID(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + find_one = self.corpus.find_one + for _id in self.inserted_ids: + await find_one({"_id": _id}) + + +class TestFindOneByID8Tasks(TestFindOneByID): + n_tasks = 8 + + +class TestFindOneByID80Tasks(TestFindOneByID): + n_tasks = 80 + + +class TestFindOneByIDUnlimitedTasks(TestFindOneByID): + async def do_task(self): + find_one = self.corpus.find_one + await asyncio.gather(*[find_one({"_id": _id}) for _id in self.inserted_ids]) + + +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class TestSmallDocInsertOne(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestSmallDocInsertOneUnlimitedTasks(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + n_docs = 10 + self.data_size = len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] + + +class TestLargeDocInsertOne(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestLargeDocInsertOneUnlimitedTasks(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +# MULTI-DOC BENCHMARKS +class TestFindManyAndEmptyCursor(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.find().to_list() + + +class TestFindManyAndEmptyCursor8Tasks(TestFindManyAndEmptyCursor): + n_tasks = 8 + + +class TestFindManyAndEmptyCursor80Tasks(TestFindManyAndEmptyCursor): + n_tasks = 80 + + +class TestSmallDocBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): 
+ async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(document=doc)) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestLargeDocBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestLargeDocClientBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class GridFsTest(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + gridfs_path = os.path.join( + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: # noqa: ASYNC101 + self.document = data.read() + self.data_size = len(self.document) + self.bucket = 
AsyncGridFSBucket(self.client.perftest) + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + +class TestGridFsUpload(GridFsTest, AsyncPyMongoTestCase): + async def before(self): + # Create the bucket. + await self.bucket.upload_from_stream("init", b"x") + + async def do_task(self): + await self.bucket.upload_from_stream("gridfstest", self.document) + + +class TestGridFsDownload(GridFsTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.uploaded_id = await self.bucket.upload_from_stream("gridfstest", self.document) + + async def do_task(self): + await (await self.bucket.open_download_stream(self.uploaded_id)).read() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py new file mode 100644 index 0000000000..5688d28d2d --- /dev/null +++ b/test/performance/perf_test.py @@ -0,0 +1,728 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the MongoDB Driver Performance Benchmarking Spec. + +See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md + + +To set up the benchmarks locally:: + + python -m pip install simplejson + git clone --depth 1 https://github.com/mongodb/specifications.git + pushd specifications/source/benchmarking/data + tar xf extended_bson.tgz + tar xf parallel.tgz + tar xf single_and_multi_document.tgz + popd + export TEST_PATH="specifications/source/benchmarking/data" + export OUTPUT_FILE="results.json" + +Then to run all benchmarks quickly:: + + FASTBENCH=1 python test/performance/perf_test.py -v + +To run individual benchmarks quickly:: + + FASTBENCH=1 python test/performance/perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor +""" +from __future__ import annotations + +import multiprocessing as mp +import os +import sys +import tempfile +import threading +import time +import warnings +from typing import Any, List, Optional, Union + +import pytest + +try: + import simplejson as json +except ImportError: + import json # type: ignore[no-redef] + +sys.path[0:0] = [""] + +from test import client_context, unittest + +from bson import decode, encode, json_util +from gridfs import GridFSBucket +from pymongo import ( + DeleteOne, + InsertOne, + MongoClient, + ReplaceOne, +) + +pytestmark = pytest.mark.perf + +# Spec says to use at least 1 minute cumulative execution time and up to 100 iterations or 5 minutes but that +# makes the benchmarks too slow. Instead, we use at least 30 seconds and at most 60 seconds. +NUM_ITERATIONS = 100 +MIN_ITERATION_TIME = 30 +MAX_ITERATION_TIME = 60 +NUM_DOCS = 10000 +# When debugging or prototyping it's often useful to run the benchmarks locally, set FASTBENCH=1 to run quickly. 
+if bool(os.getenv("FASTBENCH")): + NUM_ITERATIONS = 2 + MIN_ITERATION_TIME = 0.1 + MAX_ITERATION_TIME = 0.5 + NUM_DOCS = 1000 + +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) + +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") + +result_data: List = [] + + +def tearDownModule(): + output = json.dumps(result_data, indent=4) + if OUTPUT_FILE: + with open(OUTPUT_FILE, "w") as opf: + opf.write(output) + else: + print(output) + + +class Timer: + def __enter__(self): + self.start = time.monotonic() + return self + + def __exit__(self, *args): + self.end = time.monotonic() + self.interval = self.end - self.start + + +def threaded(n_threads, func): + threads = [threading.Thread(target=func) for _ in range(n_threads)] + for t in threads: + t.start() + for t in threads: + t.join() + + +class PerformanceTest: + dataset: str + data_size: int + fail: Any + n_threads: int = 1 + + @classmethod + def setUpClass(cls): + client_context.init() + + def setUp(self): + self.setup_time = time.monotonic() + + def tearDown(self): + duration = time.monotonic() - self.setup_time + # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding". + name = self.__class__.__name__[4:] + median = self.percentile(50) + megabytes_per_sec = (self.data_size * self.n_threads) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "threads": self.n_threads, + }, + }, + "metrics": [ + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, + ], + } + ) + + def before(self): + pass + + def do_task(self): + raise NotImplementedError + + def after(self): + pass + + def percentile(self, percentile): + if hasattr(self, "results"): + sorted_results = sorted(self.results) + percentile_index = int(len(sorted_results) * percentile / 100) - 1 + return sorted_results[percentile_index] + else: + self.fail("Test execution failed") + return None + + def runTest(self): + results = [] + start = time.monotonic() + i = 0 + while True: + i += 1 + self.before() + with Timer() as timer: + if self.n_threads == 1: + self.do_task() + else: + threaded(self.n_threads, self.do_task) + self.after() + results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." + ) + + break + + self.results = results + + def mp_map(self, map_func, files): + with mp.Pool(initializer=proc_init, initargs=(client_context.client_options,)) as pool: + pool.map(map_func, files) + + +# BSON MICRO-BENCHMARKS + + +class MicroTest(PerformanceTest): + def setUp(self): + super().setUp() + # Location of test data. + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: + self.file_data = data.read() + + +class BsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. 
+ self.document = json_util.loads(self.file_data) + self.data_size = len(encode(self.document)) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + encode(self.document) + + +class BsonDecodingTest(MicroTest): + def setUp(self): + super().setUp() + self.document = encode(json_util.loads(self.file_data)) + self.data_size = len(self.document) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + decode(self.document) + + +class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestFullEncoding(BsonEncodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +class TestFullDecoding(BsonDecodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +# JSON MICRO-BENCHMARKS +class JsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. + self.document = json_util.loads(self.file_data) + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. + self.data_size = len(encode(self.document)) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.dumps(self.document) + + +class JsonDecodingTest(MicroTest): + def setUp(self): + super().setUp() + self.document = self.file_data + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. + self.data_size = len(encode(json_util.loads(self.file_data))) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.loads(self.document) + + +class TestJsonFlatEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonFlatDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonDeepEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonDeepDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonFullEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +class TestJsonFullDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +# SINGLE-DOC BENCHMARKS +class TestRunCommand(PerformanceTest, unittest.TestCase): + data_size = len(encode({"hello": True})) * NUM_DOCS + + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + + def do_task(self): + command = self.client.perftest.command + for _ in range(NUM_DOCS): + command("hello", True) + + +class TestRunCommand8Threads(TestRunCommand): + n_threads = 8 + + +class TestDocument(PerformanceTest): + def setUp(self): + super().setUp() + # Location of test data. 
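+        # self.dataset names the data file chosen by each subclass
+        # (e.g. "tweet.json", "small_doc.json", or "large_doc.json").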
+ with open( + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) + ) as data: + self.document = json.loads(data.read()) + + self.client = client_context.client + self.client.drop_database("perftest") + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + def before(self): + self.corpus = self.client.perftest.create_collection("corpus") + + def after(self): + self.client.perftest.drop_collection("corpus") + + +class FindTest(TestDocument): + dataset = "tweet.json" + + def setUp(self): + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + documents = [self.document.copy() for _ in range(NUM_DOCS)] + self.corpus = self.client.perftest.corpus + result = self.corpus.insert_many(documents) + self.inserted_ids = result.inserted_ids + + def before(self): + pass + + def after(self): + pass + + +class TestFindOneByID(FindTest, unittest.TestCase): + def do_task(self): + find_one = self.corpus.find_one + for _id in self.inserted_ids: + find_one({"_id": _id}) + + +class TestFindOneByID8Threads(TestFindOneByID): + n_threads = 8 + + +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" + + def setUp(self): + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + def setUp(self): + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class TestSmallDocInsertOne(SmallDocInsertTest, unittest.TestCase): + def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + insert_one(doc) + + +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" + + def setUp(self): + super().setUp() + n_docs = 10 + self.data_size = len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] + + +class TestLargeDocInsertOne(LargeDocInsertTest, unittest.TestCase): + def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + insert_one(doc) + + +# MULTI-DOC BENCHMARKS +class TestFindManyAndEmptyCursor(FindTest, unittest.TestCase): + def do_task(self): + list(self.corpus.find()) + + +class TestFindManyAndEmptyCursor8Threads(TestFindManyAndEmptyCursor): + n_threads = 8 + + +class TestSmallDocBulkInsert(SmallDocInsertTest, unittest.TestCase): + def do_task(self): + self.corpus.insert_many(self.documents, ordered=True) + + +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkInsert(SmallDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + 
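+            # Each document expands to three models: insert, replace, and
+            # delete. Only the insert and replace carry document payloads,
+            # which is why SmallDocMixedTest counts data_size twice.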
self.models.append(InsertOne(document=doc)) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + +class TestLargeDocBulkInsert(LargeDocInsertTest, unittest.TestCase): + def do_task(self): + self.corpus.insert_many(self.documents, ordered=True) + + +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + +class TestLargeDocClientBulkInsert(LargeDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + +class GridFsTest(PerformanceTest): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + + gridfs_path = os.path.join( + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: + self.document = data.read() + self.data_size = len(self.document) + self.bucket = GridFSBucket(self.client.perftest) + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + +class TestGridFsUpload(GridFsTest, unittest.TestCase): + def before(self): + # Create the bucket. 
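+        # before() runs outside the timed section, so the cost of creating
+        # the GridFS collections is not charged to the upload task.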
+ self.bucket.upload_from_stream("init", b"x") + + def do_task(self): + self.bucket.upload_from_stream("gridfstest", self.document) + + +class TestGridFsDownload(GridFsTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", self.document) + + def do_task(self): + self.bucket.open_download_stream(self.uploaded_id).read() + + +proc_client: Optional[MongoClient] = None + + +def proc_init(client_kwargs): + global proc_client + proc_client = MongoClient(**client_kwargs) + + +# PARALLEL BENCHMARKS + + +def insert_json_file(filename): + assert proc_client is not None + with open(filename) as data: + coll = proc_client.perftest.corpus + coll.insert_many([json.loads(line) for line in data]) + + +def insert_json_file_with_file_id(filename): + documents = [] + with open(filename) as data: + for line in data: + doc = json.loads(line) + doc["file"] = filename + documents.append(doc) + assert proc_client is not None + coll = proc_client.perftest.corpus + coll.insert_many(documents) + + +def read_json_file(filename): + assert proc_client is not None + coll = proc_client.perftest.corpus + with tempfile.TemporaryFile(mode="w") as temp: + for doc in coll.find({"file": filename}, {"_id": False}): + temp.write(json.dumps(doc)) + temp.write("\n") + + +def insert_gridfs_file(filename): + assert proc_client is not None + bucket = GridFSBucket(proc_client.perftest) + + with open(filename, "rb") as gfile: + bucket.upload_from_stream(filename, gfile) + + +def read_gridfs_file(filename): + assert proc_client is not None + bucket = GridFSBucket(proc_client.perftest) + + temp = tempfile.TemporaryFile() + try: + bucket.download_to_stream_by_name(filename, temp) + finally: + temp.close() + + +class TestJsonMultiImport(PerformanceTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) + self.corpus = self.client.perftest.corpus + + def before(self): + self.client.perftest.command({"create": "corpus"}) + + def do_task(self): + self.mp_map(insert_json_file, self.files) + + def after(self): + self.corpus.drop() + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + +class TestJsonMultiExport(PerformanceTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + self.client.perfest.corpus.create_index("file") + + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) + + self.mp_map(insert_json_file_with_file_id, self.files) + + def do_task(self): + self.mp_map(read_json_file, self.files) + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + +class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] + self.data_size = 
sum(os.path.getsize(fname) for fname in self.files) + + def before(self): + self.client.perftest.drop_collection("fs.files") + self.client.perftest.drop_collection("fs.chunks") + + self.bucket = GridFSBucket(self.client.perftest) + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] + + def do_task(self): + self.mp_map(insert_gridfs_file, self.files) + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + +class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.drop_database("perftest") + + bucket = GridFSBucket(self.client.perftest) + + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) + for fname in self.files: + with open(fname, "rb") as gfile: + bucket.upload_from_stream(fname, gfile) + + def do_task(self): + self.mp_map(read_gridfs_file, self.files) + + def tearDown(self): + super().tearDown() + self.client.drop_database("perftest") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index af60a20eed..7662dc9682 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 MongoDB, Inc. +# Copyright 2013-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,53 +13,104 @@ # limitations under the License. """Tools for mocking parts of PyMongo to test other parts.""" +from __future__ import annotations -import socket +import contextlib +import weakref +from functools import partial +from test import client_context -from pymongo import common -from pymongo import MongoClient, MongoReplicaSetClient -from pymongo.pool import Pool +from pymongo import MongoClient, common +from pymongo.errors import AutoReconnect, NetworkTimeout +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import Pool -from test import host as default_host, port as default_port -from test.utils import my_partial +_IS_SYNC = True class MockPool(Pool): def __init__(self, client, pair, *args, **kwargs): - # MockPool gets a 'client' arg, regular pools don't. - self.client = client + # MockPool gets a 'client' arg, regular pools don't. Weakref it to + # avoid cycle with __del__, causing ResourceWarnings in Python 3.3. + self.client = weakref.proxy(client) self.mock_host, self.mock_port = pair # Actually connect to the default server. 
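+        # The pool reports mock_host/mock_port to callers, but the actual
+        # TCP connection below goes to the real test server.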
- Pool.__init__( - self, - pair=(default_host, default_port), - max_size=None, - net_timeout=None, - conn_timeout=20, - use_ssl=False, - use_greenlets=False) - - def get_socket(self, force=False): + Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) + + @contextlib.contextmanager + def checkout(self, handler=None): client = self.client - host_and_port = '%s:%s' % (self.mock_host, self.mock_port) + host_and_port = f"{self.mock_host}:{self.mock_port}" if host_and_port in client.mock_down_hosts: - raise socket.error('mock error') + raise AutoReconnect("mock error") assert host_and_port in ( - client.mock_standalones - + client.mock_members - + client.mock_mongoses), "bad host: %s" % host_and_port + client.mock_standalones + client.mock_members + client.mock_mongoses + ), "bad host: %s" % host_and_port + + with Pool.checkout(self, handler) as conn: + conn.mock_host = self.mock_host + conn.mock_port = self.mock_port + yield conn + + +class DummyMonitor: + def __init__(self, server_description, topology, pool, topology_settings): + self._server_description = server_description + self.opened = False + + def cancel_check(self): + pass + + def join(self): + pass + + def open(self): + self.opened = True + + def request_check(self): + pass + + def close(self): + self.opened = False + + +class SyncMockMonitor(Monitor): + def __init__(self, client, server_description, topology, pool, topology_settings): + # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it + # to avoid cycles. + self.client = weakref.proxy(client) + Monitor.__init__(self, server_description, topology, pool, topology_settings) + + def _check_once(self): + client = self.client + address = self._server_description.address + response, rtt = client.mock_hello("%s:%d" % address) # type: ignore[str-format] + return ServerDescription(address, Hello(response), rtt) - sock_info = Pool.get_socket(self, force) - sock_info.mock_host = self.mock_host - sock_info.mock_port = self.mock_port - return sock_info +class MockClient(MongoClient): + def __init__( + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + """A MongoClient connected to the default server, with a mock topology. -class MockClientBase(object): - def __init__(self, standalones, members, mongoses, config): - """standalones, etc., are like ['a:1', 'b:2']""" + standalones, members, mongoses, arbiters, and down_hosts determine the + configuration of the topology. They are formatted like ['a:1', 'b:2']. + hello_hosts provides an alternative host list for the server's + mocked hello response; see test_connect_with_internal_ips. + """ self.mock_standalones = standalones[:] self.mock_members = members[:] @@ -68,15 +119,18 @@ def __init__(self, standalones, members, mongoses, config): else: self.mock_primary = None - if config is not None: - self.mock_ismaster_hosts = config + # Hosts that should be considered an arbiter. + self.mock_arbiters = arbiters[:] if arbiters else [] + + if hello_hosts is not None: + self.mock_hello_hosts = hello_hosts else: - self.mock_ismaster_hosts = members[:] + self.mock_hello_hosts = members[:] self.mock_mongoses = mongoses[:] # Hosts that should raise socket errors. 
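+        # (Seeded from the down_hosts argument; kill_host() adds more at runtime.)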
- self.mock_down_hosts = [] + self.mock_down_hosts = down_hosts[:] if down_hosts else [] # Hostname -> (min wire version, max wire version) self.mock_wire_versions = {} @@ -84,6 +138,36 @@ def __init__(self, standalones, members, mongoses, config): # Hostname -> max write batch size self.mock_max_write_batch_sizes = {} + # Hostname -> round trip time + self.mock_rtts = {} + + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(SyncMockMonitor, self) + + client_options = client_context.default_client_options.copy() + client_options.update(kwargs) + + super().__init__(*args, **client_options) + + @classmethod + def get_mock_client( + cls, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + c = MockClient( + standalones, members, mongoses, hello_hosts, arbiters, down_hosts, *args, **kwargs + ) + + c._connect() + return c + def kill_host(self, host): """Host is like 'a:1'.""" self.mock_down_hosts.append(host) @@ -98,97 +182,70 @@ def set_wire_version_range(self, host, min_version, max_version): def set_max_write_batch_size(self, host, size): self.mock_max_write_batch_sizes[host] = size - def mock_is_master(self, host): - min_wire_version, max_wire_version = self.mock_wire_versions.get( - host, - (common.MIN_WIRE_VERSION, common.MAX_WIRE_VERSION)) + def mock_hello(self, host): + """Return mock hello response (a dict) and round trip time.""" + if host in self.mock_wire_versions: + min_wire_version, max_wire_version = self.mock_wire_versions[host] + else: + min_wire_version = common.MIN_SUPPORTED_WIRE_VERSION + max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION max_write_batch_size = self.mock_max_write_batch_sizes.get( - host, common.MAX_WRITE_BATCH_SIZE) + host, common.MAX_WRITE_BATCH_SIZE + ) + + rtt = self.mock_rtts.get(host, 0) # host is like 'a:1'. if host in self.mock_down_hosts: - raise socket.timeout('mock timeout') + raise NetworkTimeout("mock timeout") - if host in self.mock_standalones: - return { - 'ismaster': True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} - - if host in self.mock_members: - ismaster = (host == self.mock_primary) + elif host in self.mock_standalones: + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } + elif host in self.mock_members: + primary = host == self.mock_primary # Simulate a replica set member. response = { - 'ismaster': ismaster, - 'secondary': not ismaster, - 'setName': 'rs', - 'hosts': self.mock_ismaster_hosts, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "ok": 1, + HelloCompat.LEGACY_CMD: primary, + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } if self.mock_primary: - response['primary'] = self.mock_primary - - return response - - if host in self.mock_mongoses: - return { - 'ismaster': True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'msg': 'isdbgrid', - 'maxWriteBatchSize': max_write_batch_size} - - # In test_internal_ips(), we try to connect to a host listed - # in ismaster['hosts'] but not publicly accessible. 
-        raise socket.error('Unknown host: %s' % host)
-
-    def simple_command(self, sock_info, dbname, spec):
-        # __simple_command is also used for authentication, but in this
-        # test it's only used for ismaster.
-        assert spec == {'ismaster': 1}
-        response = self.mock_is_master(
-            '%s:%s' % (sock_info.mock_host, sock_info.mock_port))
-
-        ping_time = 10
-        return response, ping_time
-
+            response["primary"] = self.mock_primary

-class MockClient(MockClientBase, MongoClient):
-    def __init__(
-        self, standalones, members, mongoses, ismaster_hosts=None,
-        *args, **kwargs
-    ):
-        MockClientBase.__init__(
-            self, standalones, members, mongoses, ismaster_hosts)
-
-        kwargs['_pool_class'] = my_partial(MockPool, self)
-        MongoClient.__init__(self, *args, **kwargs)
-
-    def _MongoClient__simple_command(self, sock_info, dbname, spec):
-        return self.simple_command(sock_info, dbname, spec)
-
-
-class MockReplicaSetClient(MockClientBase, MongoReplicaSetClient):
-    def __init__(
-        self, standalones, members, mongoses, ismaster_hosts=None,
-        *args, **kwargs
-    ):
-        MockClientBase.__init__(
-            self, standalones, members, mongoses, ismaster_hosts)
-
-        kwargs['_pool_class'] = my_partial(MockPool, self)
-        MongoReplicaSetClient.__init__(self, *args, **kwargs)
+            if host in self.mock_arbiters:
+                response["arbiterOnly"] = True
+                response["secondary"] = False
+        elif host in self.mock_mongoses:
+            response = {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "minWireVersion": min_wire_version,
+                "maxWireVersion": max_wire_version,
+                "msg": "isdbgrid",
+                "maxWriteBatchSize": max_write_batch_size,
+            }
+        else:
+            # In test_internal_ips(), we try to connect to a host listed
+            # in hello['hosts'] but not publicly accessible.
+            raise AutoReconnect("Unknown host: %s" % host)

-    def _MongoReplicaSetClient__is_master(self, host):
-        response = self.mock_is_master('%s:%s' % host)
-        connection_pool = MockPool(self, host)
-        ping_time = 10
-        return response, connection_pool, ping_time
+        return response, rtt

-    def _MongoReplicaSetClient__simple_command(self, sock_info, dbname, spec):
-        return self.simple_command(sock_info, dbname, spec)
+    def _process_periodic_tasks(self):
+        # Avoid the background thread causing races, e.g. a surprising
+        # reconnect while we're trying to test a disconnected client.
+        pass
diff --git a/test/pytest_conf.py b/test/pytest_conf.py
new file mode 100644
index 0000000000..a6e24cd9b1
--- /dev/null
+++ b/test/pytest_conf.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+
+def pytest_collection_modifyitems(items, config):
+    # Markers that should overlap with the default markers.
+    overlap_markers = ["async"]
+
+    for item in items:
+        if "asynchronous" in item.fspath.dirname:
+            default_marker = "default_async"
+        else:
+            default_marker = "default"
+        markers = [m for m in item.iter_markers() if m.name not in overlap_markers]
+        if not markers:
+            item.add_marker(default_marker)
diff --git a/test/qcheck.py b/test/qcheck.py
index 569d65178d..842580cbff 100644
--- a/test/qcheck.py
+++ b/test/qcheck.py
@@ -1,4 +1,4 @@
-# Copyright 2009-2014 MongoDB, Inc.
+# Copyright 2009-present MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,26 +11,26 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations -import random -import traceback import datetime +import random import re import sys +import traceback + sys.path[0:0] = [""] -from bson.binary import Binary from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import b, binary_type from bson.son import SON +_IS_SYNC = True + gen_target = 100 reduction_attempts = 10 examples = 5 -PY3 = sys.version_info[0] == 3 - def lift(value): return lambda: value @@ -58,7 +58,7 @@ def gen_int(): def gen_float(): - return lambda: (random.random() - 0.5) * sys.maxint + return lambda: (random.random() - 0.5) * sys.maxsize def gen_boolean(): @@ -73,26 +73,20 @@ def gen_printable_string(gen_length): return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) -if PY3: - def gen_char(set=None): - return lambda: bytes([random.randint(0, 255)]) -else: - def gen_char(set=None): - return lambda: chr(random.randint(0, 255)) +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) def gen_string(gen_length): - return lambda: b("").join(gen_list(gen_char(), gen_length)()) + return lambda: b"".join(gen_list(gen_char(), gen_length)()) def gen_unichar(): - return lambda: unichr(random.randint(1, 0xFFF)) + return lambda: chr(random.randint(1, 0xFFF)) def gen_unicode(gen_length): - return lambda: u"".join([x for x in - gen_list(gen_unichar(), gen_length)() if - x not in ".$"]) + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) def gen_list(generator, gen_length): @@ -100,22 +94,24 @@ def gen_list(generator, gen_length): def gen_datetime(): - return lambda: datetime.datetime(random.randint(1970, 2037), - random.randint(1, 12), - random.randint(1, 28), - random.randint(0, 23), - random.randint(0, 59), - random.randint(0, 59), - random.randint(0, 999) * 1000) + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) def gen_dict(gen_key, gen_value, gen_length): - def a_dict(gen_key, gen_value, length): result = {} for _ in range(length): result[gen_key()] = gen_value() return result + return lambda: a_dict(gen_key, gen_value, gen_length()) @@ -123,7 +119,8 @@ def gen_regexp(gen_length): # TODO our patterns only consist of one letter. # this is because of a bug in CPython's regex equality testing, # which I haven't quite tracked down, so I'm just ignoring it... - pattern = lambda: u"".join(gen_list(choose_lifted(u"a"), gen_length)()) + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 @@ -135,6 +132,7 @@ def gen_flags(): flags = flags | re.VERBOSE return flags + return lambda: re.compile(pattern(), gen_flags()) @@ -148,23 +146,17 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - - bintype = Binary - if PY3: - # If we used Binary in python3 tests would fail since we - # decode BSON binary subtype 0 to bytes. Testing this with - # bytes in python3 makes a lot more sense. - # binary_type is `str` in python 2, `bytes` in python 3. 
- bintype = binary_type - choices = [gen_unicode(gen_range(0, 50)), - gen_printable_string(gen_range(0, 50)), - my_map(gen_string(gen_range(0, 1000)), bintype), - gen_int(), - gen_float(), - gen_boolean(), - gen_datetime(), - gen_objectid(), - lift(None)] + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] if ref: choices.append(gen_dbref()) if depth > 0: @@ -178,9 +170,10 @@ def gen_mongo_list(depth, ref): def gen_mongo_dict(depth, ref=True): - return my_map(gen_dict(gen_unicode(gen_range(0, 20)), - gen_mongo_value(depth - 1, ref), - gen_range(0, 10)), SON) + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) def simplify(case): # TODO this is a hack @@ -188,15 +181,17 @@ def simplify(case): # TODO this is a hack simplified = SON(case) # make a copy! if random.choice([True, False]): # delete - if not len(simplified.keys()): + simplified_keys = list(simplified) + if not len(simplified_keys): return (False, case) - del simplified[random.choice(simplified.keys())] + simplified.pop(random.choice(simplified_keys)) return (True, simplified) else: # simplify a value - if not len(simplified.items()): + simplified_items = list(simplified.items()) + if not len(simplified_items): return (False, case) - (key, value) = random.choice(simplified.items()) + (key, value) = random.choice(simplified_items) (success, value) = simplify(value) simplified[key] = value return (success, success and simplified or case) @@ -228,7 +223,10 @@ def reduce(case, predicate, reductions=0): def isnt(predicate): - return lambda x: not predicate(x) + def is_not(x): + return not predicate(x) + + return is_not def check(predicate, generator): @@ -238,9 +236,9 @@ def check(predicate, generator): try: if not predicate(case): reduction = reduce(case, predicate) - counter_examples.append("after %s reductions: %r" % reduction) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) except: - counter_examples.append("%r : %s" % (case, traceback.format_exc())) + counter_examples.append(f"{case!r} : {traceback.format_exc()}") return counter_examples @@ -248,8 +246,10 @@ def check_unittest(test, predicate, generator): counter_examples = check(predicate, generator) if counter_examples: failures = len(counter_examples) - message = "\n".join([" -> %s" % f for f in - counter_examples[:examples]]) - message = ("found %d counter examples, displaying first %d:\n%s" % - (failures, min(failures, examples), message)) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) test.fail(message) diff --git a/test/read_write_concern/connection-string/read-concern.json b/test/read_write_concern/connection-string/read-concern.json new file mode 100644 index 0000000000..1ecad8c268 --- /dev/null +++ b/test/read_write_concern/connection-string/read-concern.json @@ -0,0 +1,47 @@ +{ + "tests": [ + { + "description": "Default", + "uri": "mongodb://localhost/", + "valid": true, + "warning": false, + "readConcern": {} + }, + { + "description": "local specified", + "uri": "mongodb://localhost/?readConcernLevel=local", + "valid": true, + "warning": false, + "readConcern": { + "level": "local" + } + }, + { + "description": "majority specified", + "uri": 
"mongodb://localhost/?readConcernLevel=majority", + "valid": true, + "warning": false, + "readConcern": { + "level": "majority" + } + }, + { + "description": "linearizable specified", + "uri": "mongodb://localhost/?readConcernLevel=linearizable", + "valid": true, + "warning": false, + "readConcern": { + "level": "linearizable" + } + }, + { + "description": "available specified", + "uri": "mongodb://localhost/?readConcernLevel=available", + "valid": true, + "warning": false, + "readConcern": { + "level": "available" + } + } + ] +} diff --git a/test/read_write_concern/connection-string/write-concern.json b/test/read_write_concern/connection-string/write-concern.json new file mode 100644 index 0000000000..51bdf821c3 --- /dev/null +++ b/test/read_write_concern/connection-string/write-concern.json @@ -0,0 +1,118 @@ +{ + "tests": [ + { + "description": "Default", + "uri": "mongodb://localhost/", + "valid": true, + "warning": false, + "writeConcern": {} + }, + { + "description": "w as a valid number", + "uri": "mongodb://localhost/?w=1", + "valid": true, + "warning": false, + "writeConcern": { + "w": 1 + } + }, + { + "description": "w as an invalid number", + "uri": "mongodb://localhost/?w=-2", + "valid": false, + "warning": null + }, + { + "description": "w as a string", + "uri": "mongodb://localhost/?w=majority", + "valid": true, + "warning": false, + "writeConcern": { + "w": "majority" + } + }, + { + "description": "wtimeoutMS as a valid number", + "uri": "mongodb://localhost/?wtimeoutMS=500", + "valid": true, + "warning": false, + "writeConcern": { + "wtimeoutMS": 500 + } + }, + { + "description": "wtimeoutMS as an invalid number", + "uri": "mongodb://localhost/?wtimeoutMS=-500", + "valid": false, + "warning": null + }, + { + "description": "journal as false", + "uri": "mongodb://localhost/?journal=false", + "valid": true, + "warning": false, + "writeConcern": { + "journal": false + } + }, + { + "description": "journal as true", + "uri": "mongodb://localhost/?journal=true", + "valid": true, + "warning": false, + "writeConcern": { + "journal": true + } + }, + { + "description": "All options combined", + "uri": "mongodb://localhost/?w=3&wtimeoutMS=500&journal=true", + "valid": true, + "warning": false, + "writeConcern": { + "w": 3, + "wtimeoutMS": 500, + "journal": true + } + }, + { + "description": "Unacknowledged with w", + "uri": "mongodb://localhost/?w=0", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0 + } + }, + { + "description": "Unacknowledged with w and journal", + "uri": "mongodb://localhost/?w=0&journal=false", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0, + "journal": false + } + }, + { + "description": "Unacknowledged with w and wtimeoutMS", + "uri": "mongodb://localhost/?w=0&wtimeoutMS=500", + "valid": true, + "warning": false, + "writeConcern": { + "w": 0, + "wtimeoutMS": 500 + } + }, + { + "description": "Acknowledged with w as 0 and journal true", + "uri": "mongodb://localhost/?w=0&journal=true", + "valid": false, + "warning": false, + "writeConcern": { + "w": 0, + "journal": true + } + } + ] +} diff --git a/test/read_write_concern/document/read-concern.json b/test/read_write_concern/document/read-concern.json new file mode 100644 index 0000000000..187397dae5 --- /dev/null +++ b/test/read_write_concern/document/read-concern.json @@ -0,0 +1,66 @@ +{ + "tests": [ + { + "description": "Default", + "valid": true, + "readConcern": {}, + "readConcernDocument": {}, + "isServerDefault": true + }, + { + "description": "Majority", + "valid": true, 
+ "readConcern": { + "level": "majority" + }, + "readConcernDocument": { + "level": "majority" + }, + "isServerDefault": false + }, + { + "description": "Local", + "valid": true, + "readConcern": { + "level": "local" + }, + "readConcernDocument": { + "level": "local" + }, + "isServerDefault": false + }, + { + "description": "Linearizable", + "valid": true, + "readConcern": { + "level": "linearizable" + }, + "readConcernDocument": { + "level": "linearizable" + }, + "isServerDefault": false + }, + { + "description": "Snapshot", + "valid": true, + "readConcern": { + "level": "snapshot" + }, + "readConcernDocument": { + "level": "snapshot" + }, + "isServerDefault": false + }, + { + "description": "Available", + "valid": true, + "readConcern": { + "level": "available" + }, + "readConcernDocument": { + "level": "available" + }, + "isServerDefault": false + } + ] +} diff --git a/test/read_write_concern/document/write-concern.json b/test/read_write_concern/document/write-concern.json new file mode 100644 index 0000000000..64cd5d0eae --- /dev/null +++ b/test/read_write_concern/document/write-concern.json @@ -0,0 +1,174 @@ +{ + "tests": [ + { + "description": "Default", + "valid": true, + "writeConcern": {}, + "writeConcernDocument": {}, + "isServerDefault": true, + "isAcknowledged": true + }, + { + "description": "W as a number", + "valid": true, + "writeConcern": { + "w": 3 + }, + "writeConcernDocument": { + "w": 3 + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "W as an invalid number", + "valid": false, + "writeConcern": { + "w": -3 + }, + "writeConcernDocument": null, + "isServerDefault": null, + "isAcknowledged": null + }, + { + "description": "W as majority", + "valid": true, + "writeConcern": { + "w": "majority" + }, + "writeConcernDocument": { + "w": "majority" + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "W as a custom string", + "valid": true, + "writeConcern": { + "w": "my_mode" + }, + "writeConcernDocument": { + "w": "my_mode" + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "WTimeoutMS", + "valid": true, + "writeConcern": { + "wtimeoutMS": 1000 + }, + "writeConcernDocument": { + "wtimeout": 1000 + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "WTimeoutMS as an invalid number", + "valid": false, + "writeConcern": { + "wtimeoutMS": -1000 + }, + "writeConcernDocument": null, + "isServerDefault": null, + "isAcknowledged": null + }, + { + "description": "Journal as true", + "valid": true, + "writeConcern": { + "journal": true + }, + "writeConcernDocument": { + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Journal as false", + "valid": true, + "writeConcern": { + "journal": false + }, + "writeConcernDocument": { + "j": false + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Unacknowledged with only w", + "valid": true, + "writeConcern": { + "w": 0 + }, + "writeConcernDocument": { + "w": 0 + }, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "Unacknowledged with wtimeoutMS", + "valid": true, + "writeConcern": { + "w": 0, + "wtimeoutMS": 500 + }, + "writeConcernDocument": { + "w": 0, + "wtimeout": 500 + }, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "Unacknowledged with journal", + "valid": true, + "writeConcern": { + "w": 0, + "journal": false + }, + "writeConcernDocument": { + "w": 0, + "j": false + 
}, + "isServerDefault": false, + "isAcknowledged": false + }, + { + "description": "W is 0 with journal true", + "valid": false, + "writeConcern": { + "w": 0, + "journal": true + }, + "writeConcernDocument": { + "w": 0, + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + }, + { + "description": "Everything", + "valid": true, + "writeConcern": { + "w": 3, + "wtimeoutMS": 1000, + "journal": true + }, + "writeConcernDocument": { + "w": 3, + "wtimeout": 1000, + "j": true + }, + "isServerDefault": false, + "isAcknowledged": true + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-2.6.json b/test/read_write_concern/operation/default-write-concern-2.6.json new file mode 100644 index 0000000000..0d8f9c98a1 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-2.6.json @@ -0,0 +1,636 @@ +{ + "description": "default-write-concern-2.6", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne omits default write concern", + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "DeleteMany omits default write concern", + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "BulkWrite with all models omits default write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "ordered": true, + "requests": [ + { + "deleteMany": { + "filter": {} + } + }, + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 2 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3 + } + } + }, + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 3 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": 
[ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 2 + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 3 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "InsertOne and InsertMany omit default write concern", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "UpdateOne, UpdateMany, and ReplaceOne omit default write concern", + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", 
+ "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "x": 3 + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 3 + } + ] + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.2.json b/test/read_write_concern/operation/default-write-concern-3.2.json new file mode 100644 index 0000000000..166a184916 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.2.json @@ -0,0 +1,164 @@ +{ + "description": "default-write-concern-3.2", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "findAndModify operations omit default write concern", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + }, + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 2 + }, + "update": { + "x": 2 + }, + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 2 + }, + "remove": true, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.4.json 
b/test/read_write_concern/operation/default-write-concern-3.4.json new file mode 100644 index 0000000000..e18cdfc0c4 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.4.json @@ -0,0 +1,278 @@ +{ + "description": "default-write-concern-3.4", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out omits default write concern", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_collection_name", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "RunCommand with a write command omits default write concern (runCommand should never inherit write concern)", + "operations": [ + { + "object": "database0", + "name": "runCommand", + "arguments": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ] + }, + "commandName": "delete" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "CreateIndex and dropIndex omits default write concern", + "operations": [ + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "keys": { + "x": 1 + } + } + }, + { + "object": "collection0", + "name": "dropIndex", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "dropIndexes": "coll", + "index": "x_1", + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "MapReduce omits default write concern", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "mapReduce", + "object": "collection0", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return 
values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + }, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-4.2.json b/test/read_write_concern/operation/default-write-concern-4.2.json new file mode 100644 index 0000000000..e8bb78d91d --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-4.2.json @@ -0,0 +1,125 @@ +{ + "description": "default-write-concern-4.2", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge omits default write concern", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_collection_name", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/aggregate-merge.json b/test/retryable_reads/unified/aggregate-merge.json new file mode 100644 index 0000000000..96bbd0fc38 --- /dev/null +++ b/test/retryable_reads/unified/aggregate-merge.json @@ -0,0 +1,143 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + 
"arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/aggregate-serverErrors.json b/test/retryable_reads/unified/aggregate-serverErrors.json new file mode 100644 index 0000000000..d39835a5d3 --- /dev/null +++ b/test/retryable_reads/unified/aggregate-serverErrors.json @@ -0,0 +1,1430 @@ +{ + "description": "aggregate-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": 
[ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + 
"errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": 
"collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + 
"pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/aggregate.json b/test/retryable_reads/unified/aggregate.json new file mode 100644 index 0000000000..2b504c8d49 --- /dev/null +++ b/test/retryable_reads/unified/aggregate.json @@ -0,0 +1,527 @@ +{ + 
"description": "aggregate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + 
"object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $out does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json new file mode 100644 index 0000000000..47375974d2 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json @@ -0,0 +1,959 @@ +{ + "description": "changeStreams-client.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": 
"client.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": 
"admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + 
{ + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-client.watch.json b/test/retryable_reads/unified/changeStreams-client.watch.json new file mode 100644 index 0000000000..95ddaf921d --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-client.watch.json @@ -0,0 +1,294 @@ +{ + "description": "changeStreams-client.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": "client.watch succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": 
"createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json new file mode 100644 index 0000000000..589d0a3c37 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json @@ -0,0 +1,944 @@ +{ + "description": "changeStreams-db.coll.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + 
"client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + 
"arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.coll.watch.json b/test/retryable_reads/unified/changeStreams-db.coll.watch.json new file mode 100644 index 0000000000..bbea2ffe4f --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.coll.watch.json @@ -0,0 +1,314 @@ +{ + "description": "changeStreams-db.coll.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json 
b/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json new file mode 100644 index 0000000000..6c12d7ddd8 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json @@ -0,0 +1,930 @@ +{ + "description": "changeStreams-db.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + 
"$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 
{ + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.watch.json b/test/retryable_reads/unified/changeStreams-db.watch.json new file mode 100644 index 0000000000..1b6d911c76 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.watch.json @@ -0,0 +1,303 @@ +{ + "description": "changeStreams-db.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + 
} + ], + "tests": [ + { + "description": "db.watch succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_reads/unified/count-serverErrors.json b/test/retryable_reads/unified/count-serverErrors.json new file mode 100644 index 0000000000..c52edfdb98 --- /dev/null +++ b/test/retryable_reads/unified/count-serverErrors.json @@ -0,0 +1,808 @@ +{ + "description": "count-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + 
"object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": 
{ + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/count.json b/test/retryable_reads/unified/count.json new file mode 100644 index 0000000000..d5c9a343a9 --- /dev/null +++ b/test/retryable_reads/unified/count.json @@ -0,0 +1,286 @@ +{ + "description": "count", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/countDocuments-serverErrors.json b/test/retryable_reads/unified/countDocuments-serverErrors.json new file mode 100644 index 0000000000..fd028b114c --- /dev/null +++ b/test/retryable_reads/unified/countDocuments-serverErrors.json @@ -0,0 +1,1133 @@ +{ + "description": "countDocuments-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + 
"databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { 
+ "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + 
{ + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", 
+ "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/countDocuments.json b/test/retryable_reads/unified/countDocuments.json new file mode 100644 index 0000000000..e06e89c1ad --- /dev/null +++ b/test/retryable_reads/unified/countDocuments.json @@ -0,0 +1,364 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + 
"expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + 
"$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/distinct-serverErrors.json b/test/retryable_reads/unified/distinct-serverErrors.json new file mode 100644 index 0000000000..79d2d5fc31 --- /dev/null +++ b/test/retryable_reads/unified/distinct-serverErrors.json @@ -0,0 +1,1060 @@ +{ + "description": "distinct-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 
1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + 
"key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/distinct.json b/test/retryable_reads/unified/distinct.json new file mode 100644 index 0000000000..81f1f66e91 --- /dev/null +++ b/test/retryable_reads/unified/distinct.json @@ -0,0 +1,352 @@ +{ + "description": "distinct", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true 
+ } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json b/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json new file mode 100644 index 0000000000..ba983c6cdf --- /dev/null +++ b/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json @@ -0,0 +1,768 @@ +{ + "description": "estimatedDocumentCount-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + 
"arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + 
"databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/estimatedDocumentCount.json b/test/retryable_reads/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..2ee29f6799 --- /dev/null +++ b/test/retryable_reads/unified/estimatedDocumentCount.json @@ -0,0 +1,273 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/exceededTimeLimit.json b/test/retryable_reads/unified/exceededTimeLimit.json new file mode 100644 index 0000000000..8d090bbe3f --- /dev/null +++ b/test/retryable_reads/unified/exceededTimeLimit.json @@ -0,0 +1,147 @@ +{ + "description": "ExceededTimeLimit is a retryable read", + "schemaVersion": "1.3", + 
"runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "exceededtimelimit-test" + } + } + ], + "initialData": [ + { + "collectionName": "exceededtimelimit-test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 262 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/find-serverErrors.json b/test/retryable_reads/unified/find-serverErrors.json new file mode 100644 index 0000000000..ab3dbe45f4 --- /dev/null +++ b/test/retryable_reads/unified/find-serverErrors.json @@ -0,0 +1,1184 @@ +{ + "description": "find-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": 
{ + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + 
}, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + 
}, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/find.json b/test/retryable_reads/unified/find.json new file mode 100644 index 0000000000..30c4c5e478 --- /dev/null +++ b/test/retryable_reads/unified/find.json @@ -0,0 +1,498 @@ +{ + "description": "find", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + 
"collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt with explicit clientOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 
1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/findOne-serverErrors.json b/test/retryable_reads/unified/findOne-serverErrors.json new file mode 100644 index 0000000000..7adda1e32b --- /dev/null +++ b/test/retryable_reads/unified/findOne-serverErrors.json @@ -0,0 +1,954 @@ +{ + "description": "findOne-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": 
"FindOne succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne 
succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostUnreachable", + 
"operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + 
"object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/findOne.json b/test/retryable_reads/unified/findOne.json new file mode 100644 index 0000000000..4314a19e46 --- /dev/null +++ b/test/retryable_reads/unified/findOne.json @@ -0,0 +1,330 @@ +{ + "description": "findOne", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "FindOne succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + 
"_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-download-serverErrors.json b/test/retryable_reads/unified/gridfs-download-serverErrors.json new file mode 100644 index 0000000000..5bb7eee0b2 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-download-serverErrors.json @@ -0,0 +1,1092 @@ +{ + "description": "gridfs-download-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": 
"000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" 
+ } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + 
"name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-download.json b/test/retryable_reads/unified/gridfs-download.json new file mode 100644 index 0000000000..69fe8ff7c8 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-download.json @@ -0,0 +1,367 @@ +{ + "description": "gridfs-download", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": 
{ + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json new file mode 100644 index 0000000000..35f7e1e563 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json @@ -0,0 +1,1016 @@ +{ + "description": "gridfs-downloadByName-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": 
"retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + 
}, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-downloadByName.json 
b/test/retryable_reads/unified/gridfs-downloadByName.json new file mode 100644 index 0000000000..c3fa873396 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-downloadByName.json @@ -0,0 +1,347 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + 
} + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json new file mode 100644 index 0000000000..2921d8a954 --- /dev/null +++ b/test/retryable_reads/unified/handshakeError.json @@ -0,0 +1,3079 @@ +{ + "description": "retryable reads handshake failures", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "client.listDatabases succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake network error", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + 
"arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.distinct succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + 
"description": "collection.distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": 
"runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + 
"description": "collection.listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, 
+ "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionNames-serverErrors.json b/test/retryable_reads/unified/listCollectionNames-serverErrors.json new file mode 100644 index 0000000000..162dd4cee0 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionNames-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ 
+ { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + 
"command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionNames.json b/test/retryable_reads/unified/listCollectionNames.json new file mode 100644 index 0000000000..0fe575f7a6 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionNames.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionObjects-serverErrors.json b/test/retryable_reads/unified/listCollectionObjects-serverErrors.json new file mode 100644 index 0000000000..8b9d582c10 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionObjects-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { 
+ "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ 
+ { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionObjects.json b/test/retryable_reads/unified/listCollectionObjects.json new file mode 100644 index 0000000000..9cdbb69276 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionObjects.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": 
"listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollections-serverErrors.json b/test/retryable_reads/unified/listCollections-serverErrors.json new file mode 100644 index 0000000000..171fe7457f --- /dev/null +++ b/test/retryable_reads/unified/listCollections-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollections-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": 
"listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollections.json b/test/retryable_reads/unified/listCollections.json new file mode 100644 index 0000000000..b6152f9ce5 --- 
/dev/null +++ b/test/retryable_reads/unified/listCollections.json @@ -0,0 +1,243 @@ +{ + "description": "listCollections", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseNames-serverErrors.json 
b/test/retryable_reads/unified/listDatabaseNames-serverErrors.json new file mode 100644 index 0000000000..489ff0ad51 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseNames-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after 
NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseNames.json b/test/retryable_reads/unified/listDatabaseNames.json new file mode 100644 index 0000000000..5590f39a51 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseNames.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": 
"ListDatabaseNames succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json b/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json new file mode 100644 index 0000000000..56f9f36236 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + 
"object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 
+ }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseObjects.json b/test/retryable_reads/unified/listDatabaseObjects.json new file mode 100644 index 0000000000..46b1511d46 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseObjects.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 
+ } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabases-serverErrors.json b/test/retryable_reads/unified/listDatabases-serverErrors.json new file mode 100644 index 0000000000..09b935a59f --- /dev/null +++ b/test/retryable_reads/unified/listDatabases-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabases-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 
{ + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", + 
"operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabases.json b/test/retryable_reads/unified/listDatabases.json new file mode 100644 index 0000000000..4cf5eccc7b --- /dev/null +++ b/test/retryable_reads/unified/listDatabases.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on second attempt", + 
"operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexNames-serverErrors.json b/test/retryable_reads/unified/listIndexNames-serverErrors.json new file mode 100644 index 0000000000..7b98111480 --- /dev/null +++ b/test/retryable_reads/unified/listIndexNames-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": 
[ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostNotFound", 
+ "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + 
"databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexNames.json b/test/retryable_reads/unified/listIndexNames.json new file mode 100644 index 0000000000..c5fe967ff5 --- /dev/null +++ b/test/retryable_reads/unified/listIndexNames.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": 
"ListIndexNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexes-serverErrors.json b/test/retryable_reads/unified/listIndexes-serverErrors.json new file mode 100644 index 0000000000..0110a0acd0 --- /dev/null +++ b/test/retryable_reads/unified/listIndexes-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexes-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexes.json b/test/retryable_reads/unified/listIndexes.json new file mode 100644 index 0000000000..2560e4961c --- /dev/null +++ b/test/retryable_reads/unified/listIndexes.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexes", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", 
+ "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/mapReduce.json b/test/retryable_reads/unified/mapReduce.json new file mode 100644 index 0000000000..745c0ef001 --- /dev/null +++ b/test/retryable_reads/unified/mapReduce.json @@ -0,0 +1,284 @@ +{ + "description": "mapReduce", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + 
"minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 0 + }, + { + "_id": 2, + "x": 1 + }, + { + "_id": 3, + "x": 2 + } + ] + } + ], + "tests": [ + { + "description": "MapReduce succeeds with retry on", + "operations": [ + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectResult": [ + { + "_id": 0, + "value": 6 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry on", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry off", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "mapReduce", + 
"arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json new file mode 100644 index 0000000000..8aa6a6b5e5 --- /dev/null +++ b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json @@ -0,0 +1,147 @@ +{ + "description": "ReadConcernMajorityNotAvailableYet is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "readconcernmajoritynotavailableyet_test" + } + } + ], + "initialData": [ + { + "collectionName": "readconcernmajoritynotavailableyet_test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ReadConcernMajorityNotAvailableYet", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 134 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/aggregate-out-merge.json b/test/retryable_writes/unified/aggregate-out-merge.json new file mode 100644 index 0000000000..fd25c345ac --- /dev/null +++ b/test/retryable_writes/unified/aggregate-out-merge.json @@ -0,0 +1,149 @@ +{ + "description": "aggregate with $out/$merge does not set txnNumber", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + 
"replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "mergeCollection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "aggregate with $out does not set txnNumber", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "outCollection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $merge does not set txnNumber", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "mergeCollection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/bulkWrite-errorLabels.json b/test/retryable_writes/unified/bulkWrite-errorLabels.json new file mode 100644 index 0000000000..13ba9bae75 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-errorLabels.json @@ -0,0 +1,416 @@ +{ + "description": "bulkWrite-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + 
"insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + 
"upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/bulkWrite-serverErrors.json b/test/retryable_writes/unified/bulkWrite-serverErrors.json new file mode 100644 index 0000000000..0a063ab4d9 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -0,0 +1,285 @@ +{ + "description": "retryable-writes bulkWrite serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds after retryable writeConcernError in first batch", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3 + } + }, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 2 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/bulkWrite.json b/test/retryable_writes/unified/bulkWrite.json new file mode 100644 index 0000000000..f2bd9e0eb8 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite.json @@ -0,0 +1,1083 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "First command is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "All commands are retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 7 + } + } + } + }, + { + 
"object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 4, + "x": 44 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + }, + { + "insertOne": { + "document": { + "_id": 5, + "x": 55 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "_id": 3, + "x": 333 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 3, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "2": 3, + "4": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 1, + "upsertedIds": { + "3": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 45 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ] + }, + { + "description": "Both commands are retried after their first statement fails", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "Second command is retried after its second statement fails", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 2 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "BulkWrite with unordered execution", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "ordered": false + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "First insertOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "Second updateOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Third updateOne is never committed", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Single-document write following deleteMany is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "x": 11 + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Single-document write following updateMany is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": 11 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "collection bulkWrite with updateMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + 
"$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "collection bulkWrite with deleteMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": {} + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-clientErrors.json b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 0000000000..d16e0c9c8d --- /dev/null +++ b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,351 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": 
"retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 0000000000..a1f7c8152a --- /dev/null +++ b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,882 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + 
"object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + 
"object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + 
"ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteMany.json b/test/retryable_writes/unified/deleteMany.json new file mode 100644 index 0000000000..381f377954 --- /dev/null +++ 
b/test/retryable_writes/unified/deleteMany.json @@ -0,0 +1,96 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany ignores retryWrites", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": {} + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne-errorLabels.json b/test/retryable_writes/unified/deleteOne-errorLabels.json new file mode 100644 index 0000000000..88920862ec --- /dev/null +++ b/test/retryable_writes/unified/deleteOne-errorLabels.json @@ -0,0 +1,266 @@ +{ + "description": "deleteOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": 
"deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne-serverErrors.json b/test/retryable_writes/unified/deleteOne-serverErrors.json new file mode 100644 index 0000000000..0808b7921d --- /dev/null +++ b/test/retryable_writes/unified/deleteOne-serverErrors.json @@ -0,0 +1,114 @@ +{ + "description": "deleteOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne fails with RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { 
+ "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne.json b/test/retryable_writes/unified/deleteOne.json new file mode 100644 index 0000000000..9e37ff8bcf --- /dev/null +++ b/test/retryable_writes/unified/deleteOne.json @@ -0,0 +1,218 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "DeleteOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete-errorLabels.json 
b/test/retryable_writes/unified/findOneAndDelete-errorLabels.json new file mode 100644 index 0000000000..8639873fca --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete-errorLabels.json @@ -0,0 +1,289 @@ +{ + "description": "findOneAndDelete-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete-serverErrors.json b/test/retryable_writes/unified/findOneAndDelete-serverErrors.json new file mode 100644 index 0000000000..f6d8e9d69c --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "findOneAndDelete-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete.json b/test/retryable_writes/unified/findOneAndDelete.json new file mode 100644 index 0000000000..ebfb8ce665 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete.json @@ -0,0 +1,235 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": 
[ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndDelete is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndReplace-errorLabels.json b/test/retryable_writes/unified/findOneAndReplace-errorLabels.json new file mode 100644 index 0000000000..78db52e75d --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace-errorLabels.json @@ -0,0 +1,301 @@ +{ + "description": "findOneAndReplace-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + 
] + } + ], + "tests": [ + { + "description": "FindOneAndReplace succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_writes/unified/findOneAndReplace-serverErrors.json b/test/retryable_writes/unified/findOneAndReplace-serverErrors.json new file mode 100644 index 0000000000..1c355c3ebf --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "findOneAndReplace-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndReplace.json b/test/retryable_writes/unified/findOneAndReplace.json new file mode 100644 index 0000000000..638d15a41d --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace.json @@ -0,0 +1,243 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + 
{ + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json b/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json new file mode 100644 index 0000000000..38b3f7ba44 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json @@ -0,0 +1,305 @@ +{ + "description": "findOneAndUpdate-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": 
"collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json b/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json new file mode 100644 index 0000000000..150012ac72 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json @@ -0,0 +1,120 @@ +{ + "description": "findOneAndUpdate-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + 
"minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate.json b/test/retryable_writes/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..eefe98ae11 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate.json @@ -0,0 +1,245 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } 
+ ] + } + ] + }, + { + "description": "FindOneAndUpdate is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json new file mode 100644 index 0000000000..93cb2e849e --- /dev/null +++ b/test/retryable_writes/unified/handshakeError.json @@ -0,0 +1,2015 @@ +{ + "description": "retryable writes handshake failures", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + 
] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "collection.insertOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { +
"commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + 
], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + 
{ + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + 
"commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndUpdate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": 
{ + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertMany-errorLabels.json b/test/retryable_writes/unified/insertMany-errorLabels.json new file mode 100644 index 0000000000..5254ba7cb2 --- /dev/null +++ b/test/retryable_writes/unified/insertMany-errorLabels.json @@ -0,0 +1,335 @@ +{ + "description": "insertMany-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertMany-serverErrors.json 
b/test/retryable_writes/unified/insertMany-serverErrors.json new file mode 100644 index 0000000000..f5f513603c --- /dev/null +++ b/test/retryable_writes/unified/insertMany-serverErrors.json @@ -0,0 +1,114 @@ +{ + "description": "insertMany-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertMany.json b/test/retryable_writes/unified/insertMany.json new file mode 100644 index 0000000000..35a18c46c6 --- /dev/null +++ b/test/retryable_writes/unified/insertMany.json @@ -0,0 +1,290 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany succeeds after one network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 
33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "InsertMany with unordered execution", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "InsertMany fails after multiple network errors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": "alwaysOn", + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-errorLabels.json b/test/retryable_writes/unified/insertOne-errorLabels.json new file mode 100644 index 0000000000..39f31a8aa6 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-errorLabels.json @@ -0,0 +1,1127 @@ +{ + "description": "insertOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "InsertOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + 
"errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + }, + { + "description": "InsertOne succeeds after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": 
[ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after HostNotFound", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + 
"collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-noWritesPerformedError.json b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json new file mode 100644 index 0000000000..3194e91c5c --- /dev/null +++ b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json @@ -0,0 +1,90 @@ +{ + "description": "retryable-writes insertOne noWritesPerformedErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "no-writes-performed-collection" + } + } + ], + "tests": [ + { + "description": "InsertOne fails after NoWritesPerformed error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 64, + "errorLabels": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 64, + "errorLabelsContain": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "no-writes-performed-collection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json b/test/retryable_writes/unified/insertOne-serverErrors.json new file mode 100644 index 0000000000..8edafb7029 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -0,0 +1,864 @@ +{ + "description": "retryable-writes insertOne serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + 
} + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable writeConcernError", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is added based on top-level code in pre-4.4 server response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is not added based on writeConcernError in pre-4.4 mongos response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after connection failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + } + } + }, + 
{ + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError WriteConcernTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection 
failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne.json b/test/retryable_writes/unified/insertOne.json new file mode 100644 index 0000000000..a6afdbf224 --- /dev/null +++ b/test/retryable_writes/unified/insertOne.json @@ -0,0 +1,245 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "InsertOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + 
"description": "InsertOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/replaceOne-errorLabels.json b/test/retryable_writes/unified/replaceOne-errorLabels.json new file mode 100644 index 0000000000..22c4561ae7 --- /dev/null +++ b/test/retryable_writes/unified/replaceOne-errorLabels.json @@ -0,0 +1,300 @@ +{ + "description": "replaceOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/replaceOne-serverErrors.json b/test/retryable_writes/unified/replaceOne-serverErrors.json new file mode 100644 index 0000000000..c957db7244 --- /dev/null +++ b/test/retryable_writes/unified/replaceOne-serverErrors.json @@ -0,0 +1,118 @@ +{ + "description": "replaceOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_writes/unified/replaceOne.json b/test/retryable_writes/unified/replaceOne.json new file mode 100644 index 0000000000..ee6e37d3bb --- /dev/null +++ b/test/retryable_writes/unified/replaceOne.json @@ -0,0 +1,242 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_writes/unified/unacknowledged-write-concern.json b/test/retryable_writes/unified/unacknowledged-write-concern.json new file mode 100644 index 0000000000..eaa114acfd --- /dev/null +++ b/test/retryable_writes/unified/unacknowledged-write-concern.json @@ -0,0 +1,77 @@ +{ + "description": "unacknowledged write does not set txnNumber", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "tests": [ + { + "description": "unacknowledged write does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateMany.json b/test/retryable_writes/unified/updateMany.json new file mode 100644 index 0000000000..12c5204ee9 --- /dev/null +++ b/test/retryable_writes/unified/updateMany.json @@ -0,0 +1,112 @@ +{ + "description": "updateMany", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany ignores retryWrites", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne-errorLabels.json b/test/retryable_writes/unified/updateOne-errorLabels.json new file mode 100644 index 0000000000..e44cef45f6 --- /dev/null +++ b/test/retryable_writes/unified/updateOne-errorLabels.json @@ -0,0 +1,304 @@ +{ + "description": "updateOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + 
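Taken together, `unacknowledged-write-concern.json` and `updateMany.json` above pin down when a driver may attach `txnNumber`: multi-document writes and unacknowledged (`w:0`) writes never carry one, because neither is retryable. A rough way to observe this outside the harness, assuming pymongo against a local server (the listener class is invented for illustration):

```python
from pymongo import MongoClient, monitoring
from pymongo.write_concern import WriteConcern

class TxnNumberWatcher(monitoring.CommandListener):
    """Asserts that unacknowledged inserts go out without a txnNumber."""
    def started(self, event):
        if event.command_name == "insert":
            assert "txnNumber" not in event.command
    def succeeded(self, event):
        pass
    def failed(self, event):
        pass

client = MongoClient("mongodb://localhost:27017",
                     retryWrites=True,
                     event_listeners=[TxnNumberWatcher()])
coll = client["retryable-writes-tests"].get_collection(
    "coll0", write_concern=WriteConcern(w=0))
coll.insert_one({"_id": 1, "x": 11})  # w:0, so no txnNumber is attached
```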
"createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": 
"updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne-serverErrors.json b/test/retryable_writes/unified/updateOne-serverErrors.json new file mode 100644 index 0000000000..648834ada4 --- /dev/null +++ b/test/retryable_writes/unified/updateOne-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "updateOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne.json b/test/retryable_writes/unified/updateOne.json new file mode 100644 index 0000000000..99ffba8e21 --- /dev/null +++ b/test/retryable_writes/unified/updateOne.json @@ -0,0 +1,424 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 
+ } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 3 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + 
"update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 3 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json new file mode 100644 index 0000000000..fde9de92e6 --- /dev/null +++ b/test/run_command/unified/runCommand.json @@ -0,0 +1,634 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + 
"object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + "arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + 
"documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCursorCommand.json b/test/run_command/unified/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/test/run_command/unified/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + 
"client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + 
"$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": 
"collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", 
+ "batchSize": 2, + "command": { + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json new file mode 100644 index 0000000000..097203694e --- /dev/null +++ b/test/sdam_monitoring/discovered_standalone.json @@ -0,0 +1,105 @@ +{ + "description": "Monitoring a discovered standalone connection", + "uri": "mongodb://a:27017/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/load_balancer.json b/test/sdam_monitoring/load_balancer.json new file mode 100644 index 0000000000..09b1537193 --- /dev/null +++ b/test/sdam_monitoring/load_balancer.json @@ -0,0 +1,93 @@ +{ + "description": "Monitoring a load balancer", + "uri": "mongodb://a:27017/?loadBalanced=true", + 
"phases": [ + { + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json new file mode 100644 index 0000000000..41d048729d --- /dev/null +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -0,0 +1,151 @@ +{ + "description": "Monitoring a topology that is a replica set with no primary connected", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "setVersion": 1, + "primary": "b:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "b:27017", + "setName": "rs", + "type": "RSSecondary" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": 
"ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "b:27017", + "setName": "rs", + "type": "RSSecondary" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "PossiblePrimary" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json new file mode 100644 index 0000000000..3ccc127d1d --- /dev/null +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -0,0 +1,150 @@ +{ + "description": "Monitoring a topology that is a replica set with a primary connected", + "uri": "mongodb://a,b", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json new file mode 100644 index 0000000000..dc6fbe7e7d --- /dev/null +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -0,0 +1,161 @@ +{ + "description": "Monitoring a replica set with non member", + "uri": "mongodb://a,b/", + "phases": [ + { + "responses": [], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + 
"newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + } + ] + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true + } + ] + ], + "outcome": { + "events": [ + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + } + }, + { + "server_closed_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json new file mode 100644 index 0000000000..1f4e5c1d71 --- /dev/null +++ b/test/sdam_monitoring/required_replica_set.json @@ -0,0 +1,152 @@ +{ + "description": "Monitoring a topology that is required to be a replica set", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], 
+ "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json new file mode 100644 index 0000000000..f375a383ca --- /dev/null +++ b/test/sdam_monitoring/standalone.json @@ -0,0 +1,105 @@ +{ + "description": "Monitoring a direct connection", + "uri": "mongodb://a:27017/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json new file mode 100644 index 0000000000..4d046ff8ed --- /dev/null +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -0,0 +1,115 @@ +{ + "description": "Monitoring a direct connection - suppress update events for equal server descriptions", + "uri": "mongodb://a:27017/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + 
"isWritablePrimary": true, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/server_selection/in_window/equilibrium.json b/test/server_selection/in_window/equilibrium.json new file mode 100644 index 0000000000..c5f177d49b --- /dev/null +++ b/test/server_selection/in_window/equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 5 + }, + { + "address": "b:27017", + "operation_count": 5 + }, + { + "address": "c:27017", + "operation_count": 5 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/many-choices.json b/test/server_selection/in_window/many-choices.json new file mode 100644 index 0000000000..7e940513ef --- /dev/null +++ b/test/server_selection/in_window/many-choices.json @@ -0,0 +1,106 @@ +{ + "description": "Selections from many choices occur at correct frequencies", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "d:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "e:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "f:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "g:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "i:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 
5 + }, + { + "address": "c:27017", + "operation_count": 5 + }, + { + "address": "d:27017", + "operation_count": 10 + }, + { + "address": "e:27017", + "operation_count": 10 + }, + { + "address": "f:27017", + "operation_count": 20 + }, + { + "address": "g:27017", + "operation_count": 20 + }, + { + "address": "h:27017", + "operation_count": 50 + }, + { + "address": "i:27017", + "operation_count": 60 + } + ], + "iterations": 10000, + "outcome": { + "tolerance": 0.03, + "expected_frequencies": { + "a:27017": 0.22, + "b:27017": 0.18, + "c:27017": 0.18, + "d:27017": 0.125, + "e:27017": 0.125, + "f:27017": 0.074, + "g:27017": 0.074, + "h:27017": 0.0277, + "i:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/one-least-two-tied.json b/test/server_selection/in_window/one-least-two-tied.json new file mode 100644 index 0000000000..ed7526e716 --- /dev/null +++ b/test/server_selection/in_window/one-least-two-tied.json @@ -0,0 +1,46 @@ +{ + "description": "Least operations gets most selections, two tied share the rest", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 16 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.165, + "b:27017": 0.66, + "c:27017": 0.165 + } + } +} diff --git a/test/server_selection/in_window/rs-equilibrium.json b/test/server_selection/in_window/rs-equilibrium.json new file mode 100644 index 0000000000..61c6687e50 --- /dev/null +++ b/test/server_selection/in_window/rs-equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 6 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 6 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/rs-three-choices.json b/test/server_selection/in_window/rs-three-choices.json new file mode 100644 index 0000000000..3fdc15205c --- /dev/null +++ b/test/server_selection/in_window/rs-three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + 
"operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/three-choices.json b/test/server_selection/in_window/three-choices.json new file mode 100644 index 0000000000..7b5b414549 --- /dev/null +++ b/test/server_selection/in_window/three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-choices.json b/test/server_selection/in_window/two-choices.json new file mode 100644 index 0000000000..2c7a605d8d --- /dev/null +++ b/test/server_selection/in_window/two-choices.json @@ -0,0 +1,36 @@ +{ + "description": "Better of two choices always selected", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 5 + } + ], + "iterations": 100, + "outcome": { + "tolerance": 0, + "expected_frequencies": { + "a:27017": 1, + "b:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-least.json b/test/server_selection/in_window/two-least.json new file mode 100644 index 0000000000..73214fc647 --- /dev/null +++ b/test/server_selection/in_window/two-least.json @@ -0,0 +1,46 @@ +{ + "description": "Two tied for least operations share all selections", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 10 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.5, + "b:27017": 0.5, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/rtt/first_value.json b/test/server_selection/rtt/first_value.json new file mode 100644 index 0000000000..421944da36 --- /dev/null +++ b/test/server_selection/rtt/first_value.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": "NULL", + "new_rtt_ms": 10, + "new_avg_rtt": 10 +} diff --git a/test/server_selection/rtt/first_value_zero.json b/test/server_selection/rtt/first_value_zero.json new file mode 100644 index 0000000000..d5bfc41b25 --- /dev/null +++ b/test/server_selection/rtt/first_value_zero.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": "NULL", + "new_rtt_ms": 0, + "new_avg_rtt": 0 +} diff --git 
a/test/server_selection/rtt/value_test_1.json b/test/server_selection/rtt/value_test_1.json new file mode 100644 index 0000000000..ed6a80ce29 --- /dev/null +++ b/test/server_selection/rtt/value_test_1.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": 0, + "new_rtt_ms": 5, + "new_avg_rtt": 1 +} diff --git a/test/server_selection/rtt/value_test_2.json b/test/server_selection/rtt/value_test_2.json new file mode 100644 index 0000000000..ccb5a0173b --- /dev/null +++ b/test/server_selection/rtt/value_test_2.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": 3.1, + "new_rtt_ms": 36, + "new_avg_rtt": 9.68 +} diff --git a/test/server_selection/rtt/value_test_3.json b/test/server_selection/rtt/value_test_3.json new file mode 100644 index 0000000000..6921c94d36 --- /dev/null +++ b/test/server_selection/rtt/value_test_3.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": 9.12, + "new_rtt_ms": 9.12, + "new_avg_rtt": 9.12 +} diff --git a/test/server_selection/rtt/value_test_4.json b/test/server_selection/rtt/value_test_4.json new file mode 100644 index 0000000000..d9ce3800b8 --- /dev/null +++ b/test/server_selection/rtt/value_test_4.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": 1, + "new_rtt_ms": 1000, + "new_avg_rtt": 200.8 +} diff --git a/test/server_selection/rtt/value_test_5.json b/test/server_selection/rtt/value_test_5.json new file mode 100644 index 0000000000..9ae33bc143 --- /dev/null +++ b/test/server_selection/rtt/value_test_5.json @@ -0,0 +1,5 @@ +{ + "avg_rtt_ms": 0, + "new_rtt_ms": 0.25, + "new_avg_rtt": 0.05 +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Nearest.json b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json new file mode 100644 index 0000000000..76fa336d55 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Primary.json b/test/server_selection/server_selection/LoadBalanced/read/Primary.json new file mode 100644 index 0000000000..5a4a0aa93a --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json new file mode 100644 index 0000000000..9aa151cd06 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + 
"operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Secondary.json b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json new file mode 100644 index 0000000000..c49e30370b --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json new file mode 100644 index 0000000000..18e46877b4 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Nearest.json b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json new file mode 100644 index 0000000000..e52e343332 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Primary.json b/test/server_selection/server_selection/LoadBalanced/write/Primary.json new file mode 100644 index 0000000000..9061b25208 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": 
"LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json new file mode 100644 index 0000000000..5c94dc410d --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Secondary.json b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json new file mode 100644 index 0000000000..5493867e12 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json new file mode 100644 index 0000000000..f7905f1d5f --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json new file mode 100644 index 0000000000..aa48679e86 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + 
"data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json new file mode 100644 index 0000000000..1fcfd52a47 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json @@ -0,0 +1,68 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json new file mode 100644 index 0000000000..b72895d8a8 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json @@ -0,0 +1,34 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json new file mode 100644 index 0000000000..4d286af830 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json new file mode 100644 index 0000000000..bf9c70b420 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + 
"avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json new file mode 100644 index 0000000000..f0f3fa9ea1 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json @@ -0,0 +1,29 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json new file mode 100644 index 0000000000..f87ef4f617 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json @@ -0,0 +1,58 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json new file mode 100644 index 0000000000..ee96229927 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json @@ -0,0 +1,34 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json new file mode 100644 index 0000000000..3b8f1e97cd --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 
5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json new file mode 100644 index 0000000000..c3142ec115 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json new file mode 100644 index 0000000000..a2c18bb7d2 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json @@ -0,0 +1,34 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json new file mode 100644 index 0000000000..b319918e92 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": 
"two", + "data_center": "sf" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json new file mode 100644 index 0000000000..8f64d95ecb --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "two", + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json new file mode 100644 index 0000000000..4931e1019a --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json @@ -0,0 +1,34 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json new file mode 100644 index 0000000000..e136cf12a4 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json @@ -0,0 +1,34 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git 
a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json new file mode 100644 index 0000000000..cfe4965938 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json @@ -0,0 +1,76 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json new file mode 100644 index 0000000000..67296d434f --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json @@ -0,0 +1,84 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json new file mode 100644 index 0000000000..a3a85c9a83 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json @@ -0,0 +1,42 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + 
}, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json new file mode 100644 index 0000000000..8da1482e96 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json @@ -0,0 +1,55 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json new file mode 100644 index 0000000000..306171f3a2 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json @@ -0,0 +1,58 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json new file mode 100644 index 0000000000..722f1cfb1a --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + 
"tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json new file mode 100644 index 0000000000..23864a278c --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json @@ -0,0 +1,68 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json new file mode 100644 index 0000000000..d07c24218d --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json @@ -0,0 +1,68 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json new file mode 100644 index 0000000000..f893cc9f82 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json @@ -0,0 +1,60 @@ +{ 
+ "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json new file mode 100644 index 0000000000..a74a2dbf33 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json @@ -0,0 +1,52 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "sf" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json new file mode 100644 index 0000000000..1272180666 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json @@ -0,0 +1,42 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json new file mode 100644 index 0000000000..65ab3dc640 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + 
}, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Nearest.json b/test/server_selection/server_selection/Sharded/read/Nearest.json new file mode 100644 index 0000000000..705a784a0b --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Primary.json b/test/server_selection/server_selection/Sharded/read/Primary.json new file mode 100644 index 0000000000..7a321be2bb --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json new file mode 100644 index 0000000000..e9bc1421f9 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Secondary.json b/test/server_selection/server_selection/Sharded/read/Secondary.json new file mode 100644 
index 0000000000..49813f7b9e --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json new file mode 100644 index 0000000000..62fa13f297 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Nearest.json b/test/server_selection/server_selection/Sharded/write/Nearest.json new file mode 100644 index 0000000000..aef7f02ec7 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Primary.json b/test/server_selection/server_selection/Sharded/write/Primary.json new file mode 100644 index 0000000000..f6ce2e75c1 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git 
a/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json new file mode 100644 index 0000000000..25f56a5359 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Secondary.json b/test/server_selection/server_selection/Sharded/write/Secondary.json new file mode 100644 index 0000000000..1fa026f716 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json new file mode 100644 index 0000000000..f9467472aa --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Single/read/SecondaryPreferred.json b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json new file mode 100644 index 0000000000..e60496dfdf --- /dev/null +++ b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json @@ -0,0 +1,44 @@ +{ + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": 
"Standalone", + "tags": { + "data_center": "dc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ] +} diff --git a/test/server_selection/server_selection/Single/write/SecondaryPreferred.json b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json new file mode 100644 index 0000000000..34fe91d5a2 --- /dev/null +++ b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json @@ -0,0 +1,44 @@ +{ + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ] +} diff --git a/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json new file mode 100644 index 0000000000..0ae8075fba --- /dev/null +++ b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json @@ -0,0 +1,17 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/read/ghost.json b/test/server_selection/server_selection/Unknown/read/ghost.json new file mode 100644 index 0000000000..76d3d774e8 --- /dev/null +++ b/test/server_selection/server_selection/Unknown/read/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json new file mode 100644 index 0000000000..a70eece62c --- /dev/null +++ b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json @@ -0,0 +1,17 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/write/ghost.json b/test/server_selection/server_selection/Unknown/write/ghost.json new file mode 100644 index 0000000000..65caa4cd0a --- /dev/null +++ b/test/server_selection/server_selection/Unknown/write/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection_logging/load-balanced.json 
b/test/server_selection_logging/load-balanced.json new file mode 100644 index 0000000000..5855c4e991 --- /dev/null +++ b/test/server_selection_logging/load-balanced.json @@ -0,0 +1,107 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "heartbeatFrequencyMS": 500 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + } + ], + "tests": [ + { + "description": "A successful operation - load balanced cluster", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "LoadBalancer" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json new file mode 100644 index 0000000000..ccc2623166 --- /dev/null +++ b/test/server_selection_logging/operation-id.json @@ -0,0 +1,418 @@ +{ + "description": "operation-id", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, + "tests": [ + { + "description": "Successful bulkWrite operation: log messages have operationIds", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + 
"operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + }, + { + "description": "Failed bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + 
"name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json new file mode 100644 index 0000000000..830b1ea51a --- /dev/null +++ b/test/server_selection_logging/replica-set.json @@ -0,0 +1,228 @@ +{ + "description": "replica-set-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + }, + { + "collection": { + "id": "unsatisfiableRPColl", + "database": "database", + "collectionName": "unsatisfiableRPColl", + "collectionOptions": { + "readPreference": { + "mode": "Secondary", + "tagSets": [ + { + "nonexistenttag": "a" + } + ] + } + } + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Server selection fails due to 
unsatisfiable read preference", + "runOnRequirements": [ + { + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "find", + "object": "unsatisfiableRPColl", + "arguments": { + "filter": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json new file mode 100644 index 0000000000..346c050f9e --- /dev/null +++ b/test/server_selection_logging/sharded.json @@ -0,0 +1,237 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + 
"runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json new file mode 100644 index 0000000000..fa01ad9911 --- /dev/null +++ b/test/server_selection_logging/standalone.json @@ -0,0 +1,235 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + 
"operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json new file mode 100644 index 0000000000..d7a1c6aba7 --- /dev/null +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -0,0 +1,976 @@ +{ + "description": "driver-sessions-dirty-session-errors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Dirty explicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { 
+ "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + 
"query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "find", + "object": "collection0", 
+ "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read not returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + 
} + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": 1 + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-server-support.json b/test/sessions/driver-sessions-server-support.json new file mode 100644 index 0000000000..55312b32eb --- /dev/null +++ b/test/sessions/driver-sessions-server-support.json @@ -0,0 +1,256 @@ +{ + "description": "driver-sessions-server-support", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/implicit-sessions-default-causal-consistency.json b/test/sessions/implicit-sessions-default-causal-consistency.json new file mode 100644 index 0000000000..517c8ebc63 --- /dev/null +++ b/test/sessions/implicit-sessions-default-causal-consistency.json @@ -0,0 +1,318 @@ +{ + "description": "implicit sessions default causal consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "implicit-cc-tests" + } + }, + { + "collection": { + "id": "collectionDefault", + "database": "database0", + "collectionName": "coll-default" + } + }, + { + "collection": { + "id": "collectionSnapshot", + "database": "database0", + "collectionName": "coll-snapshot", + "collectionOptions": { + "readConcern": { + "level": "snapshot" + } + } + } + }, + { + "collection": { + "id": "collectionlinearizable", + "database": "database0", + "collectionName": "coll-linearizable", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll-default", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "default" + } + ] + }, + { + "collectionName": "coll-snapshot", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "snapshot" + } + ] + }, + { + "collectionName": "coll-linearizable", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "tests": [ + { + "description": "readConcern is not sent on retried read in implicit session when readConcern level is not specified", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionDefault", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "default" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is snapshot", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionSnapshot", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "snapshot" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is linearizable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionlinearizable", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-not-supported-client-error.json b/test/sessions/snapshot-sessions-not-supported-client-error.json new file mode 100644 index 0000000000..208e4cfe63 --- /dev/null +++ b/test/sessions/snapshot-sessions-not-supported-client-error.json @@ -0,0 +1,128 @@ +{ + "description": "snapshot-sessions-not-supported-client-error", + 
"schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "4.4.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Client error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Client error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Client error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-not-supported-server-error.json b/test/sessions/snapshot-sessions-not-supported-server-error.json new file mode 100644 index 0000000000..79213f314f --- /dev/null +++ b/test/sessions/snapshot-sessions-not-supported-server-error.json @@ -0,0 +1,187 @@ +{ + "description": "snapshot-sessions-not-supported-server-error", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + 
"atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "distinct" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-unsupported-ops.json b/test/sessions/snapshot-sessions-unsupported-ops.json new file mode 100644 index 0000000000..c41f74d337 --- /dev/null +++ b/test/sessions/snapshot-sessions-unsupported-ops.json @@ -0,0 +1,493 @@ +{ + "description": "snapshot-sessions-unsupported-ops", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on insertOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 22, + "x": 22 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on insertMany with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 22, + "x": 22 + }, + { + "_id": 33, + "x": 33 + } + ] + }, + 
"expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on deleteOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on updateOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on findOneAndUpdate with snapshot", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listDatabases with snapshot", + "operations": [ + { + "name": "listDatabases", + "object": "client0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listCollections with snapshot", + "operations": [ + { + "name": "listCollections", + "object": "database0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + 
"atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listIndexes with snapshot", + "operations": [ + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on runCommand with snapshot", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "listCollections", + "command": { + "listCollections": 1 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions.json b/test/sessions/snapshot-sessions.json new file mode 100644 index 0000000000..260f8b6f48 --- /dev/null +++ b/test/sessions/snapshot-sessions.json @@ -0,0 +1,993 @@ +{ + "description": "snapshot-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "findAndModify", + "insert", + "update" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + }, + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Find operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + 
"$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Distinct operation with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 12 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 13 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectResult": [ + 11, + 13 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": 
"collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate operation with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "countDocuments operation with snapshot", + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + 
"readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Mixed operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Write commands with snapshot session do not affect snapshot reads", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 22, + "x": 33 + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "First snapshot read does not send atClusterTime", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + }, + "commandName": "find", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": 
"StartTransaction fails in snapshot session", + "operations": [ + { + "name": "startTransaction", + "object": "session0", + "expectError": { + "isError": true, + "isClientError": true, + "errorContains": "Transactions are not supported in snapshot sessions" + } + } + ] + } + ] +} diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py new file mode 100644 index 0000000000..bc1bacce33 --- /dev/null +++ b/test/sigstop_sigcont.py @@ -0,0 +1,94 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Used by test_client.TestClient.test_sigstop_sigcont.""" +from __future__ import annotations + +import logging +import os +import sys + +sys.path[0:0] = [""] + +from pymongo import monitoring +from pymongo.server_api import ServerApi +from pymongo.synchronous.mongo_client import MongoClient + +SERVER_API = None +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +if MONGODB_API_VERSION: + SERVER_API = ServerApi(MONGODB_API_VERSION) + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """Log events until the listener is closed.""" + + def __init__(self): + self.closed = False + + def close(self): + self.closed = True + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + if self.closed: + return + logging.warning("%s", event) + + +def main(uri: str) -> None: + heartbeat_logger = HeartbeatLogger() + client = MongoClient( + uri, + event_listeners=[heartbeat_logger], + heartbeatFrequencyMS=500, + connectTimeoutMS=500, + server_api=SERVER_API, + ) + client.admin.command("ping") + logging.info("TEST STARTED") + # test_sigstop_sigcont will SIGSTOP and SIGCONT this process in this loop. 
+ while True: + try: + data = input('Type "q" to quit: ') + except EOFError: + break + if data == "q": + break + client.admin.command("ping") + logging.info("TEST COMPLETED") + heartbeat_logger.close() + client.close() + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("unknown or missing options") + print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") + sys.exit(1) + + # Enable logs in this format: + # 2022-03-30 12:40:55,582 INFO + FORMAT = "%(asctime)s %(levelname)s %(message)s" + logging.basicConfig(format=FORMAT, level=logging.INFO) + main(sys.argv[1]) diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json new file mode 100644 index 0000000000..8e459115c1 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "ssl": true, + "directConnection": false + }, + "ping": true +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-no-results.json b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json new file mode 100644 index 0000000000..7f49416aa3 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test4.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because no SRV records are present for this URI." +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json new file mode 100644 index 0000000000..2133dee532 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?replicaSet=replset", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced=true is incompatible with replicaSet" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json new file mode 100644 index 0000000000..f425c06b30 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced is true but the SRV record resolves to multiple hosts" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json new file mode 100644 index 0000000000..39bff5a23b --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json new file mode 100644 index 0000000000..593a521c26 --- /dev/null +++ 
b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true (TXT)" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json new file mode 100644 index 0000000000..d03a174b1e --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..474a314fd7 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json new file mode 100644 index 0000000000..dfc90dc96d --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test23.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:8000" + ], + "hosts": [ + "localhost.test.build.10gen.cc:8000" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json new file mode 100644 index 0000000000..b5fcfd2c07 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some%2Cdb?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas.json b/test/srv_seedlist/replica-set/dbname-with-commas.json new file mode 100644 index 0000000000..c1e85f4b99 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some,db?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/direct-connection-false.json b/test/srv_seedlist/replica-set/direct-connection-false.json new file mode 100644 index 0000000000..3f14ff94e7 --- /dev/null +++ 
b/test/srv_seedlist/replica-set/direct-connection-false.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "directConnection": false + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/direct-connection-true.json b/test/srv_seedlist/replica-set/direct-connection-true.json new file mode 100644 index 0000000000..ace6700106 --- /dev/null +++ b/test/srv_seedlist/replica-set/direct-connection-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because directConnection=true is incompatible with SRV URIs." +} diff --git a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json new file mode 100644 index 0000000000..4493628be9 --- /dev/null +++ b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json @@ -0,0 +1,22 @@ +{ + "uri": "mongodb+srv://b*b%40f3tt%3D:%244to%40L8%3DMC@test3.test.build.10gen.cc/mydb%3F?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "user": "b*b@f3tt=", + "password": "$4to@L8=MC", + "db": "mydb?" + }, + "ping": false, + "comment": "Encoded user, pass, and DB parse correctly" +} diff --git a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json new file mode 100644 index 0000000000..682d32a742 --- /dev/null +++ b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test21.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "loadBalanced": false, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/longer-parent-in-return.json b/test/srv_seedlist/replica-set/longer-parent-in-return.json new file mode 100644 index 0000000000..ebe3fe1e77 --- /dev/null +++ b/test/srv_seedlist/replica-set/longer-parent-in-return.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.sub.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "ping": true, + "comment": "Is correct, as returned host name shared the URI root \"test.build.10gen.cc\"." +} diff --git a/test/srv_seedlist/replica-set/misformatted-option.json b/test/srv_seedlist/replica-set/misformatted-option.json new file mode 100644 index 0000000000..3c8c29ace6 --- /dev/null +++ b/test/srv_seedlist/replica-set/misformatted-option.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test8.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because the options in the TXT record are incorrectly formatted (misses value)." 
+} diff --git a/test/srv_seedlist/replica-set/no-results.json b/test/srv_seedlist/replica-set/no-results.json new file mode 100644 index 0000000000..c1dc02d281 --- /dev/null +++ b/test/srv_seedlist/replica-set/no-results.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test4.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because no SRV records are present for this URI." +} diff --git a/test/srv_seedlist/replica-set/not-enough-parts.json b/test/srv_seedlist/replica-set/not-enough-parts.json new file mode 100644 index 0000000000..7cfce2ec57 --- /dev/null +++ b/test/srv_seedlist/replica-set/not-enough-parts.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because host in URI does not have {hostname}, {domainname} and {tld}." +} diff --git a/test/srv_seedlist/replica-set/one-result-default-port.json b/test/srv_seedlist/replica-set/one-result-default-port.json new file mode 100644 index 0000000000..9f7733de80 --- /dev/null +++ b/test/srv_seedlist/replica-set/one-result-default-port.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json new file mode 100644 index 0000000000..1d740b1b59 --- /dev/null +++ b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test11.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/one-txt-record.json b/test/srv_seedlist/replica-set/one-txt-record.json new file mode 100644 index 0000000000..ecdb0a7e2a --- /dev/null +++ b/test/srv_seedlist/replica-set/one-txt-record.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "authSource": "thisDB", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/parent-part-mismatch1.json b/test/srv_seedlist/replica-set/parent-part-mismatch1.json new file mode 100644 index 0000000000..8d0147a48b --- /dev/null +++ b/test/srv_seedlist/replica-set/parent-part-mismatch1.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test14.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name's part \"not-test\" mismatches URI parent part \"test\"." +} diff --git a/test/srv_seedlist/replica-set/parent-part-mismatch2.json b/test/srv_seedlist/replica-set/parent-part-mismatch2.json new file mode 100644 index 0000000000..996249eb99 --- /dev/null +++ b/test/srv_seedlist/replica-set/parent-part-mismatch2.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test15.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name's part \"not-build\" mismatches URI parent part \"build\"." 
+} diff --git a/test/srv_seedlist/replica-set/parent-part-mismatch3.json b/test/srv_seedlist/replica-set/parent-part-mismatch3.json new file mode 100644 index 0000000000..69e724af6c --- /dev/null +++ b/test/srv_seedlist/replica-set/parent-part-mismatch3.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test16.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name's part \"not-10gen\" mismatches URI parent part \"10gen\"." +} diff --git a/test/srv_seedlist/replica-set/parent-part-mismatch4.json b/test/srv_seedlist/replica-set/parent-part-mismatch4.json new file mode 100644 index 0000000000..254168e34c --- /dev/null +++ b/test/srv_seedlist/replica-set/parent-part-mismatch4.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test17.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name's TLD \"not-cc\" mismatches URI TLD \"cc\"." +} diff --git a/test/srv_seedlist/replica-set/parent-part-mismatch5.json b/test/srv_seedlist/replica-set/parent-part-mismatch5.json new file mode 100644 index 0000000000..92c024b4f3 --- /dev/null +++ b/test/srv_seedlist/replica-set/parent-part-mismatch5.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test19.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because one of the returned host names' domain name parts \"evil\" mismatches \"test\"." +} diff --git a/test/srv_seedlist/replica-set/returned-parent-too-short.json b/test/srv_seedlist/replica-set/returned-parent-too-short.json new file mode 100644 index 0000000000..676eb0c0d0 --- /dev/null +++ b/test/srv_seedlist/replica-set/returned-parent-too-short.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test13.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name's parent (build.10gen.cc) misses \"test.\"" +} diff --git a/test/srv_seedlist/replica-set/returned-parent-wrong.json b/test/srv_seedlist/replica-set/returned-parent-wrong.json new file mode 100644 index 0000000000..3aabfd8196 --- /dev/null +++ b/test/srv_seedlist/replica-set/returned-parent-wrong.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test12.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because returned host name is too short and mismatches a parent." 
+} diff --git a/test/srv_seedlist/replica-set/srv-service-name.json b/test/srv_seedlist/replica-set/srv-service-name.json new file mode 100644 index 0000000000..e320c2ca3e --- /dev/null +++ b/test/srv_seedlist/replica-set/srv-service-name.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "srvServiceName": "customname" + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json new file mode 100644 index 0000000000..6de1e37fa5 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option (TXT)" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json new file mode 100644 index 0000000000..f968757502 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..70edacfd06 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,18 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..72540ed408 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..a9d6dd6fd9 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 1, + "ssl": true + }, + "ping": true 
+} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..e232edb9eb --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json @@ -0,0 +1,18 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "authSource": "thisDB", + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json new file mode 100644 index 0000000000..3421a35a3d --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json @@ -0,0 +1,18 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/two-results-default-port.json b/test/srv_seedlist/replica-set/two-results-default-port.json new file mode 100644 index 0000000000..43efcc6310 --- /dev/null +++ b/test/srv_seedlist/replica-set/two-results-default-port.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/two-results-nonstandard-port.json b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json new file mode 100644 index 0000000000..f6e8e415a7 --- /dev/null +++ b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27018", + "localhost.test.build.10gen.cc:27019" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/two-txt-records.json b/test/srv_seedlist/replica-set/two-txt-records.json new file mode 100644 index 0000000000..f0654ef6cb --- /dev/null +++ b/test/srv_seedlist/replica-set/two-txt-records.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test6.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because there are two TXT records." +} diff --git a/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json b/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json new file mode 100644 index 0000000000..2a5cf2f007 --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because socketTimeoutMS is not an allowed option." 
+} diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json new file mode 100644 index 0000000000..3d84cfe446 --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?ssl=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "authSource": "thisDB", + "ssl": false + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json new file mode 100644 index 0000000000..1a5a240680 --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "authSource": "otherDB", + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json b/test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json new file mode 100644 index 0000000000..0d333a459d --- /dev/null +++ b/test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test7.test.build.10gen.cc/", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because \"ssl\" is not an allowed option." +} diff --git a/test/srv_seedlist/replica-set/uri-with-admin-database.json b/test/srv_seedlist/replica-set/uri-with-admin-database.json new file mode 100644 index 0000000000..c5513a0dad --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-admin-database.json @@ -0,0 +1,20 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/adminDB?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "auth_database": "adminDB" + }, + "ping": true +} diff --git a/test/srv_seedlist/replica-set/uri-with-auth.json b/test/srv_seedlist/replica-set/uri-with-auth.json new file mode 100644 index 0000000000..872f997cc7 --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-auth.json @@ -0,0 +1,22 @@ +{ + "uri": "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "user": "auser", + "password": "apass" + }, + "ping": false, + "comment": "Should preserve auth credentials" +} diff --git a/test/srv_seedlist/replica-set/uri-with-port.json b/test/srv_seedlist/replica-set/uri-with-port.json new file mode 100644 index 0000000000..b981e2a1bf --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-port.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because the mongodb+srv 
URI includes a port." +} diff --git a/test/srv_seedlist/replica-set/uri-with-two-hosts.json b/test/srv_seedlist/replica-set/uri-with-two-hosts.json new file mode 100644 index 0000000000..5261a39cfa --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-two-hosts.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because the mongodb+srv URI includes two host names." +} diff --git a/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json new file mode 100644 index 0000000000..40579aa44c --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://TEST1.TEST.BUILD.10GEN.CC", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..7d2f9a6bf8 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..452c7b54db --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..cd3bf65117 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,10 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "numHosts": 1, + "options": { + "srvMaxHosts": 1, + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-zero.json b/test/srv_seedlist/sharded/srvMaxHosts-zero.json new file mode 100644 index 0000000000..f289628c9c --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-zero.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 0, + "ssl": true + }, + "ping": true +} diff --git a/test/test_auth.py b/test/test_auth.py index 17953f20f9..27f6743fae 100644 --- 
a/test/test_auth.py +++ b/test/test_auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 MongoDB, Inc. +# Copyright 2013-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,361 +13,727 @@ # limitations under the License. """Authentication Tests.""" +from __future__ import annotations +import asyncio import os import sys import threading -import unittest - -from urllib import quote_plus +from urllib.parse import quote_plus sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest - -from pymongo import MongoClient, MongoReplicaSetClient -from pymongo.auth import HAVE_KERBEROS -from pymongo.errors import OperationFailure, ConfigurationError +from test import ( + IntegrationTest, + PyMongoTestCase, + SkipTest, + client_context, + unittest, +) +from test.utils_shared import AllowListEventListener, delay, ignore_deprecations + +import pytest + +from pymongo import MongoClient, monitoring +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.errors import OperationFailure +from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference -from test import version, host, port -from test.utils import is_mongos, server_started_with_auth +from pymongo.saslprep import HAVE_STRINGPREP +from pymongo.synchronous.auth import HAVE_KERBEROS, _canonicalize_hostname + +_IS_SYNC = True -# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS. -GSSAPI_HOST = os.environ.get('GSSAPI_HOST') -GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017')) -PRINCIPAL = os.environ.get('PRINCIPAL') +pytestmark = pytest.mark.auth -SASL_HOST = os.environ.get('SASL_HOST') -SASL_PORT = int(os.environ.get('SASL_PORT', '27017')) -SASL_USER = os.environ.get('SASL_USER') -SASL_PASS = os.environ.get('SASL_PASS') -SASL_DB = os.environ.get('SASL_DB', '$external') +# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") class AutoAuthenticateThread(threading.Thread): """Used in testing threaded authentication. + + This does collection.find_one() with a 1-second delay to ensure it must + check out and authenticate multiple connections from the pool concurrently. + + :Parameters: + `collection`: An auth-protected collection containing one document. 
""" - def __init__(self, database): - super(AutoAuthenticateThread, self).__init__() - self.database = database - self.success = True + def __init__(self, collection): + super().__init__() + self.collection = collection + self.success = False def run(self): - try: - self.database.command('dbstats') - except OperationFailure: - self.success = False + assert self.collection.find_one({"$where": delay(1)}) is not None + self.success = True -class TestGSSAPI(unittest.TestCase): +class TestGSSAPI(PyMongoTestCase): + mech_properties: str + service_realm_required: bool - def setUp(self): + @classmethod + def setUpClass(cls): if not HAVE_KERBEROS: - raise SkipTest('Kerberos module not available.') - if not GSSAPI_HOST or not PRINCIPAL: - raise SkipTest('Must set GSSAPI_HOST and PRINCIPAL to test GSSAPI') - + raise SkipTest("Kerberos module not available.") + if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") + cls.service_realm_required = ( + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = dict( + SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE + ) + if GSSAPI_SERVICE_REALM is not None: + mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM + cls.mech_properties = mech_properties + + def test_credentials_hashing(self): + # GSSAPI credentials are properly hashed. + creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) + + creds1 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds2 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds3 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) + + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) + + @ignore_deprecations def test_gssapi_simple(self): - - client = MongoClient(GSSAPI_HOST, GSSAPI_PORT) - # Without gssapiServiceName - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI')) - self.assertTrue(client.database_names()) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(PRINCIPAL), GSSAPI_HOST, GSSAPI_PORT)) - client = MongoClient(uri) - self.assertTrue(client.database_names()) - - # With gssapiServiceName - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI', - gssapiServiceName='mongodb')) - self.assertTrue(client.database_names()) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'GSSAPI;gssapiServiceName=mongodb' % (quote_plus(PRINCIPAL), - GSSAPI_HOST, GSSAPI_PORT)) - client = MongoClient(uri) - self.assertTrue(client.database_names()) - - set_name = client.admin.command('ismaster').get('setName') + assert GSSAPI_PRINCIPAL is not None + if GSSAPI_PASS is not None: + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) + else: + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) + + if not self.service_realm_required: + # Without authMechanismProperties. 
+ client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) + + client[GSSAPI_DB].collection.find_one() + + # Log in using URI, without authMechanismProperties. + client = self.simple_client(uri) + client[GSSAPI_DB].collection.find_one() + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + client[GSSAPI_DB].collection.find_one() + + # Log in using URI, with authMechanismProperties. + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" + client = self.simple_client(mech_uri) + client[GSSAPI_DB].collection.find_one() + + set_name = client_context.replica_set_name if set_name: - client = MongoReplicaSetClient(GSSAPI_HOST, - port=GSSAPI_PORT, - replicaSet=set_name) - # Without gssapiServiceName - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI')) - self.assertTrue(client.database_names()) - uri = ('mongodb://%s@%s:%d/?authMechanism=GSSAPI;replicaSet' - '=%s' % (quote_plus(PRINCIPAL), - GSSAPI_HOST, GSSAPI_PORT, str(set_name))) - client = MongoReplicaSetClient(uri) - self.assertTrue(client.database_names()) - - # With gssapiServiceName - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI', - gssapiServiceName='mongodb')) - self.assertTrue(client.database_names()) - uri = ('mongodb://%s@%s:%d/?authMechanism=GSSAPI;replicaSet' - '=%s;gssapiServiceName=mongodb' % (quote_plus(PRINCIPAL), - GSSAPI_HOST, - GSSAPI_PORT, - str(set_name))) - client = MongoReplicaSetClient(uri) - self.assertTrue(client.database_names()) - + if not self.service_realm_required: + # Without authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) + + client[GSSAPI_DB].list_collection_names() + + uri = uri + f"&replicaSet={set_name!s}" + client = self.simple_client(uri) + client[GSSAPI_DB].list_collection_names() + + # With authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + client[GSSAPI_DB].list_collection_names() + + mech_uri = mech_uri + f"&replicaSet={set_name!s}" + client = self.simple_client(mech_uri) + client[GSSAPI_DB].list_collection_names() + + @ignore_deprecations + @client_context.require_sync def test_gssapi_threaded(self): - - # Use auto_start_request=True to make sure each thread - # uses a different socket. - client = MongoClient(GSSAPI_HOST, auto_start_request=True) - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI')) + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + # Authentication succeeded? + client.server_info() + db = client[GSSAPI_DB] + + # Need one document in the collection. 
AutoAuthenticateThread does + # collection.find_one with a 1-second delay, forcing it to check out + # multiple connections from the pool concurrently, proving that + # auto-authentication works with GSSAPI. + collection = db.test + if not collection.count_documents({}): + try: + collection.drop() + collection.insert_one({"_id": 1}) + except OperationFailure: + raise SkipTest("User must be able to write.") threads = [] - for _ in xrange(4): - threads.append(AutoAuthenticateThread(client.foo)) + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) for thread in threads: thread.start() for thread in threads: thread.join() self.assertTrue(thread.success) - set_name = client.admin.command('ismaster').get('setName') + set_name = client_context.replica_set_name if set_name: - preference = ReadPreference.SECONDARY - client = MongoReplicaSetClient(GSSAPI_HOST, - replicaSet=set_name, - read_preference=preference) - self.assertTrue(client.test.authenticate(PRINCIPAL, - mechanism='GSSAPI')) - self.assertTrue(client.foo.command('dbstats')) + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + # Succeeded? + client.server_info() threads = [] - for _ in xrange(4): - threads.append(AutoAuthenticateThread(client.foo)) + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) for thread in threads: thread.start() for thread in threads: thread.join() self.assertTrue(thread.success) - -class TestSASL(unittest.TestCase): - - def setUp(self): + def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. + props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + client.server_info() + + def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. 
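+ # Note that props aliases self.mech_properties, so the bogus SERVICE_HOST + # of example.com is in effect here and authentication must fail.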
+ client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + client.server_info() + + +class TestSASLPlain(PyMongoTestCase): + @classmethod + def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest('Must set SASL_HOST, ' - 'SASL_USER, and SASL_PASS to test SASL') + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - - client = MongoClient(SASL_HOST, SASL_PORT) - self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS, - SASL_DB, 'PLAIN')) + client = self.simple_client( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, SASL_DB)) - client = MongoClient(uri) + assert SASL_USER is not None + assert SASL_PASS is not None + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + client = self.simple_client(uri) client.ldap.test.find_one() - set_name = client.admin.command('ismaster').get('setName') + set_name = client_context.replica_set_name if set_name: - client = MongoReplicaSetClient(SASL_HOST, - port=SASL_PORT, - replicaSet=set_name) - self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS, - SASL_DB, 'PLAIN')) + client = self.simple_client( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, - SASL_DB, str(set_name))) - client = MongoReplicaSetClient(uri) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) + client = self.simple_client(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): + def auth_string(user, password): + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + return uri - client = MongoClient(SASL_HOST, SASL_PORT) - - # Bad username - self.assertRaises(OperationFailure, client.ldap.authenticate, - 'not-user', SASL_PASS, SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert, - {"failed": True}) + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) + # OperationFailure raised upon connecting. 
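+ # (The credentials are verified when the connection is first used, so even + # a simple ping surfaces the authentication failure.)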
+ with self.assertRaises(OperationFailure): + bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + bad_pwd.admin.command("ping") - # Bad password - self.assertRaises(OperationFailure, client.ldap.authenticate, - SASL_USER, 'not-pwd', SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert, - {"failed": True}) - def auth_string(user, password): - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(user), - quote_plus(password), - SASL_HOST, SASL_PORT, SASL_DB)) - return uri +class TestSCRAMSHA1(IntegrationTest): + @client_context.require_auth + def setUp(self): + super().setUp() + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) - # Just assert that we raise the right exception - self.assertRaises(ConfigurationError, MongoClient, - auth_string('not-user', SASL_PASS)) - self.assertRaises(ConfigurationError, MongoClient, - auth_string(SASL_USER, 'not-pwd')) + def tearDown(self): + client_context.drop_user("pymongo_test", "user") + super().tearDown() + + @client_context.require_no_fips + def test_scram_sha1(self): + host, port = client_context.host, client_context.port + + client = self.rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + client.pymongo_test.command("dbstats") + + if client_context.is_rs: + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, client_context.replica_set_name) + ) + client = self.single_client_noauth(uri) + client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") + + +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation +class TestSCRAM(IntegrationTest): + @client_context.require_auth + @client_context.require_version_min(3, 7, 2) + def setUp(self): + super().setUp() + self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS + monitoring._SENSITIVE_COMMANDS = set() + self.listener = AllowListEventListener("saslStart") + def tearDown(self): + monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS + client_context.client.testscram.command("dropAllUsersFromDatabase") + client_context.client.drop_database("testscram") + super().tearDown() + + def test_scram_skip_empty_exchange(self): + listener = AllowListEventListener("saslStart", "saslContinue") + client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = self.rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + client.testscram.command("dbstats") + + if client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.started_events[0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) + + # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. 
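+ # (On 4.4+ speculative authentication folds saslStart into the initial + # hello, which is why only saslContinue is observed below.)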
+ started = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ["saslContinue"]) + else: + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + + @client_context.require_no_fips + def test_scram(self): + # Step 1: create users + client_context.create_user( + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) + client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + client_context.create_user( + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) + + # Step 2: verify auth success cases + client = self.rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = self.rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") + client = self.rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + + self.listener.reset() + client = self.rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + client.testscram.command("dbstats") + if client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. + self.assertEqual(self.listener.started_events, []) + else: + started = self.listener.started_events[0] + self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") + + # Step 3: verify auth failure conditions + client = self.rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="not-a-user", password="pwd", authSource="testscram" + ) + with self.assertRaises(OperationFailure): + client.testscram.command("dbstats") + + if client_context.is_rs: + host, port = client_context.host, client_context.port + uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) + client = self.single_client_noauth(uri) + client.testscram.command("dbstats") + db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") + + @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep") + def test_scram_saslprep(self): + # Step 4: test SASLprep + host, port = client_context.host, client_context.port + # Test the use of SASLprep on passwords. 
For example, + # saslprep('\u2163') becomes 'IV' and saslprep('I\u00ADX') + # becomes 'IX'. SASLprep is only supported when the standard + # library provides stringprep. + client_context.create_user( + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + client_context.create_user( + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = self.rs_or_single_client_noauth( + username="\u2168", password="\u2163", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="\u2168", password="IV", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="IX", password="I\u00ADX", authSource="testscram" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + client = self.rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + + client = self.rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + client = self.rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") + + def test_cache(self): + client = self.single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) + # Force authentication.
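+ # The ping drives the SCRAM conversation; afterwards the cache holds the + # derived client key, server key, salt, and iteration count, letting later + # connections skip the expensive key derivation.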
+ client.admin.command("ping") + cache = credentials.cache + self.assertIsNotNone(cache) + data = cache.data + self.assertIsNotNone(data) + self.assertEqual(len(data), 4) + ckey, skey, salt, iterations = data + self.assertIsInstance(ckey, bytes) + self.assertIsInstance(skey, bytes) + self.assertIsInstance(salt, bytes) + self.assertIsInstance(iterations, int) + + @client_context.require_sync + def test_scram_threaded(self): + coll = client_context.client.db.test + coll.drop() + coll.insert_one({"_id": 1}) + + # The first thread to call find() will authenticate + client = self.rs_or_single_client() + coll = client.db.test + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(coll)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) -class TestAuthURIOptions(unittest.TestCase): +class TestAuthURIOptions(IntegrationTest): + @client_context.require_auth def setUp(self): - client = MongoClient(host, port) - # Sharded auth not supported before MongoDB 2.0 - if is_mongos(client) and not version.at_least(client, (2, 0, 0)): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(client): - raise SkipTest('Authentication is not enabled on server') - response = client.admin.command('ismaster') - self.set_name = str(response.get('setName', '')) - client.admin.add_user('admin', 'pass', roles=['userAdminAnyDatabase', - 'dbAdminAnyDatabase', - 'readWriteAnyDatabase', - 'clusterAdmin']) - client.admin.authenticate('admin', 'pass') - client.pymongo_test.add_user('user', 'pass', - roles=['userAdmin', 'readWrite']) - - if self.set_name: - # GLE requires authentication. - client.admin.authenticate('admin', 'pass') - # Make sure the admin user is replicated after calling add_user - # above. This avoids a race in the MRSC tests below. Adding a - # user is just an insert into system.users. 
- client.admin.command('getLastError', w=len(response['hosts'])) - self.client = client + super().setUp() + client_context.create_user("admin", "admin", "pass") + client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): - self.client.admin.authenticate('admin', 'pass') - self.client.pymongo_test.remove_user('user') - self.client.admin.remove_user('admin') - self.client.pymongo_test.logout() - self.client.admin.logout() - self.client = None + client_context.drop_user("pymongo_test", "user") + client_context.drop_user("admin", "admin") + super().tearDown() def test_uri_options(self): # Test default to admin - client = MongoClient('mongodb://admin:pass@%s:%d' % (host, port)) - self.assertTrue(client.admin.command('dbstats')) - - if self.set_name: - uri = ('mongodb://admin:pass' - '@%s:%d/?replicaSet=%s' % (host, port, self.set_name)) - client = MongoReplicaSetClient(uri) - self.assertTrue(client.admin.command('dbstats')) - client.read_preference = ReadPreference.SECONDARY - self.assertTrue(client.admin.command('dbstats')) + host, port = client_context.host, client_context.port + client = self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + self.assertTrue(client.admin.command("dbstats")) + + if client_context.is_rs: + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) + client = self.single_client_noauth(uri) + self.assertTrue(client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test explicit database - uri = 'mongodb://user:pass@%s:%d/pymongo_test' % (host, port) - client = MongoClient(uri) - self.assertRaises(OperationFailure, client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - - if self.set_name: - uri = ('mongodb://user:pass@%s:%d' - '/pymongo_test?replicaSet=%s' % (host, port, self.set_name)) - client = MongoReplicaSetClient(uri) - self.assertRaises(OperationFailure, - client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - client.read_preference = ReadPreference.SECONDARY - self.assertTrue(client.pymongo_test.command('dbstats')) + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + client = self.rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + + if client_context.is_rs: + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) + client = self.single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test authSource - uri = ('mongodb://user:pass@%s:%d' - '/pymongo_test2?authSource=pymongo_test' % (host, port)) - client = MongoClient(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - - if self.set_name: - uri = ('mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=' - '%s;authSource=pymongo_test' % (host, port, self.set_name)) - client = MongoReplicaSetClient(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - 
self.assertTrue(client.pymongo_test.command('dbstats')) - client.read_preference = ReadPreference.SECONDARY - self.assertTrue(client.pymongo_test.command('dbstats')) - - -class TestDelegatedAuth(unittest.TestCase): - - def setUp(self): - self.client = MongoClient(host, port) - if not version.at_least(self.client, (2, 4, 0)): - raise SkipTest('Delegated authentication requires MongoDB >= 2.4.0') - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - if version.at_least(self.client, (2, 5, 3, -1)): - raise SkipTest('Delegated auth does not exist in MongoDB >= 2.5.3') - # Give admin all privileges. - self.client.admin.add_user('admin', 'pass', - roles=['readAnyDatabase', - 'readWriteAnyDatabase', - 'userAdminAnyDatabase', - 'dbAdminAnyDatabase', - 'clusterAdmin']) - - def tearDown(self): - self.client.admin.authenticate('admin', 'pass') - self.client.pymongo_test.remove_user('user') - self.client.pymongo_test2.remove_user('user') - self.client.pymongo_test2.foo.remove() - self.client.admin.remove_user('admin') - self.client.admin.logout() - self.client = None - - def test_delegated_auth(self): - self.client.admin.authenticate('admin', 'pass') - self.client.pymongo_test2.foo.remove() - self.client.pymongo_test2.foo.insert({}) - # User definition with no roles in pymongo_test. - self.client.pymongo_test.add_user('user', 'pass', roles=[]) - # Delegate auth to pymongo_test. - self.client.pymongo_test2.add_user('user', - userSource='pymongo_test', - roles=['read']) - self.client.admin.logout() - self.assertRaises(OperationFailure, - self.client.pymongo_test2.foo.find_one) - # Auth must occur on the db where the user is defined. - self.assertRaises(OperationFailure, - self.client.pymongo_test2.authenticate, - 'user', 'pass') - # Auth directly - self.assertTrue(self.client.pymongo_test.authenticate('user', 'pass')) - self.assertTrue(self.client.pymongo_test2.foo.find_one()) - self.client.pymongo_test.logout() - self.assertRaises(OperationFailure, - self.client.pymongo_test2.foo.find_one) - # Auth using source - self.assertTrue(self.client.pymongo_test2.authenticate( - 'user', 'pass', source='pymongo_test')) - self.assertTrue(self.client.pymongo_test2.foo.find_one()) - # Must logout from the db authenticate was called on. - self.client.pymongo_test2.logout() - self.assertRaises(OperationFailure, - self.client.pymongo_test2.foo.find_one) + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) + client = self.rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + + if client_context.is_rs: + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) + ) + client = self.single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) if __name__ == "__main__": diff --git a/test/test_auth_oidc.py b/test/test_auth_oidc.py new file mode 100644 index 0000000000..877a5ca981 --- /dev/null +++ b/test/test_auth_oidc.py @@ -0,0 +1,1190 @@ +# Copyright 2023-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +import warnings +from contextlib import contextmanager +from pathlib import Path +from test import PyMongoTestCase +from test.helpers import ConcurrentRunner +from typing import Dict + +import pytest + +sys.path[0:0] = [""] + +from test.unified_format import generate_test_classes +from test.utils_shared import EventListener, OvertCommandListener + +from bson import SON +from pymongo import MongoClient +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.cursor_shared import CursorType +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne +from pymongo.synchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) +from pymongo.synchronous.uri_parser import parse_uri + +_IS_SYNC = True + +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") + +# Generate unified tests. 
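+# (generate_test_classes builds test case classes from the JSON spec files +# under auth/unified and injects them into this module's namespace.)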
+globals().update(generate_test_classes(str(TEST_PATH), module=__name__)) + +pytestmark = pytest.mark.auth_oidc + + +class OIDCTestBase(PyMongoTestCase): + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI") + cls.uri_admin = os.environ["MONGODB_URI"] + if ENVIRON == "test": + if not TOKEN_DIR: + raise ValueError("Please set OIDC_TOKEN_DIR") + if not TOKEN_FILE: + raise ValueError("Please set OIDC_TOKEN_FILE") + + def setUp(self): + self.request_called = 0 + + def get_token(self, username=None): + """Get a token for the current provider.""" + if ENVIRON == "test": + if username is None: + token_file = TOKEN_FILE + else: + token_file = os.path.join(TOKEN_DIR, username) + with open(token_file) as fid: # noqa: ASYNC101,RUF100 + return fid.read() + elif ENVIRON == "azure": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + return _get_azure_response(token_aud, username)["access_token"] + elif ENVIRON == "gcp": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + return _get_gcp_response(token_aud, username)["access_token"] + elif ENVIRON == "k8s": + return _get_k8s_token() + else: + raise ValueError(f"Unknown ENVIRON: {ENVIRON}") + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = MongoClient(self.uri_admin) + client.admin.command(cmd_on) + try: + yield + finally: + client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + client.close() + + +class TestAuthOIDCHuman(OIDCTestBase): + uri: str + + @classmethod + def setUpClass(cls): + if ENVIRON != "test": + raise unittest.SkipTest("Human workflows are only tested with the test environment") + if DOMAIN is None: + raise ValueError("Missing OIDC_DOMAIN") + super().setUpClass() + + def setUp(self): + self.refresh_present = 0 + super().setUp() + + def create_request_cb(self, username="test_user1", sleep=0): + def request_token(context: OIDCCallbackContext): + # Validate the info. + self.assertIsInstance(context.idp_info.issuer, str) + if context.idp_info.clientId is not None: + self.assertIsInstance(context.idp_info.clientId, str) + + # Validate the timeout. + timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client + + def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. + client = self.create_client() + # Perform a find operation that succeeds. 
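+ # (The human callback supplies the access token used for the initial + # authentication handshake.)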
+ client.test.test.find_one() + # Close the client. + client.close() + + def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_user1") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple) + # Assert that a find operation fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. + request_token = self.create_request_cb() + props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []} + client = self.create_client(authmechanismproperties=props) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + # Close the client. + client.close() + + # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com, + # a human callback, and an ALLOWED_HOSTS that contains ["example.com"]. + props: Dict = { + "OIDC_HUMAN_CALLBACK": request_token, + "ALLOWED_HOSTS": ["example.com"], + } + with warnings.catch_warnings(): + warnings.simplefilter("default") + client = self.create_client( + self.uri_single + "&ignored=example.com", + authmechanismproperties=props, + connect=False, + ) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_7_allowed_hosts_in_connection_string_ignored(self): + # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback. + # Assert that the creation of the client raises a configuration error. 
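+ # (ALLOWED_HOSTS may only be specified via client options; including it + # in the connection string is a client-side configuration error.)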
+ uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" + with self.assertRaises(ConfigurationError), warnings.catch_warnings(): + warnings.simplefilter("ignore") + c = MongoClient( + uri, + authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()), + ) + c._connect() + + def test_1_8_machine_idp_human_callback(self): + if not os.environ.get("OIDC_IS_LOCAL"): + raise unittest.SkipTest("Test Requires Local OIDC server") + # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_machine") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient with a human callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible. + # Ensure that there are no unexpected fields. + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_2_callback_returns_missing_data(self): + # Create a MongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCB(OIDCCallback): + def fetch(self, ctx): + return dict() + + client = self.create_client(request_cb=CustomCB()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_3_refresh_token_is_passed_to_the_callback(self): + # Create a MongoClient with a human callback that checks for the presence of a refresh token. + client = self.create_client() + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Set a fail point for ``find`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + + # Assert that the callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the refresh token was used once. + self.assertEqual(self.refresh_present, 1) + + def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self): + # Create a client with a human callback that returns a valid token. + client = self.create_client() + + # Set a fail point for ``find`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(AutoReconnect): + client.test.test.find_one() + + # Set a fail point for ``saslStart`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that succeeds + client.test.test.find_one() + + # Close the client. + client.close() + + def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self): + # Create a ``MongoClient`` with a human callback that returns a valid token + client = self.create_client() + + # Set a fail point for ``saslStart`` commands. 
+ with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Close the client. + client.close() + + def test_4_1_reauthenticate_succeeds(self): + # Create a default OIDC client and add an event listener. + # The following assumes that the driver does not emit saslStart or saslContinue events. + # If the driver does emit those events, ignore/filter them for the purposes of this test. + listener = OvertCommandListener() + client = self.create_client(event_listeners=[listener]) + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Clear the listener state if possible. + listener.reset() + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform another find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the ordering of list started events is [find, find]. + # Note that if the listener state could not be cleared then there will be an extra find command. + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + # Assert that the list of command succeeded events is [find]. + self.assertEqual(succeeded_events, ["find"]) + # Assert that a find operation failed once during the command execution. + self.assertEqual(failed_events, ["find"]) + # Close the client. + client.close() + + def test_4_2_reauthenticate_succeeds_no_refresh(self): + # Create a default OIDC client with a human callback that does not return a refresh token. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = None + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + # Close the client. + client.close() + + def test_4_3_reauthenticate_succeeds_after_refresh_fails(self): + # Create a default OIDC client with a human callback that returns an invalid refresh token. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds.
+ client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called 2 times. + self.assertEqual(self.request_called, 2) + + # Close the client. + client.close() + + def test_4_4_reauthenticate_fails(self): + # Create a default OIDC client with a human callback that returns invalid refresh tokens and + # returns invalid access tokens after the first access. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + fetch_called = 0 + + def fetch(self, *args, **kwargs): + self.fetch_called += 1 + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + if self.fetch_called > 1: + result.access_token = "bad" + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds (to force a speculative auth). + client.test.test.find_one() + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a failCommand. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Assert that the human callback has been called three times. + self.assertEqual(self.request_called, 3) + + # Close the client. + client.close() + + def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_reauthentication_succeeds_multiple_connections(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + client1 = self.create_client(request_cb=request_cb) + client2 = self.create_client(request_cb=request_cb) + + # Perform an insert operation. + client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate.
+ client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + client1.close() + client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = self.create_client() + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_bulk_read(self): + # Create a client. + client = self.create_client() + + # Perform a find operation. + client.test.test.find_one() + + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + cursor.to_list() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_cursor(self): + # Create a client. + client = self.create_client() + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_get_more(self): + # Create a client. + client = self.create_client() + + # Perform an insert operation. + client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(cursor.to_list()), 1) + + # Assert that the request callback has been called twice. 
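+ # (Once for the initial authentication and once for the reauthentication + # triggered by the failed getMore.)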
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+    def test_reauthenticate_succeeds_get_more_exhaust(self):
+        # Skip on mongos, which does not support exhaust cursors.
+        client = self.create_client()
+        hello = client.admin.command(HelloCompat.LEGACY_CMD)
+        if hello.get("msg") == "isdbgrid":
+            raise unittest.SkipTest("Must not be a mongos")
+
+        # Create a client with the callback.
+        client = self.create_client()
+
+        # Perform an insert operation.
+        client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+        # Assert that the request callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["getMore"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation.
+            cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST)
+            self.assertGreaterEqual(len(cursor.to_list()), 1)
+
+        # Assert that the request callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+    def test_reauthenticate_succeeds_command(self):
+        # Create a client.
+        client = self.create_client()
+
+        # Perform an insert operation.
+        client.test.test.insert_one({"a": 1})
+
+        # Assert that the request callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["count"], "errorCode": 391},
+            }
+        ):
+            # Perform a count operation. ``command`` returns a result
+            # document, not a cursor.
+            result = client.test.command({"count": "test"})
+
+        self.assertGreaterEqual(len(result), 1)
+
+        # Assert that the request callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+
+class TestAuthOIDCMachine(OIDCTestBase):
+    uri: str
+
+    def setUp(self):
+        self.request_called = 0
+
+    def create_request_cb(self, username=None, sleep=0):
+        def request_token(context):
+            assert isinstance(context.timeout_seconds, int)
+            assert context.version == 1
+            assert context.refresh_token is None
+            assert context.idp_info is None
+            token = self.get_token(username)
+            time.sleep(sleep)
+            self.request_called += 1
+            return OIDCCallbackResult(access_token=token)
+
+        class Inner(OIDCCallback):
+            def fetch(self, context):
+                return request_token(context)
+
+        return Inner()
+
+    def create_client(self, *args, **kwargs):
+        request_cb = kwargs.pop("request_cb", self.create_request_cb())
+        props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb})
+        kwargs["retryReads"] = False
+        if not len(args):
+            args = [self.uri_single]
+        client = MongoClient(*args, authmechanismproperties=props, **kwargs)
+        self.addCleanup(client.close)
+        return client
+
+    def test_1_1_callback_is_called_during_reauthentication(self):
+        # Create a ``MongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = self.create_client()
+        # Perform a ``find`` operation that succeeds.
+        client.test.test.find_one()
+        # Assert that the callback was called 1 time.
+        self.assertEqual(self.request_called, 1)
+
+    def test_1_2_callback_is_called_once_for_multiple_connections(self):
+        # Create a ``MongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = self.create_client()
+        client._connect()
+
+        # Start 10 tasks, each of which runs 100 find operations that all
+        # succeed (a minimal ConcurrentRunner stand-in is sketched below).
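`ConcurrentRunner` comes from this suite's shared test helpers. Assuming it is a thin wrapper over `threading.Thread` for the synchronous tests, a hypothetical stand-in (not the actual helper) would be:

```python
import threading


class ConcurrentRunner(threading.Thread):
    """Hypothetical stand-in: run ``target`` on a separate thread of control."""

    def __init__(self, target):
        super().__init__(target=target)
```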
+ def target(): + for _ in range(100): + client.test.test.find_one() + + tasks = [] + for i in range(10): + tasks.append(ConcurrentRunner(target=target)) + for t in tasks: + t.start() + for t in tasks: + t.join() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. + client.test.test.find_one() + # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. + self.assertEqual(self.request_called, 1) + + def test_2_2_oidc_callback_returns_null(self): + # Create a MongoClient configured with an OIDC callback that returns null. + class CallbackNullToken(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=CallbackNullToken()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_2_3_oidc_callback_returns_missing_data(self): + # Create a MongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return object() + + client = self.create_client(request_cb=CustomCallback()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_2_4_invalid_client_configuration_with_callback(self): + # Create a MongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. + request_cb = self.create_request_cb() + props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`. + props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): + # Create a MongoCredential for OIDC with a machine callback. + props = {"OIDC_CALLBACK": self.create_request_cb()} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "foo" + + # Create a MongoCredential for OIDC with an ENVIRONMENT. 
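The `test_2_6` steps drive the private `_build_credentials_tuple` and `_get_authenticator` helpers directly. For contrast, the equivalent public configuration of a machine callback with an explicit username looks roughly like this (a sketch; the exact import path for `OIDCCallback` may vary across driver versions, and the URI is illustrative):

```python
from pymongo import MongoClient
from pymongo.auth_oidc import OIDCCallback, OIDCCallbackResult


class MachineCallback(OIDCCallback):
    def fetch(self, context):
        # For machine flows, context.refresh_token and context.idp_info
        # are always None; only an access token needs to be returned.
        return OIDCCallbackResult(access_token="<access token>")


client = MongoClient(
    "mongodb://example.com:27017",
    username="foo",
    authMechanism="MONGODB-OIDC",
    authMechanismProperties={"OIDC_CALLBACK": MachineCallback()},
)
```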
+        props = {"ENVIRONMENT": "test"}
+        extra = dict(authmechanismproperties=props)
+        mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test")
+        # Assert that creating an authenticator for example.com does not result in an error.
+        authenticator = _get_authenticator(mongo_creds, ("example.com", 30))
+        assert authenticator.properties.username == ""
+
+    def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self):
+        # Create a MongoClient and an OIDC callback that implements the provider logic.
+        client = self.create_client()
+        client._connect()
+        # Set a fail point for the ``find`` command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True},
+            }
+        ):
+            # Perform a ``find`` operation that fails. This forces the ``MongoClient``
+            # to cache an access token.
+            with self.assertRaises(AutoReconnect):
+                client.test.test.find_one()
+        # Poison the cache of the client with an invalid access token.
+        client.options.pool_options._credentials.cache.data.access_token = "bad"
+        # Reset the request count.
+        self.request_called = 0
+        # Verify that a find succeeds.
+        client.test.test.find_one()
+        # Verify that the callback was called 1 time.
+        self.assertEqual(self.request_called, 1)
+
+    def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self):
+        # Create a MongoClient configured with retryReads=false and an OIDC callback
+        # that always returns invalid access tokens.
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, a):
+                self.count += 1
+                return OIDCCallbackResult(access_token="bad value")
+
+        callback = CustomCallback()
+        client = self.create_client(request_cb=callback)
+        # Perform a ``find`` operation that fails.
+        with self.assertRaises(OperationFailure):
+            client.test.test.find_one()
+        # Verify that the callback was called 1 time.
+        self.assertEqual(callback.count, 1)
+
+    def test_3_3_unexpected_error_code_does_not_clear_cache(self):
+        # Create a ``MongoClient`` with a callback that returns a valid token.
+        client = self.create_client()
+
+        # Set a fail point for ``saslStart`` commands.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["saslStart"], "errorCode": 20},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.find_one()
+
+        # Assert that the callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Perform a ``find`` operation that succeeds.
+        client.test.test.find_one()
+
+        # Assert that the callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+    def test_4_1_reauthentication_succeeds(self):
+        # Create a ``MongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = self.create_client()
+        client._connect()
+
+        # Set a fail point for the find command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that succeeds.
+            client.test.test.find_one()
+
+        # Verify that the callback was called 2 times (once during the connection
+        # handshake, and again during reauthentication).
+        self.assertEqual(self.request_called, 2)
+
+    def test_4_2_read_commands_fail_if_reauthentication_fails(self):
+        # Create a ``MongoClient`` whose OIDC callback returns one good token and then
+        # bad tokens after the first call.
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = self.create_client(request_cb=callback)
+
+        # Perform a read operation that succeeds.
+        client.test.test.find_one()
+
+        # Set a fail point for the find command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.find_one()
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    def test_4_3_write_commands_fail_if_reauthentication_fails(self):
+        # Create a ``MongoClient`` whose OIDC callback returns one good token and then
+        # bad tokens after the first call.
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = self.create_client(request_cb=callback)
+
+        # Perform an insert operation that succeeds.
+        client.test.test.insert_one({})
+
+        # Set a fail point for the insert command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an ``insert`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.insert_one({})
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self):
+        # Create an OIDC configured client that can listen for `saslStart` commands.
+        listener = EventListener()
+        client = self.create_client(event_listeners=[listener])
+        client._connect()
+
+        # Preload the *Client Cache* with a valid access token to enforce
+        # Speculative Authentication.
+        client2 = self.create_client()
+        client2.test.test.find_one()
+        client.options.pool_options._credentials.cache.data = (
+            client2.options.pool_options._credentials.cache.data
+        )
+        client2.close()
+        self.request_called = 0
+
+        # Perform an `insert` operation that succeeds.
+        client.test.test.insert_one({})
+
+        # Assert that the callback was not called.
+        self.assertEqual(self.request_called, 0)
+
+        # Assert there were no `saslStart` commands executed.
+        assert not any(
+            event.command_name.lower() == "saslstart" for event in listener.started_events
+        )
+        listener.reset()
+
+        # Set a fail point for `insert` commands of the form:
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an `insert` operation that succeeds.
+            client.test.test.insert_one({})
+
+        # Assert that the callback was called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Assert there were `saslStart` commands executed.
+        assert any(event.command_name.lower() == "saslstart" for event in listener.started_events)
+
+    def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self):
+        # Create an OIDC configured client.
+ client = self.create_client() + + # Set a fail point for `find` commands of the form: + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Start a new session. + with client.start_session() as session: + # In the started session perform a `find` operation that succeeds. + client.test.test.find_one({}, session=session) + + # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + + def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = self.create_client(authMechanismProperties=props) + client.test.test.find_one() + + def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_speculative_auth_success(self): + client1 = self.create_client() + client1.test.test.find_one() + client2 = self.create_client() + client2._connect() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client2.test.test.find_one() + + def test_reauthentication_succeeds_multiple_connections(self): + client1 = self.create_client() + client2 = self.create_client() + + # Perform an insert operation. + client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py new file mode 100644 index 0000000000..ac6411cd89 --- /dev/null +++ b/test/test_auth_spec.py @@ -0,0 +1,113 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the auth spec tests.""" +from __future__ import annotations + +import glob +import json +import os +import sys +import warnings +from test import PyMongoTestCase + +import pytest + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +from pymongo import MongoClient +from pymongo.auth_oidc_shared import OIDCCallback + +pytestmark = pytest.mark.auth + +_IS_SYNC = True + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") + + +class TestAuthSpec(PyMongoTestCase): + pass + + +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + +def create_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") + + if not valid: + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, MongoClient, uri, connect=False) + else: + client = self.simple_client(uri, connect=False) + credentials = client.options.pool_options._credentials + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) + else: + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] + if expected is not None: + actual = credentials.mechanism_properties + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) + else: + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) + + return run_test + + +def create_tests(): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as auth_tests: + test_cases = json.load(auth_tests)["tests"] + for test_case in test_cases: + if test_case.get("optional", False): + continue + test_method = create_test(test_case) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) + + +create_tests() + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_binary.py b/test/test_binary.py index 522f5302ce..a64aa42280 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,304 +13,463 @@ # limitations under the License. 
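Stepping back to the data-driven runner in `test_auth_spec.py` above: the shape of the legacy JSON cases it consumes can be inferred from the fields `create_test` reads. An illustrative case (hypothetical, not copied verbatim from the spec repository):

```python
# Hypothetical legacy auth-spec case; the keys mirror exactly what
# create_test() accesses (uri, valid, credential and its subfields).
test_case = {
    "description": "should use admin as default source",
    "uri": "mongodb://user:password@localhost",
    "valid": True,
    "credential": {
        "username": "user",
        "password": "password",
        "source": "admin",
        "mechanism": None,
        "mechanism_properties": None,
    },
}
```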
"""Tests for the Binary wrapper.""" +from __future__ import annotations +import array import base64 import copy +import mmap import pickle import sys -import unittest -try: - import uuid - should_test_uuid = True -except ImportError: - should_test_uuid = False +import uuid +from typing import Any sys.path[0:0] = [""] -import bson +from test import IntegrationTest, client_context, unittest +import bson +from bson import decode, encode from bson.binary import * -from bson.py3compat import b, binary_type +from bson.codec_options import CodecOptions from bson.son import SON -from nose.plugins.skip import SkipTest -from test.test_client import get_client -from pymongo.mongo_client import MongoClient +from pymongo.common import validate_uuid_representation +from pymongo.write_concern import WriteConcern + + +class BinaryData: + # Generated by the Java driver + from_java = ( + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) + java_data = base64.b64decode(from_java) + + # Generated by the .net driver + from_csharp = ( + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) + csharp_data = base64.b64decode(from_csharp) + class TestBinary(unittest.TestCase): def test_binary(self): a_string = "hello world" - a_binary = Binary(b("hello world")) - self.assertTrue(a_binary.startswith(b("hello"))) - self.assertTrue(a_binary.endswith(b("world"))) - self.assertTrue(isinstance(a_binary, Binary)) - self.assertFalse(isinstance(a_string, Binary)) + a_binary = Binary(b"hello world") + self.assertTrue(a_binary.startswith(b"hello")) + self.assertTrue(a_binary.endswith(b"world")) + self.assertIsInstance(a_binary, Binary) + self.assertNotIsInstance(a_string, Binary) def test_exceptions(self): self.assertRaises(TypeError, Binary, None) - self.assertRaises(TypeError, Binary, u"hello") self.assertRaises(TypeError, Binary, 5) self.assertRaises(TypeError, Binary, 10.2) - self.assertRaises(TypeError, Binary, b("hello"), None) - self.assertRaises(TypeError, Binary, b("hello"), "100") - self.assertRaises(ValueError, Binary, b("hello"), -1) - self.assertRaises(ValueError, Binary, b("hello"), 256) - 
self.assertTrue(Binary(b("hello"), 0)) - self.assertTrue(Binary(b("hello"), 255)) + self.assertRaises(TypeError, Binary, b"hello", None) + self.assertRaises(TypeError, Binary, b"hello", "100") + self.assertRaises(ValueError, Binary, b"hello", -1) + self.assertRaises(ValueError, Binary, b"hello", 256) + self.assertTrue(Binary(b"hello", 0)) + self.assertTrue(Binary(b"hello", 255)) + self.assertRaises(TypeError, Binary, "hello") def test_subtype(self): - one = Binary(b("hello")) + one = Binary(b"hello") self.assertEqual(one.subtype, 0) - two = Binary(b("hello"), 2) + two = Binary(b"hello", 2) self.assertEqual(two.subtype, 2) - three = Binary(b("hello"), 100) + three = Binary(b"hello", 100) self.assertEqual(three.subtype, 100) def test_equality(self): - two = Binary(b("hello")) - three = Binary(b("hello"), 100) + two = Binary(b"hello") + three = Binary(b"hello", 100) self.assertNotEqual(two, three) - self.assertEqual(three, Binary(b("hello"), 100)) - self.assertEqual(two, Binary(b("hello"))) - self.assertNotEqual(two, Binary(b("hello "))) - self.assertNotEqual(b("hello"), Binary(b("hello"))) + self.assertEqual(three, Binary(b"hello", 100)) + self.assertEqual(two, Binary(b"hello")) + self.assertNotEqual(two, Binary(b"hello ")) + self.assertNotEqual(b"hello", Binary(b"hello")) # Explicitly test inequality - self.assertFalse(three != Binary(b("hello"), 100)) - self.assertFalse(two != Binary(b("hello"))) + self.assertFalse(three != Binary(b"hello", 100)) + self.assertFalse(two != Binary(b"hello")) def test_repr(self): - one = Binary(b("hello world")) - self.assertEqual(repr(one), - "Binary(%s, 0)" % (repr(b("hello world")),)) - two = Binary(b("hello world"), 2) - self.assertEqual(repr(two), - "Binary(%s, 2)" % (repr(b("hello world")),)) - three = Binary(b("\x08\xFF")) - self.assertEqual(repr(three), - "Binary(%s, 0)" % (repr(b("\x08\xFF")),)) - four = Binary(b("\x08\xFF"), 2) - self.assertEqual(repr(four), - "Binary(%s, 2)" % (repr(b("\x08\xFF")),)) - five = Binary(b("test"), 100) - self.assertEqual(repr(five), - "Binary(%s, 100)" % (repr(b("test")),)) + one = Binary(b"hello world") + self.assertEqual(repr(one), "Binary({}, 0)".format(repr(b"hello world"))) + two = Binary(b"hello world", 2) + self.assertEqual(repr(two), "Binary({}, 2)".format(repr(b"hello world"))) + three = Binary(b"\x08\xFF") + self.assertEqual(repr(three), "Binary({}, 0)".format(repr(b"\x08\xFF"))) + four = Binary(b"\x08\xFF", 2) + self.assertEqual(repr(four), "Binary({}, 2)".format(repr(b"\x08\xFF"))) + five = Binary(b"test", 100) + self.assertEqual(repr(five), "Binary({}, 100)".format(repr(b"test"))) + + def test_hash(self): + one = Binary(b"hello world") + two = Binary(b"hello world", 42) + self.assertEqual(hash(Binary(b"hello world")), hash(one)) + self.assertNotEqual(hash(one), hash(two)) + self.assertEqual(hash(Binary(b"hello world", 42)), hash(two)) + + def test_uuid_subtype_4(self): + """Only STANDARD should decode subtype 4 as native uuid.""" + expected_uuid = uuid.uuid4() + expected_bin = Binary(expected_uuid.bytes, 4) + doc = {"uuid": expected_bin} + encoded = encode(doc) + for uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + ): + opts = CodecOptions(uuid_representation=uuid_rep) + self.assertEqual(expected_bin, decode(encoded, opts)["uuid"]) + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + self.assertEqual(expected_uuid, decode(encoded, opts)["uuid"]) def test_legacy_java_uuid(self): - if not should_test_uuid: - raise 
SkipTest("No uuid module") - - # Generated by the Java driver - from_java = b('bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu' - 'Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND' - 'ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+' - 'XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1' - 'aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR' - 'jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA' - 'AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z' - 'DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf' - 'aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx' - '29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My' - '1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB' - 'W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp' - 'bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc' - '0MQAA') - - data = base64.b64decode(from_java) - # Test decoding - docs = bson.decode_all(data, SON, False, OLD_UUID_SUBTYPE) + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, UUID_SUBTYPE) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, CSHARP_LEGACY) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, JAVA_LEGACY) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b('').join([bson.BSON.encode(doc, - uuid_subtype=OLD_UUID_SUBTYPE) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=UUID_SUBTYPE) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=CSHARP_LEGACY) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=JAVA_LEGACY) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) - # Test insert and find - client = get_client() - client.pymongo_test.drop_collection('java_uuid') - coll = client.pymongo_test.java_uuid - coll.uuid_subtype = JAVA_LEGACY - - coll.insert(docs) - self.assertEqual(5, coll.count()) - for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) - - coll.uuid_subtype = OLD_UUID_SUBTYPE - for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - 
client.pymongo_test.drop_collection('java_uuid') - def test_legacy_csharp_uuid(self): - if not should_test_uuid: - raise SkipTest("No uuid module") - - # Generated by the .net driver - from_csharp = b('ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl' - 'iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2' - 'ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V' - 'pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl' - 'AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A' - 'ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z' - 'oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU' - 'zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn' - 'dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA' - 'CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT' - 'QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP' - 'MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00' - 'ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=') - - data = base64.b64decode(from_csharp) + data = BinaryData.csharp_data # Test decoding - docs = bson.decode_all(data, SON, False, OLD_UUID_SUBTYPE) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, UUID_SUBTYPE) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, JAVA_LEGACY) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, SON, False, CSHARP_LEGACY) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b('').join([bson.BSON.encode(doc, - uuid_subtype=OLD_UUID_SUBTYPE) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=UUID_SUBTYPE) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=JAVA_LEGACY) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b('').join([bson.BSON.encode(doc, uuid_subtype=CSHARP_LEGACY) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) - # Test insert and find - client = get_client() - client.pymongo_test.drop_collection('csharp_uuid') - coll = client.pymongo_test.csharp_uuid - coll.uuid_subtype = CSHARP_LEGACY - - coll.insert(docs) - self.assertEqual(5, coll.count()) - for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) - - coll.uuid_subtype = OLD_UUID_SUBTYPE - for d in coll.find(): - 
self.assertNotEqual(d['newguid'], d['newguidstring']) - client.pymongo_test.drop_collection('csharp_uuid') - - def test_uri_to_uuid(self): - if not should_test_uuid: - raise SkipTest("No uuid module") - - uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" - client = MongoClient(uri, _connect=False) - self.assertEqual(client.pymongo_test.test.uuid_subtype, CSHARP_LEGACY) - - def test_uuid_queries(self): - if not should_test_uuid: - raise SkipTest("No uuid module") - - c = get_client() - coll = c.pymongo_test.test - coll.drop() - - uu = uuid.uuid4() - # Wrap uu.bytes in binary_type to work - # around http://bugs.python.org/issue7380. - coll.insert({'uuid': Binary(binary_type(uu.bytes), 3)}) - self.assertEqual(1, coll.count()) - - # Test UUIDLegacy queries. - coll.uuid_subtype = 4 - self.assertEqual(0, coll.find({'uuid': uu}).count()) - cur = coll.find({'uuid': UUIDLegacy(uu)}) - self.assertEqual(1, cur.count()) - retrieved = cur.next() - self.assertEqual(uu, retrieved['uuid']) - - # Test regular UUID queries (using subtype 4). - coll.insert({'uuid': uu}) - self.assertEqual(2, coll.count()) - cur = coll.find({'uuid': uu}) - self.assertEqual(1, cur.count()) - retrieved = cur.next() - self.assertEqual(uu, retrieved['uuid']) - - # Test both. - cur = coll.find({'uuid': {'$in': [uu, UUIDLegacy(uu)]}}) - self.assertEqual(2, cur.count()) - coll.drop() - def test_pickle(self): - b1 = Binary(b('123'), 2) + b1 = Binary(b"123", 2) # For testing backwards compatibility with pre-2.4 pymongo - if PY3: - p = b("\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" - "\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" - "\x05K\x02sb.") - else: - p = b("ccopy_reg\n_reconstructor\np0\n(cbson.binary\nBinary\np1\nc" - "__builtin__\nstr\np2\nS'123'\np3\ntp4\nRp5\n(dp6\nS'_Binary" - "__subtype'\np7\nI2\nsb.") - - if not sys.version.startswith('3.0'): + p = ( + b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" + b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" + b"\x05K\x02sb." 
+ ) + + if not sys.version.startswith("3.0"): self.assertEqual(b1, pickle.loads(p)) - for proto in xrange(pickle.HIGHEST_PROTOCOL + 1): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): self.assertEqual(b1, pickle.loads(pickle.dumps(b1, proto))) - if should_test_uuid: - uu = uuid.uuid4() - uul = UUIDLegacy(uu) - - self.assertEqual(uul, copy.copy(uul)) - self.assertEqual(uul, copy.deepcopy(uul)) + uu = uuid.uuid4() + uul = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + + self.assertEqual(uul, copy.copy(uul)) + self.assertEqual(uul, copy.deepcopy(uul)) + + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + self.assertEqual(uul, pickle.loads(pickle.dumps(uul, proto))) + + def test_buffer_protocol(self): + b0 = Binary(b"123", 2) + + self.assertEqual(b0, Binary(memoryview(b"123"), 2)) + self.assertEqual(b0, Binary(bytearray(b"123"), 2)) + with mmap.mmap(-1, len(b"123")) as mm: + mm.write(b"123") + mm.seek(0) + self.assertEqual(b0, Binary(mm, 2)) + self.assertEqual(b0, Binary(array.array("B", b"123"), 2)) + + +class TestUuidSpecExplicitCoding(unittest.TestCase): + uuid: uuid.UUID + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") + + @staticmethod + def _hex_to_bytes(hexstring): + return bytes.fromhex(hexstring) + + # Explicit encoding prose test #1 + def test_encoding_1(self): + obj = Binary.from_uuid(self.uuid) + expected_obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + self.assertEqual(obj, expected_obj) + + def _test_encoding_w_uuid_rep(self, uuid_rep, expected_hexstring, expected_subtype): + obj = Binary.from_uuid(self.uuid, uuid_rep) + expected_obj = Binary(self._hex_to_bytes(expected_hexstring), expected_subtype) + self.assertEqual(obj, expected_obj) + + # Explicit encoding prose test #2 + def test_encoding_2(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.STANDARD, "00112233445566778899AABBCCDDEEFF", 4 + ) + + # Explicit encoding prose test #3 + def test_encoding_3(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.JAVA_LEGACY, "7766554433221100FFEEDDCCBBAA9988", 3 + ) + + # Explicit encoding prose test #4 + def test_encoding_4(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.CSHARP_LEGACY, "33221100554477668899AABBCCDDEEFF", 3 + ) + + # Explicit encoding prose test #5 + def test_encoding_5(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.PYTHON_LEGACY, "00112233445566778899AABBCCDDEEFF", 3 + ) + + # Explicit encoding prose test #6 + def test_encoding_6(self): + with self.assertRaises(ValueError): + Binary.from_uuid(self.uuid, UuidRepresentation.UNSPECIFIED) + + # Explicit decoding prose test #1 + def test_decoding_1(self): + obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + + # Case i: + self.assertEqual(obj.as_uuid(), self.uuid) + # Case ii: + self.assertEqual(obj.as_uuid(UuidRepresentation.STANDARD), self.uuid) + # Cases iii-vi: + for uuid_rep in ( + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.PYTHON_LEGACY, + ): + with self.assertRaises(ValueError): + obj.as_uuid(uuid_rep) + + def _test_decoding_legacy(self, hexstring, uuid_rep): + obj = Binary(self._hex_to_bytes(hexstring), 3) + + # Case i: + with self.assertRaises(ValueError): + obj.as_uuid() + # Cases ii-iii: + for rep in (UuidRepresentation.STANDARD, UuidRepresentation.UNSPECIFIED): + with self.assertRaises(ValueError): + obj.as_uuid(rep) + # Case iv: + self.assertEqual(obj.as_uuid(uuid_rep), self.uuid) + + 
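The prose tests that follow pin down the exact byte layouts for each representation. As a compact round-trip illustration of the explicit API they exercise (public `bson` APIs only; the expected hex matches explicit prose tests #1 and #3):

```python
import uuid

from bson.binary import Binary, UuidRepresentation

u = uuid.UUID("00112233-4455-6677-8899-aabbccddeeff")

std = Binary.from_uuid(u)  # default is STANDARD: subtype 4, RFC 4122 byte order
assert std.subtype == 4 and std.as_uuid() == u

java = Binary.from_uuid(u, UuidRepresentation.JAVA_LEGACY)  # subtype 3
assert java.subtype == 3
assert java.hex() == "7766554433221100ffeeddccbbaa9988"  # both 8-byte halves reversed
assert java.as_uuid(UuidRepresentation.JAVA_LEGACY) == u
```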
+    # Explicit decoding prose test #2
+    def test_decoding_2(self):
+        self._test_decoding_legacy(
+            "7766554433221100FFEEDDCCBBAA9988", UuidRepresentation.JAVA_LEGACY
+        )
+
+    # Explicit decoding prose test #3
+    def test_decoding_3(self):
+        self._test_decoding_legacy(
+            "33221100554477668899AABBCCDDEEFF", UuidRepresentation.CSHARP_LEGACY
+        )
+
+    # Explicit decoding prose test #4
+    def test_decoding_4(self):
+        self._test_decoding_legacy(
+            "00112233445566778899AABBCCDDEEFF", UuidRepresentation.PYTHON_LEGACY
+        )
+
+
+class TestUuidSpecImplicitCoding(IntegrationTest):
+    uuid: uuid.UUID
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF")
+
+    @staticmethod
+    def _hex_to_bytes(hexstring):
+        return bytes.fromhex(hexstring)
+
+    def _get_coll_w_uuid_rep(self, uuid_rep):
+        codec_options = self.client.codec_options.with_options(
+            uuid_representation=validate_uuid_representation(None, uuid_rep)
+        )
+        coll = self.db.get_collection(
+            "pymongo_test", codec_options=codec_options, write_concern=WriteConcern("majority")
+        )
+        return coll
+
+    def _test_encoding(self, uuid_rep, expected_hexstring, expected_subtype):
+        coll = self._get_coll_w_uuid_rep(uuid_rep)
+        coll.delete_many({})
+        coll.insert_one({"_id": self.uuid})
+        self.assertTrue(
+            coll.find_one({"_id": Binary(self._hex_to_bytes(expected_hexstring), expected_subtype)})
+        )
+
+    # Implicit encoding prose test #1
+    def test_encoding_1(self):
+        self._test_encoding("javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3)
+
+    # Implicit encoding prose test #2
+    def test_encoding_2(self):
+        self._test_encoding("csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3)
+
+    # Implicit encoding prose test #3
+    def test_encoding_3(self):
+        self._test_encoding("pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3)
+
+    # Implicit encoding prose test #4
+    def test_encoding_4(self):
+        self._test_encoding("standard", "00112233445566778899AABBCCDDEEFF", 4)
+
+    # Implicit encoding prose test #5
+    def test_encoding_5(self):
+        with self.assertRaises(ValueError):
+            self._test_encoding("unspecified", "dummy", -1)
+
+    def _test_decoding(
+        self,
+        client_uuid_representation_string,
+        legacy_field_uuid_representation,
+        expected_standard_field_value,
+        expected_legacy_field_value,
+    ):
+        coll = self._get_coll_w_uuid_rep(client_uuid_representation_string)
+        coll.drop()
 
+        standard_val = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD)
+        legacy_val = Binary.from_uuid(self.uuid, legacy_field_uuid_representation)
+        coll.insert_one({"standard": standard_val, "legacy": legacy_val})
+
+        doc = coll.find_one()
+        self.assertEqual(doc["standard"], expected_standard_field_value)
+        self.assertEqual(doc["legacy"], expected_legacy_field_value)
+
+    # Implicit decoding prose test #1
+    def test_decoding_1(self):
+        standard_binary = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD)
+        self._test_decoding(
+            "javaLegacy", UuidRepresentation.JAVA_LEGACY, standard_binary, self.uuid
+        )
+        self._test_decoding(
+            "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, standard_binary, self.uuid
+        )
+        self._test_decoding(
+            "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, standard_binary, self.uuid
+        )
+
+    # Implicit decoding prose test #2
+    def test_decoding_2(self):
+        legacy_binary = Binary.from_uuid(self.uuid, UuidRepresentation.PYTHON_LEGACY)
+        self._test_decoding("standard", UuidRepresentation.PYTHON_LEGACY, self.uuid, legacy_binary)
+
+    # Implicit decoding prose test #3
+    def test_decoding_3(self):
+        expected_standard_value = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD)
+        for legacy_uuid_rep in (
+            UuidRepresentation.PYTHON_LEGACY,
+            UuidRepresentation.CSHARP_LEGACY,
+            UuidRepresentation.JAVA_LEGACY,
+        ):
+            expected_legacy_value = Binary.from_uuid(self.uuid, legacy_uuid_rep)
+            self._test_decoding(
+                "unspecified", legacy_uuid_rep, expected_standard_value, expected_legacy_value
+            )
 
 
 if __name__ == "__main__":
diff --git a/test/test_bson.py b/test/test_bson.py
index 5e827bf501..f792db1e89 100644
--- a/test/test_bson.py
+++ b/test/test_bson.py
@@ -1,6 +1,5 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2009-2014 MongoDB, Inc.
+# Copyright 2009-present MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,246 +14,152 @@
 # limitations under the License.
 
 """Test the bson module."""
+from __future__ import annotations
 
+import array
+import collections
 import datetime
+import mmap
+import os
+import pickle
 import re
+import struct
 import sys
-import traceback
-import unittest
-
-try:
-    import uuid
-    should_test_uuid = True
-except ImportError:
-    should_test_uuid = False
+import tempfile
+import uuid
+from collections import OrderedDict, abc
+from io import BytesIO
+
 
 sys.path[0:0] = [""]
 
-from nose.plugins.skip import SkipTest
+from test import qcheck, unittest
+from test.helpers import ExceptionCatchingTask
 
 import bson
-from bson import (BSON,
-                  decode_all,
-                  is_valid,
-                  Regex)
-from bson.binary import Binary, UUIDLegacy
+from bson import (
+    BSON,
+    EPOCH_AWARE,
+    DatetimeMS,
+    Regex,
+    _array_of_documents_to_buffer,
+    _datetime_to_millis,
+    decode,
+    decode_all,
+    decode_file_iter,
+    decode_iter,
+    encode,
+    is_valid,
+    json_util,
+)
+from bson.binary import (
+    USER_DEFINED_SUBTYPE,
+    Binary,
+    BinaryVector,
+    BinaryVectorDtype,
+    UuidRepresentation,
+)
 from bson.code import Code
-from bson.objectid import ObjectId
+from bson.codec_options import CodecOptions, DatetimeConversion
+from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION
 from bson.dbref import DBRef
-from bson.py3compat import b
-from bson.son import SON
-from bson.timestamp import Timestamp
-from bson.errors import (InvalidBSON,
-                         InvalidDocument,
-                         InvalidStringData)
+from bson.errors import InvalidBSON, InvalidDocument
+from bson.int64 import Int64
 from bson.max_key import MaxKey
 from bson.min_key import MinKey
-from bson.tz_util import (FixedOffset,
-                          utc)
+from bson.objectid import ObjectId
+from bson.son import SON
+from bson.timestamp import Timestamp
+from bson.tz_util import FixedOffset, utc
 
-from test import qcheck
 
-PY3 = sys.version_info[0] == 3
+class NotADict(abc.MutableMapping):
+    """Non-dict type that implements the mapping protocol."""
 
+    def __init__(self, initial=None):
+        if not initial:
+            self._dict = {}
+        else:
+            self._dict = initial
 
-class TestBSON(unittest.TestCase):
-    def assertInvalid(self, data):
-        self.assertRaises(InvalidBSON, bson.BSON(data).decode)
+    def __iter__(self):
+        return iter(self._dict)
 
-    def test_basic_validation(self):
-        self.assertRaises(TypeError, is_valid, 100)
-        self.assertRaises(TypeError, is_valid, u"test")
-        self.assertRaises(TypeError, is_valid, 10.4)
+    def __getitem__(self, item):
+        return self._dict[item]
 
-        self.assertInvalid(b("test"))
+    def __delitem__(self, item):
+        del self._dict[item]
 
-        # the simplest valid BSON document
-        self.assertTrue(is_valid(b("\x05\x00\x00\x00\x00")))
-
self.assertTrue(is_valid(BSON(b("\x05\x00\x00\x00\x00")))) + def __setitem__(self, item, value): + self._dict[item] = value - # failure cases - self.assertInvalid(b("\x04\x00\x00\x00\x00")) - self.assertInvalid(b("\x05\x00\x00\x00\x01")) - self.assertInvalid(b("\x05\x00\x00\x00")) - self.assertInvalid(b("\x05\x00\x00\x00\x00\x00")) - self.assertInvalid(b("\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12")) - self.assertInvalid(b("\x09\x00\x00\x00\x10a\x00\x05\x00")) - self.assertInvalid(b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")) - self.assertInvalid(b("\x13\x00\x00\x00\x02foo\x00" - "\x04\x00\x00\x00bar\x00\x00")) - self.assertInvalid(b("\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" - "\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00")) - self.assertInvalid(b("\x15\x00\x00\x00\x03foo\x00\x0c" - "\x00\x00\x00\x08bar\x00\x01\x00\x00")) - self.assertInvalid(b("\x1c\x00\x00\x00\x03foo\x00" - "\x12\x00\x00\x00\x02bar\x00" - "\x05\x00\x00\x00baz\x00\x00\x00")) - self.assertInvalid(b("\x10\x00\x00\x00\x02a\x00" - "\x04\x00\x00\x00abc\xff\x00")) + def __len__(self): + return len(self._dict) - def test_bad_string_lengths(self): - self.assertInvalid( - b("\x0c\x00\x00\x00\x02\x00" - "\x00\x00\x00\x00\x00\x00")) - self.assertInvalid( - b("\x12\x00\x00\x00\x02\x00" - "\xff\xff\xff\xfffoobar\x00\x00")) - self.assertInvalid( - b("\x0c\x00\x00\x00\x0e\x00" - "\x00\x00\x00\x00\x00\x00")) - self.assertInvalid( - b("\x12\x00\x00\x00\x0e\x00" - "\xff\xff\xff\xfffoobar\x00\x00")) - self.assertInvalid( - b("\x18\x00\x00\x00\x0c\x00" - "\x00\x00\x00\x00\x00RY\xb5j" - "\xfa[\xd8A\xd6X]\x99\x00")) - self.assertInvalid( - b("\x1e\x00\x00\x00\x0c\x00" - "\xff\xff\xff\xfffoobar\x00" - "RY\xb5j\xfa[\xd8A\xd6X]\x99\x00")) - self.assertInvalid( - b("\x0c\x00\x00\x00\r\x00" - "\x00\x00\x00\x00\x00\x00")) - self.assertInvalid( - b("\x0c\x00\x00\x00\r\x00" - "\xff\xff\xff\xff\x00\x00")) - self.assertInvalid( - b("\x1c\x00\x00\x00\x0f\x00" - "\x15\x00\x00\x00\x00\x00" - "\x00\x00\x00\x0c\x00\x00" - "\x00\x02\x00\x01\x00\x00" - "\x00\x00\x00\x00")) - self.assertInvalid( - b("\x1c\x00\x00\x00\x0f\x00" - "\x15\x00\x00\x00\xff\xff" - "\xff\xff\x00\x0c\x00\x00" - "\x00\x02\x00\x01\x00\x00" - "\x00\x00\x00\x00")) - self.assertInvalid( - b("\x1c\x00\x00\x00\x0f\x00" - "\x15\x00\x00\x00\x01\x00" - "\x00\x00\x00\x0c\x00\x00" - "\x00\x02\x00\x00\x00\x00" - "\x00\x00\x00\x00")) - self.assertInvalid( - b("\x1c\x00\x00\x00\x0f\x00" - "\x15\x00\x00\x00\x01\x00" - "\x00\x00\x00\x0c\x00\x00" - "\x00\x02\x00\xff\xff\xff" - "\xff\x00\x00\x00")) + def __eq__(self, other): + if isinstance(other, abc.Mapping): + return all(self.get(k) == other.get(k) for k in self) + return NotImplemented - def test_random_data_is_not_bson(self): - qcheck.check_unittest(self, qcheck.isnt(is_valid), - qcheck.gen_string(qcheck.gen_range(0, 40))) + def __repr__(self): + return "NotADict(%s)" % repr(self._dict) - def test_basic_decode(self): - self.assertEqual({"test": u"hello world"}, - BSON(b("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" - "\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" - "\x72\x6C\x64\x00\x00")).decode()) - self.assertEqual([{"test": u"hello world"}, {}], - decode_all(b("\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - "\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - "\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - "\x05\x00\x00\x00\x00"))) - def test_data_timestamp(self): - self.assertEqual({"test": Timestamp(4, 20)}, - BSON(b("\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" - "\x00\x00\x00\x04\x00\x00\x00\x00")).decode()) +class DSTAwareTimezone(datetime.tzinfo): + def __init__(self, 
offset, name, dst_start_month, dst_end_month): + self.__offset = offset + self.__dst_start_month = dst_start_month + self.__dst_end_month = dst_end_month + self.__name = name - def test_basic_encode(self): - self.assertRaises(TypeError, BSON.encode, 100) - self.assertRaises(TypeError, BSON.encode, "hello") - self.assertRaises(TypeError, BSON.encode, None) - self.assertRaises(TypeError, BSON.encode, []) - - self.assertEqual(BSON.encode({}), BSON(b("\x05\x00\x00\x00\x00"))) - self.assertEqual(BSON.encode({"test": u"hello world"}), - b("\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" - "\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" - "\x64\x00\x00")) - self.assertEqual(BSON.encode({u"mike": 100}), - b("\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" - "\x00\x00\x00")) - self.assertEqual(BSON.encode({"hello": 1.5}), - b("\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" - "\x00\x00\x00\x00\x00\xF8\x3F\x00")) - self.assertEqual(BSON.encode({"true": True}), - b("\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00")) - self.assertEqual(BSON.encode({"false": False}), - b("\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" - "\x00")) - self.assertEqual(BSON.encode({"empty": []}), - b("\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" - "\x00\x00\x00\x00\x00")) - self.assertEqual(BSON.encode({"none": {}}), - b("\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" - "\x00\x00\x00\x00")) - self.assertEqual(BSON.encode({"test": Binary(b("test"), 0)}), - b("\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - "\x00\x00\x00\x74\x65\x73\x74\x00")) - self.assertEqual(BSON.encode({"test": Binary(b("test"), 2)}), - b("\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" - "\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00")) - self.assertEqual(BSON.encode({"test": Binary(b("test"), 128)}), - b("\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - "\x00\x00\x80\x74\x65\x73\x74\x00")) - self.assertEqual(BSON.encode({"test": None}), - b("\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00")) - self.assertEqual(BSON.encode({"date": datetime.datetime(2007, 1, 8, - 0, 30, 11)}), - b("\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" - "\x1C\xFF\x0F\x01\x00\x00\x00")) - self.assertEqual(BSON.encode({"regex": re.compile(b("a*b"), - re.IGNORECASE)}), - b("\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" - "\x2A\x62\x00\x69\x00\x00")) - self.assertEqual(BSON.encode({"$where": Code("test")}), - b("\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" - "\x00\x00")) - self.assertEqual(BSON.encode({"$field": - Code("function(){ return true;}", scope=None)}), - b("+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" - "function(){ return true;}\x00\x00")) - self.assertEqual(BSON.encode({"$field": - Code("return function(){ return x; }", - scope={'x': False})}), - b("=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" - "\x00\x00return function(){ return x; }\x00\t\x00" - "\x00\x00\x08x\x00\x00\x00\x00")) - a = ObjectId(b("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B")) - self.assertEqual(BSON.encode({"oid": a}), - b("\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" - "\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00")) - self.assertEqual(BSON.encode({"ref": DBRef("coll", a)}), - b("\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" - "$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" - "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" - "\x00")) + def _is_dst(self, dt): + return self.__dst_start_month <= dt.month <= self.__dst_end_month + + def utcoffset(self, dt): + return datetime.timedelta(minutes=self.__offset) + self.dst(dt) + + def dst(self, dt): 
+ if self._is_dst(dt): + return datetime.timedelta(hours=1) + return datetime.timedelta(0) + + def tzname(self, dt): + return self.__name - def test_encode_then_decode(self): - def helper(dict): - self.assertEqual(dict, (BSON.encode(dict)).decode()) +class TestBSON(unittest.TestCase): + def assertInvalid(self, data): + self.assertRaises(InvalidBSON, decode, data) + + def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): + # Work around http://bugs.jython.org/issue1728 + if sys.platform.startswith("java"): + doc_class = SON + + def helper(doc): + self.assertEqual(doc, (decoder(encoder(doc_class(doc))))) + self.assertEqual(doc, decoder(encoder(doc))) + helper({}) - helper({"test": u"hello"}) - self.assertTrue(isinstance(BSON.encode({"hello": "world"}) - .decode()["hello"], - unicode)) + helper({"test": "hello"}) + self.assertIsInstance(decoder(encoder({"hello": "world"}))["hello"], str) helper({"mike": -10120}) - helper({"long": long(10)}) + helper({"long": Int64(10)}) helper({"really big long": 2147483648}) - helper({u"hello": 0.0013109}) + helper({"hello": 0.0013109}) helper({"something": True}) helper({"false": False}) - helper({"an array": [1, True, 3.8, u"world"]}) - helper({"an object": {"test": u"something"}}) - helper({"a binary": Binary(b("test"), 100)}) - helper({"a binary": Binary(b("test"), 128)}) - helper({"a binary": Binary(b("test"), 254)}) - helper({"another binary": Binary(b("test"), 2)}) - helper(SON([(u'test dst', datetime.datetime(1993, 4, 4, 2))])) - helper(SON([(u'test negative dst', datetime.datetime(1, 1, 1, 1, 1, 1))])) + helper({"an array": [1, True, 3.8, "world"]}) + helper({"an object": doc_class({"test": "something"})}) + helper({"a binary": Binary(b"test", 100)}) + helper({"a binary": Binary(b"test", 128)}) + helper({"a binary": Binary(b"test", 254)}) + helper({"another binary": Binary(b"test", 2)}) + helper({"binary packed bit vector": Binary(b"\x10\x00\x7f\x07", 9)}) + helper({"binary int8 vector": Binary(b"\x03\x00\x7f\x07", 9)}) + helper({"binary float32 vector": Binary(b"'\x00\x00\x00\xfeB\x00\x00\xe0@", 9)}) + helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) + helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) helper({"ref": DBRef("coll", 5)}) helper({"ref": DBRef("coll", 5, foo="bar", bar=4)}) @@ -264,19 +169,398 @@ def helper(dict): helper({"foo": MinKey()}) helper({"foo": MaxKey()}) helper({"$field": Code("function(){ return true; }")}) - helper({"$field": Code("return function(){ return x; }", scope={'x': False})}) - - doc_class = dict - # Work around http://bugs.jython.org/issue1728 - if (sys.platform.startswith('java') and - sys.version_info[:3] >= (2, 5, 2)): - doc_class = SON + helper({"$field": Code("return function(){ return x; }", scope={"x": False})}) def encode_then_decode(doc): - return doc == (BSON.encode(doc)).decode(as_class=doc_class) + return doc_class(doc) == decoder(encode(doc), CodecOptions(document_class=doc_class)) + + qcheck.check_unittest(self, encode_then_decode, qcheck.gen_mongo_dict(3)) - qcheck.check_unittest(self, encode_then_decode, - qcheck.gen_mongo_dict(3)) + def test_encode_then_decode(self): + self.check_encode_then_decode() + + def test_encode_then_decode_any_mapping(self): + self.check_encode_then_decode(doc_class=NotADict) + + def test_encode_then_decode_legacy(self): + self.check_encode_then_decode( + encoder=BSON.encode, decoder=lambda *args: BSON(args[0]).decode(*args[1:]) + ) + + def 
test_encode_then_decode_any_mapping_legacy(self): + self.check_encode_then_decode( + doc_class=NotADict, + encoder=BSON.encode, + decoder=lambda *args: BSON(args[0]).decode(*args[1:]), + ) + + def test_encoding_defaultdict(self): + dct = collections.defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + encode(dct) + self.assertEqual(dct, collections.defaultdict(dict, [("foo", "bar")])) + + def test_basic_validation(self): + self.assertRaises(TypeError, is_valid, 100) + self.assertRaises(TypeError, is_valid, "test") + self.assertRaises(TypeError, is_valid, 10.4) + + self.assertInvalid(b"test") + + # the simplest valid BSON document + self.assertTrue(is_valid(b"\x05\x00\x00\x00\x00")) + self.assertTrue(is_valid(BSON(b"\x05\x00\x00\x00\x00"))) + + # failure cases + self.assertInvalid(b"\x04\x00\x00\x00\x00") + self.assertInvalid(b"\x05\x00\x00\x00\x01") + self.assertInvalid(b"\x05\x00\x00\x00") + self.assertInvalid(b"\x05\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") + self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") + self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00") + self.assertInvalid( + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" + ) + self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c\x00\x00\x00\x08bar\x00\x01\x00\x00") + self.assertInvalid( + b"\x1c\x00\x00\x00\x03foo\x00" + b"\x12\x00\x00\x00\x02bar\x00" + b"\x05\x00\x00\x00baz\x00\x00\x00" + ) + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00\x04\x00\x00\x00abc\xff\x00") + + def test_bad_string_lengths(self): + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid( + b"\x18\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) + self.assertInvalid( + b"\x1e\x00\x00\x00\x0c\x00" + b"\xff\xff\xff\xfffoobar\x00" + b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\xff\xff\xff\xff\x00\x00") + self.assertInvalid( + b"\x1c\x00\x00\x00\x0f\x00" + b"\x15\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x0c\x00\x00" + b"\x00\x02\x00\x01\x00\x00" + b"\x00\x00\x00\x00" + ) + self.assertInvalid( + b"\x1c\x00\x00\x00\x0f\x00" + b"\x15\x00\x00\x00\xff\xff" + b"\xff\xff\x00\x0c\x00\x00" + b"\x00\x02\x00\x01\x00\x00" + b"\x00\x00\x00\x00" + ) + self.assertInvalid( + b"\x1c\x00\x00\x00\x0f\x00" + b"\x15\x00\x00\x00\x01\x00" + b"\x00\x00\x00\x0c\x00\x00" + b"\x00\x02\x00\x00\x00\x00" + b"\x00\x00\x00\x00" + ) + self.assertInvalid( + b"\x1c\x00\x00\x00\x0f\x00" + b"\x15\x00\x00\x00\x01\x00" + b"\x00\x00\x00\x0c\x00\x00" + b"\x00\x02\x00\xff\xff\xff" + b"\xff\x00\x00\x00" + ) + + def test_random_data_is_not_bson(self): + qcheck.check_unittest( + self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40)) + ) + + def test_basic_decode(self): + self.assertEqual( + {"test": "hello world"}, + decode( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" + b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" + b"\x72\x6C\x64\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + decode_all( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + 
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_iter( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_file_iter( + BytesIO( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ) + ), + ) + + def test_decode_all_buffer_protocol(self): + docs = [{"foo": "bar"}, {}] + bs = b"".join(map(encode, docs)) # type: ignore[arg-type] + self.assertEqual(docs, decode_all(bytearray(bs))) + self.assertEqual(docs, decode_all(memoryview(bs))) + self.assertEqual(docs, decode_all(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(docs, decode_all(array.array("B", bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(docs, decode_all(mm)) + + def test_decode_buffer_protocol(self): + doc = {"foo": "bar"} + bs = encode(doc) + self.assertEqual(doc, decode(bs)) + self.assertEqual(doc, decode(bytearray(bs))) + self.assertEqual(doc, decode(memoryview(bs))) + self.assertEqual(doc, decode(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(doc, decode(array.array("B", bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(doc, decode(mm)) + + def test_invalid_decodes(self): + # Invalid object size (not enough bytes in document for even + # an object size of first object. + # NOTE: decode_all and decode_iter don't care, not sure if they should? + self.assertRaises(InvalidBSON, list, decode_file_iter(BytesIO(b"\x1B"))) + + bad_bsons = [ + # An object size that's too small to even include the object size, + # but is correctly encoded, along with a correct EOO (and no data). + b"\x01\x00\x00\x00\x00", + # One object, but with object size listed smaller than it is in the + # data. + ( + b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), + # One object, missing the EOO at the end. + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00" + ), + # One object, sized correctly, with a spot for an EOO, but the EOO + # isn't 0x00. + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\xFF" + ), + ] + for i, data in enumerate(bad_bsons): + msg = f"bad_bson[{i}]" + with self.assertRaises(InvalidBSON, msg=msg): + decode_all(data) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_iter(data)) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_file_iter(BytesIO(data))) + with tempfile.TemporaryFile() as scratch: + scratch.write(data) + scratch.seek(0, os.SEEK_SET) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_file_iter(scratch)) + + def test_invalid_field_name(self): + # Decode a truncated field + with self.assertRaises(InvalidBSON) as ctx: + decode(b"\x0b\x00\x00\x00\x02field\x00") + # Assert that the InvalidBSON error message is not empty. 
+ self.assertTrue(str(ctx.exception)) + + def test_data_timestamp(self): + self.assertEqual( + {"test": Timestamp(4, 20)}, + decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00"), + ) + + def test_basic_encode(self): + self.assertRaises(TypeError, encode, 100) + self.assertRaises(TypeError, encode, "hello") + self.assertRaises(TypeError, encode, None) + self.assertRaises(TypeError, encode, []) + + self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00")) + self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00") + self.assertEqual( + encode({"test": "hello world"}), + b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" + b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" + b"\x64\x00\x00", + ) + self.assertEqual( + encode({"mike": 100}), + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"hello": 1.5}), + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00\x00\x00\x00\x00\xF8\x3F\x00", + ) + self.assertEqual( + encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" + ) + self.assertEqual( + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00\x00" + ) + self.assertEqual( + encode({"empty": []}), + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"none": {}}), + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 0)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 2)}), + b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" + b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 128)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"vector_int8": Binary.from_vector([-128, -1, 127], BinaryVectorDtype.INT8)}), + b"\x1c\x00\x00\x00\x05vector_int8\x00\x05\x00\x00\x00\t\x03\x00\x80\xff\x7f\x00", + ) + self.assertEqual( + encode({"vector_bool": Binary.from_vector([1, 127], BinaryVectorDtype.PACKED_BIT)}), + b"\x1b\x00\x00\x00\x05vector_bool\x00\x04\x00\x00\x00\t\x10\x00\x01\x7f\x00", + ) + self.assertEqual( + encode( + {"vector_float32": Binary.from_vector([-1.1, 1.1e10], BinaryVectorDtype.FLOAT32)} + ), + b"$\x00\x00\x00\x05vector_float32\x00\n\x00\x00\x00\t'\x00\xcd\xcc\x8c\xbf\xac\xe9#P\x00", + ) + self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") + self.assertEqual( + encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C\xFF\x0F\x01\x00\x00\x00", + ) + self.assertEqual( + encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A\x62\x00\x69\x00\x00", + ) + self.assertEqual( + encode({"$where": Code("test")}), + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("function(){ return true;}", scope=None)}), + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00function(){ return true;}\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), + b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" + b"\x00\x00return function(){ return x; }\x00\t\x00" + b"\x00\x00\x08x\x00\x00\x00\x00", + ) + unicode_empty_scope = Code("function(){ return 'héllo';}", {}) 
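+        # Non-ASCII code plus an explicit empty scope exercises both UTF-8
+        # length accounting and the code-with-scope (0x0F) element layout.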
+        self.assertEqual(
+            encode({"$field": unicode_empty_scope}),
+            b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00"
+            b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05"
+            b"\x00\x00\x00\x00\x00",
+        )
+        a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B")
+        self.assertEqual(
+            encode({"oid": a}),
+            b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02"
+            b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00",
+        )
+        self.assertEqual(
+            encode({"ref": DBRef("coll", a)}),
+            b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02"
+            b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00"
+            b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00"
+            b"\x00",
+        )
+
+    def test_bad_code(self):
+        # Assert that decoding invalid Code with scope does not include a field name.
+        def generate_payload(length: int) -> bytes:
+            string_size = length - 0x1E
+
+            return bytes.fromhex(
+                struct.pack("
+        # MinKey's <, <=, >, >=, !=, and ==.
+        # These tests should be kept as assertTrue as opposed to using unittest's built-in comparison assertions because
+        # MinKey and MaxKey define their own __ge__, __le__, and other comparison attributes, and we want to explicitly test that.
         self.assertTrue(MinKey() < None)
         self.assertTrue(MinKey() < 1)
         self.assertTrue(MinKey() <= 1)
@@ -623,7 +1062,7 @@ def test_minkey_maxkey_comparison(self):
         self.assertTrue(MinKey() != 1)
         self.assertFalse(MinKey() == 1)
         self.assertTrue(MinKey() == MinKey())
-
+        # MinKey compared to MaxKey.
         self.assertTrue(MinKey() < MaxKey())
         self.assertTrue(MinKey() <= MaxKey())
@@ -631,7 +1070,7 @@ def test_minkey_maxkey_comparison(self):
         self.assertFalse(MinKey() >= MaxKey())
         self.assertTrue(MinKey() != MaxKey())
         self.assertFalse(MinKey() == MaxKey())
-
+        # MaxKey's <, <=, >, >=, !=, and ==.
         self.assertFalse(MaxKey() < None)
         self.assertFalse(MaxKey() < 1)
@@ -653,6 +1092,11 @@ def test_minkey_maxkey_comparison(self):
         self.assertTrue(MaxKey() != MinKey())
         self.assertFalse(MaxKey() == MinKey())
 
+    def test_minkey_maxkey_hash(self):
+        self.assertEqual(hash(MaxKey()), hash(MaxKey()))
+        self.assertEqual(hash(MinKey()), hash(MinKey()))
+        self.assertNotEqual(hash(MaxKey()), hash(MinKey()))
+
     def test_timestamp_comparison(self):
         # Timestamp is initialized with time, inc. Time is the more
         # significant comparand.
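
(Aside: a small illustration, separate from the diff, of the ordering rule the
comment above describes. Timestamp compares by time first and consults inc
only to break ties.)

    from bson.timestamp import Timestamp

    assert Timestamp(2, 0) > Timestamp(1, 99)  # time is more significant
    assert Timestamp(1, 1) > Timestamp(1, 0)   # inc decides equal times
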
@@ -676,12 +1120,578 @@ def test_timestamp_comparison(self): self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 0)) self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0)) + def test_timestamp_highorder_bits(self): + doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + self.assertEqual(doc_bson, encode(doc)) + self.assertEqual(doc, decode(doc_bson)) + def test_bad_id_keys(self): - self.assertRaises(InvalidDocument, BSON.encode, - {"_id": {"$bad": 123}}, True) - self.assertRaises(InvalidDocument, BSON.encode, - {"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}, True) - BSON.encode({"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}) + self.assertRaises(InvalidDocument, encode, {"_id": {"$bad": 123}}, True) + self.assertRaises( + InvalidDocument, encode, {"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}, True + ) + encode({"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}) + + def test_bson_encode_thread_safe(self): + def target(i): + for j in range(1000): + my_int = type(f"MyInt_{i}_{j}", (int,), {}) + bson.encode({"my_int": my_int()}) + + threads = [ExceptionCatchingTask(target=target, args=(i,)) for i in range(3)] + for t in threads: + t.start() + + for t in threads: + t.join() + + for t in threads: + self.assertIsNone(t.exc) + + def test_raise_invalid_document(self): + class Wrapper: + def __init__(self, val): + self.val = val + + def __repr__(self): + return repr(self.val) + + self.assertEqual("1", repr(Wrapper(1))) + with self.assertRaisesRegex( + InvalidDocument, "cannot encode object: 1, of type: " + repr(Wrapper) + ): + encode({"t": Wrapper(1)}) + + def test_doc_in_invalid_document_error_as_property(self): + class Wrapper: + def __init__(self, val): + self.val = val + + def __repr__(self): + return repr(self.val) + + self.assertEqual("1", repr(Wrapper(1))) + doc = {"t": Wrapper(1)} + with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm: + encode(doc) + self.assertEqual(cm.exception.document, doc) + + def test_doc_in_invalid_document_error_as_property_mapping(self): + class MyMapping(abc.Mapping): + def keys(self): + return ["t"] + + def __getitem__(self, name): + if name == "_id": + return None + return Wrapper(name) + + def __len__(self): + return 1 + + def __iter__(self): + return iter(["t"]) + + def __eq__(self, other): + if isinstance(other, MyMapping): + return True + return False + + class Wrapper: + def __init__(self, val): + self.val = val + + def __repr__(self): + return repr(self.val) + + self.assertEqual("1", repr(Wrapper(1))) + doc = MyMapping() + with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm: + encode(doc) + self.assertEqual(cm.exception.document, doc) + + +class TestCodecOptions(unittest.TestCase): + def test_document_class(self): + self.assertRaises(TypeError, CodecOptions, document_class=object) + self.assertIs(SON, CodecOptions(document_class=SON).document_class) # type: ignore[type-var] + + def test_tz_aware(self): + self.assertRaises(TypeError, CodecOptions, tz_aware=1) + self.assertFalse(CodecOptions().tz_aware) + self.assertTrue(CodecOptions(tz_aware=True).tz_aware) + + def test_uuid_representation(self): + self.assertRaises(ValueError, CodecOptions, uuid_representation=7) + self.assertRaises(ValueError, CodecOptions, uuid_representation=2) + + def test_tzinfo(self): + self.assertRaises(TypeError, CodecOptions, tzinfo="pacific") + tz = FixedOffset(42, "forty-two") + self.assertRaises(ValueError, CodecOptions, tzinfo=tz) + self.assertEqual(tz, CodecOptions(tz_aware=True, 
tzinfo=tz).tzinfo) + self.assertEqual(repr(tz), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')") + self.assertEqual( + repr(eval(repr(tz))), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')" + ) + + def test_codec_options_repr(self): + r = ( + "CodecOptions(document_class=dict, tz_aware=False, " + "uuid_representation=UuidRepresentation.UNSPECIFIED, " + "unicode_decode_error_handler='strict', " + "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " + "fallback_encoder=None), " + "datetime_conversion=DatetimeConversion.DATETIME)" + ) + self.assertEqual(r, repr(CodecOptions())) + + def test_decode_all_defaults(self): + # Test decode_all()'s default document_class is dict and tz_aware is + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} + + decoded = bson.decode_all(bson.encode(doc))[0] + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + # The default uuid_representation is UNSPECIFIED + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) + + def test_decode_all_no_options(self): + # Test decode_all()'s default document_class is dict and tz_aware is + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} + + decoded = bson.decode_all(bson.encode(doc), None)[0] + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + + doc2 = {"id": Binary.from_uuid(uuid.uuid4())} + decoded = bson.decode_all(bson.encode(doc2), None)[0] + self.assertIsInstance(decoded["id"], Binary) + + def test_decode_all_kwarg(self): + doc = {"a": uuid.uuid4()} + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encoded = encode(doc, codec_options=opts) + # Positional codec_options + self.assertEqual([doc], decode_all(encoded, opts)) + # Keyword codec_options + self.assertEqual([doc], decode_all(encoded, codec_options=opts)) + + def test_unicode_decode_error_handler(self): + enc = encode({"keystr": "foobar"}) + + # Test handling of bad key value, bad string value, and both. + invalid_key = enc[:7] + b"\xe9" + enc[8:] + invalid_val = enc[:18] + b"\xe9" + enc[19:] + invalid_both = enc[:7] + b"\xe9" + enc[8:18] + b"\xe9" + enc[19:] + + # Ensure that strict mode raises an error. + for invalid in [invalid_key, invalid_val, invalid_both]: + self.assertRaises( + InvalidBSON, + decode, + invalid, + CodecOptions(unicode_decode_error_handler="strict"), + ) + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) + + # Test all other error handlers. + for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: + expected_key = b"ke\xe9str".decode("utf-8", handler) + expected_val = b"fo\xe9bar".decode("utf-8", handler) + doc = decode(invalid_key, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: "foobar"}) + doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {"keystr": expected_val}) + doc = decode(invalid_both, CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: expected_val}) + + # Test handling bad error mode. 
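+        # An unknown handler name only matters once a UnicodeDecodeError
+        # actually occurs, so decoding well-formed UTF-8 still succeeds here.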
+ dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) + self.assertEqual(dec, {"keystr": "foobar"}) + + self.assertRaises( + InvalidBSON, + decode, + invalid_both, + CodecOptions(unicode_decode_error_handler="junk"), + ) + + def round_trip_pickle(self, obj, pickled_with_older): + pickled_with_older_obj = pickle.loads(pickled_with_older) + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + pkl = pickle.dumps(obj, protocol=protocol) + obj2 = pickle.loads(pkl) + self.assertEqual(obj, obj2) + self.assertEqual(pickled_with_older_obj, obj2) + + def test_regex_pickling(self): + reg = Regex(".?") + pickled_with_3 = ( + b"\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag" + b"s\x94K\x00ub." + ) + self.round_trip_pickle(reg, pickled_with_3) + + def test_timestamp_pickling(self): + ts = Timestamp(0, 1) + pickled_with_3 = ( + b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)" + b"\x81\x94}\x94(" + b"\x8c\x10_Timestamp__time\x94K\x00\x8c" + b"\x0f_Timestamp__inc\x94K\x01ub." + ) + self.round_trip_pickle(ts, pickled_with_3) + + def test_dbref_pickling(self): + dbr = DBRef("foo", 5) + pickled_with_3 = ( + b"\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub." + ) + self.round_trip_pickle(dbr, pickled_with_3) + + dbr = DBRef("foo", 5, database="db", kwargs1=None) + pickled_with_3 = ( + b"\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94" + b"\x8c\x07kwargs1\x94Nsub." + ) + + self.round_trip_pickle(dbr, pickled_with_3) + + def test_minkey_pickling(self): + mink = MinKey() + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)" + b"\x81\x94." + ) + + self.round_trip_pickle(mink, pickled_with_3) + + def test_maxkey_pickling(self): + maxk = MaxKey() + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)" + b"\x81\x94." + ) + + self.round_trip_pickle(maxk, pickled_with_3) + + def test_int64_pickling(self): + i64 = Int64(9) + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94" + b"\x81\x94." + ) + self.round_trip_pickle(i64, pickled_with_3) + + def test_bson_encode_decode(self) -> None: + doc = {"_id": ObjectId()} + encoded = bson.encode(doc) + decoded = bson.decode(encoded) + encoded = bson.encode(decoded) + decoded = bson.decode(encoded) + # Documents returned from decode are mutable. + decoded["new_field"] = 1 + self.assertTrue(decoded["_id"].generation_time) + + +class TestDatetimeConversion(unittest.TestCase): + def test_comps(self): + # Tests other timestamp formats. + # Test each of the rich comparison methods. 
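+        # Every DatetimeMS comparison should agree with the same comparison on
+        # the underlying millisecond integer (_value), dunder by dunder.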
+ pairs = [ + (DatetimeMS(-1), DatetimeMS(1)), + (DatetimeMS(0), DatetimeMS(0)), + (DatetimeMS(1), DatetimeMS(-1)), + ] + + comp_ops = ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__"] + for lh, rh in pairs: + for op in comp_ops: + self.assertEqual(getattr(lh, op)(rh), getattr(lh._value, op)(rh._value)) + + def test_class_conversions(self): + # Test class conversions. + dtr1 = DatetimeMS(1234) + dt1 = dtr1.as_datetime() + self.assertEqual(dtr1, DatetimeMS(dt1)) + + dt2 = datetime.datetime(1969, 1, 1) + dtr2 = DatetimeMS(dt2) + self.assertEqual(dtr2.as_datetime(), dt2) + + # Test encode and decode without codec options. Expect: DatetimeMS => datetime + dtr1 = DatetimeMS(0) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1) + self.assertEqual(dec1["x"], datetime.datetime(1970, 1, 1)) + self.assertNotEqual(type(dtr1), type(dec1["x"])) + + # Test encode and decode with codec options. Expect: UTCDateimteRaw => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1, opts1) + self.assertEqual(type(dtr1), type(dec1["x"])) + self.assertEqual(dtr1, dec1["x"]) + + # Expect: datetime => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) + dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + enc1 = encode({"x": dt1}) + dec1 = decode(enc1, opts1) + self.assertEqual(dec1["x"], DatetimeMS(0)) + self.assertNotEqual(dt1, type(dec1["x"])) + + def test_clamping(self): + # Test clamping from below and above. + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) + dec_below = decode(below, opts) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping_local(self): + # Naive clamping to local tz. + opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + + dec_below = decode(below, opts) + self.assertEqual(dec_below["x"], datetime.datetime.min) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(microsecond=999000), + ) + + def test_tz_clamping_utc(self): + # Aware clamping default utc. 
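+        # With tz_aware=True and no explicit tzinfo, out-of-range values are
+        # pinned to datetime.min/datetime.max interpreted as UTC.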
+ opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping_non_utc(self): + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + # Min/max values in this timezone which can be represented in both BSON and datetime UTC. + try: + min_tz = datetime.datetime.min.replace(tzinfo=utc).astimezone(tz) + except OverflowError: + min_tz = datetime.datetime.min.replace(tzinfo=tz) + try: + max_tz = datetime.datetime.max.replace(tzinfo=utc, microsecond=999000).astimezone( + tz + ) + except OverflowError: + max_tz = datetime.datetime.max.replace(tzinfo=tz, microsecond=999000) + + for in_range in [ + min_tz, + min_tz + datetime.timedelta(milliseconds=1), + max_tz - datetime.timedelta(milliseconds=1), + max_tz, + ]: + doc = decode(encode({"x": in_range}), opts) + self.assertEqual(doc["x"], in_range) + + for too_low in [ + DatetimeMS(_datetime_to_millis(min_tz) - 1), + DatetimeMS(_datetime_to_millis(min_tz) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(min_tz) - 1 - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1 - 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_low}), opts) + self.assertEqual(doc["x"], min_tz) + + for too_high in [ + DatetimeMS(_datetime_to_millis(max_tz) + 1), + DatetimeMS(_datetime_to_millis(max_tz) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(max_tz) + 1 + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1 + 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_high}), opts) + self.assertEqual(doc["x"], max_tz) + + def test_tz_clamping_non_utc_simple(self): + dtm = datetime.datetime(2024, 8, 23) + encoded = encode({"d": dtm}) + self.assertEqual(decode(encoded)["d"], dtm) + for conversion in [ + DatetimeConversion.DATETIME, + DatetimeConversion.DATETIME_CLAMP, + DatetimeConversion.DATETIME_AUTO, + ]: + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions(datetime_conversion=conversion, tz_aware=True, tzinfo=tz) + self.assertEqual(decode(encoded, opts)["d"], dtm.replace(tzinfo=utc).astimezone(tz)) + + def test_tz_clamping_non_hashable(self): + class NonHashableTZ(FixedOffset): + __hash__ = None + + tz = NonHashableTZ(0, "UTC-non-hashable") + self.assertRaises(TypeError, hash, tz) + # Aware clamping. 
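+        # Decoding must still work when the tzinfo cannot be hashed, i.e. no
+        # code path may use the tzinfo as a dict or set key.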
+ opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts) + self.assertEqual(dec_below["x"], datetime.datetime.min.replace(tzinfo=tz)) + + within = encode({"x": EPOCH_AWARE.astimezone(tz)}) + dec_within = decode(within, opts) + self.assertEqual(dec_within["x"], EPOCH_AWARE.astimezone(tz)) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=tz, microsecond=999000), + ) + + def test_datetime_auto(self): + # Naive auto, in range. + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Naive auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Naive auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + # Aware auto, in range. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_AUTO, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts2) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Aware auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Aware auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + def test_millis_from_datetime_ms(self): + # Test 65+ bit integer conversion, expect OverflowError. + big_ms = 2**65 + with self.assertRaises(OverflowError): + encode({"x": DatetimeMS(big_ms)}) + + # Subclass of DatetimeMS w/ __int__ override, expect an Error. 
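+        # The override below returns a float from __int__ on purpose: encoding
+        # should raise TypeError rather than accept a non-integer value.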
+ class DatetimeMSOverride(DatetimeMS): + def __int__(self): + return float(self._value) + + float_ms = DatetimeMSOverride(2) + with self.assertRaises(TypeError): + encode({"x": float_ms}) + + # Test InvalidBSON errors on conversion include _DATETIME_ERROR_SUGGESTION + small_ms = -2 << 51 + with self.assertRaisesRegex(InvalidBSON, re.compile(re.escape(_DATETIME_ERROR_SUGGESTION))): + decode(encode({"a": DatetimeMS(small_ms)})) + + def test_array_of_documents_to_buffer(self): + doc = dict(a=1) + buf = _array_of_documents_to_buffer(encode({"0": doc})) + self.assertEqual(buf, encode(doc)) + buf = _array_of_documents_to_buffer(encode({"0": doc, "1": doc})) + self.assertEqual(buf, encode(doc) + encode(doc)) + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(encode({"0": doc, "1": doc}) + b"1") + buf = encode({"0": doc, "1": doc}) + buf = buf[:-1] + b"1" + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(buf) + # We replace the size of the array with \xff\xff\xff\x00 which is -221 as an int32. + buf = b"\x14\x00\x00\x00\x04a\x00\xff\xff\xff\x00\x100\x00\x01\x00\x00\x00\x00\x00" + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(buf) + + +class TestLongLongToString(unittest.TestCase): + def test_long_long_to_string(self): + try: + from bson import _cbson + + _cbson._test_long_long_to_str() + except ImportError: + print("_cbson was not imported. Check compilation logs.") + if __name__ == "__main__": unittest.main() diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py new file mode 100644 index 0000000000..2783338793 --- /dev/null +++ b/test/test_bson_binary_vector.py @@ -0,0 +1,122 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import binascii +import struct +from pathlib import Path +from test import unittest + +from bson import decode, encode, json_util +from bson.binary import Binary, BinaryVectorDtype + +_TEST_PATH = Path(__file__).parent / "bson_binary_vector" + + +class TestBSONBinaryVector(unittest.TestCase): + """Runs Binary Vector subtype tests. + + Follows the style of the BSON corpus specification tests. + Tests are automatically generated on import + from json files in _TEST_PATH via `create_tests`. + The actual tests are defined in the inner function `run_test` + of the test generator `create_test`.""" + + +def create_test(case_spec): + """Create standard test given specification in json. 
+ + We use the naming convention expected (exp) and observed (obj) + to differentiate what is in the json (expected or suffix _exp) + from what is produced by the API (observed or suffix _obs) + """ + test_key = case_spec.get("test_key") + + def run_test(self): + for test_case in case_spec.get("tests", []): + description = test_case["description"] + vector_exp = test_case.get("vector") + dtype_hex_exp = test_case["dtype_hex"] + dtype_alias_exp = test_case.get("dtype_alias") + padding_exp = test_case.get("padding", 0) + canonical_bson_exp = test_case.get("canonical_bson") + # Convert dtype hex string into bytes + dtype_exp = BinaryVectorDtype(int(dtype_hex_exp, 16).to_bytes(1, byteorder="little")) + + if test_case["valid"]: + # Convert bson string to bytes + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + + # Test round-tripping canonical bson. + self.assertEqual(encode(decoded_doc), cB_exp, description) + + # Test BSON to Binary Vector + vector_obs = binary_obs.as_vector() + self.assertEqual(vector_obs.dtype, dtype_exp, description) + if dtype_alias_exp: + self.assertEqual( + vector_obs.dtype, BinaryVectorDtype[dtype_alias_exp], description + ) + if dtype_exp in [BinaryVectorDtype.FLOAT32]: + [ + self.assertAlmostEqual(vector_obs.data[i], vector_exp[i], delta=1e-5) + for i in range(len(vector_exp)) + ] + else: + self.assertEqual(vector_obs.data, vector_exp, description) + # Test Binary Vector to BSON + vector_exp = Binary.from_vector(vector_exp, dtype_exp, padding_exp) + cB_obs = binascii.hexlify(encode({test_key: vector_exp})).decode().upper() + self.assertEqual(cB_obs, canonical_bson_exp, description) + + else: + """ + #### To prove correct in an invalid case (`valid:false`), one MUST + - (encoding case) if the vector field is present, raise an exception + when attempting to encode a document from the numeric values,dtype, and padding. + - (decoding case) if the canonical_bson field is present, raise an exception + when attempting to deserialize it into the corresponding + numeric values, as the field contains corrupted data. + """ + # Tests Binary.from_vector() + if vector_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + + # Tests Binary.as_vector() + if canonical_bson_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() + + return run_test + + +def create_tests(): + for filename in _TEST_PATH.glob("*.json"): + with open(str(filename), encoding="utf-8") as test_file: + test_method = create_test(json_util.loads(test_file.read())) + setattr(TestBSONBinaryVector, "test_" + filename.stem, test_method) + + +create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py new file mode 100644 index 0000000000..504025e766 --- /dev/null +++ b/test/test_bson_corpus.py @@ -0,0 +1,238 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the BSON corpus specification tests.""" +from __future__ import annotations + +import binascii +import functools +import glob +import json +import os +import sys +from decimal import DecimalException + +sys.path[0:0] = [""] + +from test import unittest + +from bson import decode, encode, json_util +from bson.binary import STANDARD +from bson.codec_options import CodecOptions +from bson.dbref import DBRef +from bson.decimal128 import Decimal128 +from bson.errors import InvalidBSON, InvalidDocument, InvalidId +from bson.json_util import JSONMode +from bson.son import SON + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bson_corpus") + +_TESTS_TO_SKIP = { + # Python cannot decode dates after year 9999. + "Y10K", +} + +_NON_PARSE_ERRORS = { + # {"$date": } is our legacy format which we still need to parse. + "Bad $date (number, not string or hash)", + # This variant of $numberLong may have been generated by an old version + # of mongoexport. + "Bad $numberLong (number, not string)", + # Python's UUID constructor is very permissive. + "$uuid invalid value--misplaced hyphens", + # We parse Regex flags with extra characters, including nulls. + "Null byte in $regularExpression options", +} + +_IMPLCIT_LOSSY_TESTS = { + # JSON decodes top-level $ref+$id as a DBRef but BSON doesn't. + "Document with key names similar to those of a DBRef" +} + +_DEPRECATED_BSON_TYPES = { + # Symbol + "0x0E": str, + # Undefined + "0x06": type(None), + # DBPointer + "0x0C": DBRef, +} + + +# Need to set tz_aware=True in order to use "strict" dates in extended JSON. +codec_options: CodecOptions = CodecOptions(tz_aware=True, document_class=SON) +codec_options_no_tzaware: CodecOptions = CodecOptions(document_class=SON) +# We normally encode UUID as binary subtype 0x03, +# but we'll need to encode to subtype 0x04 for one of the tests. 
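+# (PYTHON_LEGACY UUIDs are Binary subtype 0x03 on the wire; STANDARD maps
+# uuid.UUID to subtype 0x04, which the "subtype 0x04" corpus cases expect.)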
+codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) +json_options_uuid_04 = json_util.JSONOptions( + json_mode=JSONMode.CANONICAL, uuid_representation=STANDARD +) +json_options_iso8601 = json_util.JSONOptions( + datetime_representation=json_util.DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY +) +to_extjson = functools.partial(json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS) +to_extjson_uuid_04 = functools.partial(json_util.dumps, json_options=json_options_uuid_04) +to_extjson_iso8601 = functools.partial(json_util.dumps, json_options=json_options_iso8601) +to_relaxed_extjson = functools.partial(json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) +to_bson_uuid_04 = functools.partial(encode, codec_options=codec_options_uuid_04) +to_bson = functools.partial(encode, codec_options=codec_options) +decode_bson = functools.partial(decode, codec_options=codec_options_no_tzaware) +decode_extjson = functools.partial( + json_util.loads, + json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, document_class=SON), +) +loads = functools.partial(json.loads, object_pairs_hook=SON) + + +class TestBSONCorpus(unittest.TestCase): + def assertJsonEqual(self, first, second, msg=None): + """Fail if the two json strings are unequal. + + Normalize json by parsing it with the built-in json library. This + accounts for discrepancies in spacing. + """ + self.assertEqual(loads(first), loads(second), msg=msg) + + +def create_test(case_spec): + bson_type = case_spec["bson_type"] + # Test key is absent when testing top-level documents. + test_key = case_spec.get("test_key") + deprecated = case_spec.get("deprecated") + + def run_test(self): + for valid_case in case_spec.get("valid", []): + description = valid_case["description"] + if description in _TESTS_TO_SKIP: + continue + + # Special case for testing encoding UUID as binary subtype 0x04. + if description.startswith("subtype 0x04"): + encode_extjson = to_extjson_uuid_04 + encode_bson = to_bson_uuid_04 + else: + encode_extjson = to_extjson + encode_bson = to_bson + + cB = binascii.unhexlify(valid_case["canonical_bson"].encode("utf8")) + cEJ = valid_case["canonical_extjson"] + rEJ = valid_case.get("relaxed_extjson") + dEJ = valid_case.get("degenerate_extjson") + if description in _IMPLCIT_LOSSY_TESTS: + valid_case.setdefault("lossy", True) + lossy = valid_case.get("lossy") + + # BSON double, use lowercase 'e+' to match Python's encoding + if bson_type == "0x01": + cEJ = cEJ.replace("E+", "e+") + + decoded_bson = decode_bson(cB) + + if not lossy: + # Make sure we can parse the legacy (default) JSON format. + legacy_json = json_util.dumps( + decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS + ) + self.assertEqual(decode_extjson(legacy_json), decoded_bson, description) + + if deprecated: + if "converted_bson" in valid_case: + converted_bson = binascii.unhexlify(valid_case["converted_bson"].encode("utf8")) + self.assertEqual(encode_bson(decoded_bson), converted_bson) + self.assertJsonEqual( + encode_extjson(decode_bson(converted_bson)), valid_case["converted_extjson"] + ) + # Make sure we can decode the type. + self.assertEqual(decoded_bson, decode_extjson(cEJ)) + if test_key is not None: + self.assertIsInstance(decoded_bson[test_key], _DEPRECATED_BSON_TYPES[bson_type]) + continue + + # Jython can't handle NaN with a payload from + # struct.(un)pack if endianness is specified in the format string. 
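+            # For every other case, the checks below require byte-identical
+            # BSON round-trips and canonically equal extended JSON.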
+ if not (sys.platform.startswith("java") and description == "NaN with payload"): + # Test round-tripping canonical bson. + self.assertEqual(encode_bson(decoded_bson), cB, description) + self.assertJsonEqual(encode_extjson(decoded_bson), cEJ) + + # Test round-tripping canonical extended json. + decoded_json = decode_extjson(cEJ) + self.assertJsonEqual(encode_extjson(decoded_json), cEJ) + if not lossy: + self.assertEqual(encode_bson(decoded_json), cB) + + # Test round-tripping degenerate bson. + if "degenerate_bson" in valid_case: + dB = binascii.unhexlify(valid_case["degenerate_bson"].encode("utf8")) + self.assertEqual(encode_bson(decode_bson(dB)), cB) + + # Test round-tripping degenerate extended json. + if dEJ is not None: + decoded_json = decode_extjson(dEJ) + self.assertJsonEqual(encode_extjson(decoded_json), cEJ) + if not lossy: + self.assertEqual(encode_bson(decoded_json), cB) + + # Test round-tripping relaxed extended json. + if rEJ is not None: + self.assertJsonEqual(to_relaxed_extjson(decoded_bson), rEJ) + decoded_json = decode_extjson(rEJ) + self.assertJsonEqual(to_relaxed_extjson(decoded_json), rEJ) + + for decode_error_case in case_spec.get("decodeErrors", []): + with self.assertRaises(InvalidBSON): + decode_bson(binascii.unhexlify(decode_error_case["bson"].encode("utf8"))) + + for parse_error_case in case_spec.get("parseErrors", []): + description = parse_error_case["description"] + if description in _NON_PARSE_ERRORS: + decode_extjson(parse_error_case["string"]) + continue + if bson_type == "0x13": + self.assertRaises(DecimalException, Decimal128, parse_error_case["string"]) + elif bson_type == "0x00": + try: + doc = decode_extjson(parse_error_case["string"]) + # Null bytes are validated when encoding to BSON. + if "Null" in description: + to_bson(doc) + raise AssertionError("exception not raised for test case: " + description) + except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): + pass + elif bson_type == "0x05": + try: + decode_extjson(parse_error_case["string"]) + raise AssertionError("exception not raised for test case: " + description) + except (TypeError, ValueError): + pass + else: + raise AssertionError("cannot test parseErrors for type " + bson_type) + + return run_test + + +def create_tests(): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename, encoding="utf-8") as bson_test_file: + test_method = create_test(json.load(bson_test_file)) + setattr(TestBSONCorpus, "test_" + test_suffix, test_method) + + +create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_bulk.py b/test/test_bulk.py index b6471b9f0f..1de406fca5 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -1,4 +1,4 @@ -# Copyright 2014-2014 MongoDB, Inc. +# Copyright 2014-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,1166 +13,1105 @@ # limitations under the License. 
"""Test the bulk API.""" +from __future__ import annotations import sys -import unittest +import uuid +from typing import Any, Optional -from nose.plugins.skip import SkipTest +from pymongo.synchronous.mongo_client import MongoClient sys.path[0:0] = [""] -from bson import InvalidDocument, SON -from pymongo.errors import BulkWriteError, InvalidOperation, OperationFailure -from test import version -from test.test_client import get_client -from test.utils import (oid_generated_on_client, - remove_all_users, - server_started_with_auth, - server_started_with_nojournal) +from test import IntegrationTest, client_context, remove_all_users, unittest +from test.utils_shared import wait_until -class BulkTestBase(unittest.TestCase): +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.common import partition_node +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import * +from pymongo.synchronous.collection import Collection +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class BulkTestBase(IntegrationTest): + coll: Collection + coll_w0: Collection def setUp(self): - client = get_client() - self.has_write_commands = (client.max_wire_version > 1) + super().setUp() + self.coll = self.db.test + self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" for key, value in expected.items(): - if key == 'nModified': - if self.has_write_commands: - self.assertEqual(value, actual['nModified']) - else: - # Legacy servers don't include nModified in the response. - self.assertFalse('nModified' in actual) - elif key == 'upserted': + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": expected_upserts = value - actual_upserts = actual['upserted'] + actual_upserts = actual["upserted"] self.assertEqual( - len(expected_upserts), len(actual_upserts), - 'Expected %d elements in "upserted", got %d' % ( - len(expected_upserts), len(actual_upserts))) + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) for e, a in zip(expected_upserts, actual_upserts): self.assertEqualUpsert(e, a) - elif key == 'writeErrors': + elif key == "writeErrors": expected_errors = value - actual_errors = actual['writeErrors'] + actual_errors = actual["writeErrors"] self.assertEqual( - len(expected_errors), len(actual_errors), - 'Expected %d elements in "writeErrors", got %d' % ( - len(expected_errors), len(actual_errors))) + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) for e, a in zip(expected_errors, actual_errors): self.assertEqualWriteError(e, a) else: self.assertEqual( - actual.get(key), value, - '%r value of %r does not match expected %r' % - (key, actual.get(key), value)) + actual.get(key), + value, + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", + ) def assertEqualUpsert(self, expected, actual): """Compare bulk.execute()['upserts'] to expected value. 
Like: {'index': 0, '_id': ObjectId()} """ - self.assertEqual(expected['index'], actual['index']) - if expected['_id'] == '...': + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": # Unspecified value. - self.assertTrue('_id' in actual) + self.assertIn("_id", actual) else: - self.assertEqual(expected['_id'], actual['_id']) + self.assertEqual(expected["_id"], actual["_id"]) def assertEqualWriteError(self, expected, actual): """Compare bulk.execute()['writeErrors'] to expected value. Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} """ - self.assertEqual(expected['index'], actual['index']) - self.assertEqual(expected['code'], actual['code']) - if expected['errmsg'] == '...': + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue('errmsg' in actual) + self.assertIn("errmsg", actual) else: - self.assertEqual(expected['errmsg'], actual['errmsg']) + self.assertEqual(expected["errmsg"], actual["errmsg"]) - expected_op = expected['op'].copy() - actual_op = actual['op'].copy() - if expected_op.get('_id') == '...': + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": # Unspecified _id. - self.assertTrue('_id' in actual_op) - actual_op.pop('_id') - expected_op.pop('_id') + self.assertIn("_id", actual_op) + actual_op.pop("_id") + expected_op.pop("_id") self.assertEqual(expected_op, actual_op) class TestBulk(BulkTestBase): - - def setUp(self): - super(TestBulk, self).setUp() - self.coll = get_client().pymongo_test.test - self.coll.remove() - def test_empty(self): - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(InvalidOperation, bulk.execute) - - def test_find(self): - # find() requires a selector. - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find) - self.assertRaises(TypeError, bulk.find, 'foo') - # No error. - bulk.find({}) + with self.assertRaises(InvalidOperation): + self.coll.bulk_write([]) def test_insert(self): - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.insert, 1) - - # find() before insert() is prohibited. - self.assertRaises(AttributeError, lambda: bulk.find({}).insert({})) - - # We don't allow multiple documents per call. 
- self.assertRaises(TypeError, bulk.insert, [{}, {}]) - self.assertRaises(TypeError, bulk.insert, ({} for _ in range(2))) - - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(1, self.coll.count()) - doc = self.coll.find_one() - self.assertTrue(oid_generated_on_client(doc)) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(2, self.coll.count()) - - def test_insert_check_keys(self): - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'$dollar': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'a.b': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - - def test_update(self): - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # update() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.update({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update, 1) - self.assertRaises(ValueError, bulk.find({}).update, {}) - - # All fields must be $-operators. - self.assertRaises(ValueError, bulk.find({}).update, {'foo': 'bar'}) - bulk.find({}).update({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 2) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update(updates) - self.assertRaises(BulkWriteError, bulk.execute) - - self.coll.remove() - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 2) - - self.coll.insert({'x': 1}) - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(1, self.coll.find({'x': 42}).count()) - - # Second time, x is already 42 so nModified is 0. 
- bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 42}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = self.coll.bulk_write([InsertOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.inserted_count) + self.assertEqual(1, self.coll.count_documents({})) + + def _test_update_many(self, update): + expected = { + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + self.coll.insert_many([{}, {}]) + + result = self.coll.bulk_write([UpdateMany({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.matched_count) + self.assertIn(result.modified_count, (2, None)) + + def test_update_many(self): + self._test_update_many({"$set": {"foo": "bar"}}) + + @client_context.require_version_min(4, 2, 0) + def test_update_many_pipeline(self): + self._test_update_many([{"$set": {"foo": "bar"}}]) + + def test_array_filters_validation(self): + with self.assertRaises(TypeError): + UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type] + + def test_array_filters_unacknowledged(self): + coll = self.coll_w0 + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_one]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_many]) + + def _test_update_one(self, update): + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + self.coll.insert_many([{}, {}]) + + result = self.coll.bulk_write([UpdateOne({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (1, None)) def test_update_one(self): - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() + self._test_update_one({"$set": {"foo": "bar"}}) - # update_one() requires find() first. 
- self.assertRaises( - AttributeError, - lambda: bulk.update_one({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update_one, 1) - self.assertRaises(ValueError, bulk.find({}).update_one, {}) - self.assertRaises(ValueError, bulk.find({}).update_one, {'foo': 'bar'}) - bulk.find({}).update_one({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.remove() - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update_one(updates) - self.assertRaises(BulkWriteError, bulk.execute) + @client_context.require_version_min(4, 2, 0) + def test_update_one_pipeline(self): + self._test_update_one([{"$set": {"foo": "bar"}}]) def test_replace_one(self): - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find({}).replace_one, 1) - self.assertRaises(ValueError, - bulk.find({}).replace_one, {'$set': {'foo': 'bar'}}) - bulk.find({}).replace_one({'foo': 'bar'}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.remove() - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).replace_one({'bim': 'baz'}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + self.coll.insert_many([{}, {}]) + + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (1, None)) def test_remove(self): # Test removing all documents, ordered. - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove()) - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 0) - - # Test removing some documents, ordered. 
- self.coll.insert([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.remove() - - # Test removing all documents, unordered. - self.coll.insert([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - # Test removing some documents, unordered. - self.assertEqual(self.coll.count(), 0) - - self.coll.insert([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.remove() + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + self.coll.insert_many([{}, {}]) + + result = self.coll.bulk_write([DeleteMany({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.deleted_count) def test_remove_one(self): - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove_one() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove_one()) - # Test removing one document, empty selector. - # First ordered, then unordered. - self.coll.insert([{}, {}]) - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 1) - - self.coll.insert({}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 1) - - # Test removing one document, with a selector. - # First ordered, then unordered. 
- self.coll.insert([{'x': 1}]) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) - self.coll.insert({'x': 1}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) + self.coll.insert_many([{}, {}]) + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = self.coll.bulk_write([DeleteOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.deleted_count) + self.assertEqual(self.coll.count_documents({}), 1) def test_upsert(self): - bulk = self.coll.initialize_ordered_bulk_op() + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + } + + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None + self.assertEqual(1, len(result.upserted_ids)) + self.assertIsInstance(result.upserted_ids.get(0), ObjectId) + + self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) - # upsert() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.upsert()) + def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + n_docs = client_context.max_write_batch_size + 100 + requests = [InsertOne[dict]({}) for _ in range(n_docs)] + result = self.coll.bulk_write(requests, ordered=False) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, self.coll.count_documents({})) - # Note, in MongoDB 2.4 the server won't return the - # "upserted" field unless _id is an ObjectId - bulk.find({}).upsert().replace_one({'foo': 'bar'}) - result = bulk.execute() + # Same with ordered bulk. + self.coll.drop() + result = self.coll.bulk_write(requests) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, self.coll.count_documents({})) + + def test_bulk_max_message_size(self): + self.coll.delete_many({}) + self.addCleanup(self.coll.delete_many, {}) + _16_MB = 16 * 1000 * 1000 + # Generate a list of documents such that the first batched OP_MSG is + # as close as possible to the 48MB limit. + docs = [ + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, + ] + # Fill in the remaining ~10000 bytes with small documents. 
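+        # Three 16MB documents leave roughly 10000 bytes of headroom under
+        # the 48MB message limit, so these small trailing documents exercise
+        # the point where the driver must split the bulk into a second batch.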
+ for i in range(4, 10000): + docs.append({"_id": i}) + result = self.coll.insert_many(docs) + self.assertEqual(len(docs), len(result.inserted_ids)) + + def test_generator_insert(self): + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = self.coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + def test_bulk_write_no_results(self): + result = self.coll_w0.bulk_write([InsertOne({})]) + self.assertFalse(result.acknowledged) + self.assertRaises(InvalidOperation, lambda: result.inserted_count) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_ids) + + def test_bulk_write_invalid_arguments(self): + # The requests argument must be a list. + generator = (InsertOne[dict]({}) for _ in range(10)) + with self.assertRaises(TypeError): + self.coll.bulk_write(generator) # type: ignore[arg-type] + + # Document is not wrapped in a bulk write operation. + with self.assertRaises(TypeError): + self.coll.bulk_write([{}]) # type: ignore[list-item] + + def test_upsert_large(self): + big = "a" * (client_context.max_bson_size - 37) + result = self.coll.bulk_write([UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)]) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, self.coll.count_documents({"x": 1})) + + def test_client_generated_upsert_id(self): + result = self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update({'$set': {'bim': 'bop'}}) - # Non-upsert, no matches. 
- bulk.find({'x': 1}).update({'$set': {'x': 2}}) - result = bulk.execute() + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'bop'}).count(), 1) - self.assertEqual(self.coll.find({'x': 2}).count(), 0) - - def test_upsert_large(self): - client = self.coll.database.connection - big = 'a' * (client.max_bson_size - 37) - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).upsert().update({'$set': {'s': big}}) - result = bulk.execute() + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] + + result = coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) + + # The `Binary` values are returned as `bytes` objects. 
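+        # (PyMongo decodes Binary subtype 0 as bytes, so normalize the
+        # expected ids before comparing them with the server's response.)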
+ for _id in ids: + _id["f"] = bytes(_id["f"]) - self.assertEqual(1, self.coll.find({'x': 1}).count()) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) def test_single_ordered_batch(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() + result = self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}]}, - result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) def test_single_error_ordered_batch(self): - self.coll.ensure_index('a', unique=True) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] try: - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - finally: - self.coll.drop_index([('a', 1)]) + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_ordered_batch(self): - self.coll.ensure_index('a', unique=True) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), + ] + try: - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 2}}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - 
batch.insert({'b': 4, 'a': 3}) - batch.insert({'b': 5, 'a': 1}) - - try: - batch.execute() - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - finally: - self.coll.drop_index([('a', 1)]) + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_single_unordered_batch(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + result = self.coll.bulk_write(requests, ordered=False) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) def test_single_error_unordered_batch(self): - self.coll.ensure_index('a', unique=True) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + try: - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - finally: - self.coll.drop_index([('a', 1)]) + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 
1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_unordered_batch(self): - self.coll.ensure_index('a', unique=True) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), + ] + try: - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 3}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 4}}) - batch.find({'b': 4}).upsert().update_one({'$set': {'a': 3}}) - batch.insert({'b': 5, 'a': 2}) - batch.insert({'b': 6, 'a': 1}) - - try: - batch.execute() - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - # Assume the update at index 1 runs before the update at index 3, - # although the spec does not require it. Same for inserts. - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 2, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [ - {'index': 1, '_id': '...'}, - {'index': 2, '_id': '...'}], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 3, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 4}, - 'u': {'$set': {'a': 3}}, - 'multi': False, - 'upsert': True}}, - {'index': 5, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, - result) - finally: - self.coll.drop_index([('a', 1)]) + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. 
+ self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) def test_large_inserts_ordered(self): - big = 'x' * self.coll.database.connection.max_bson_size - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) + big = "x" * client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] try: - batch.execute() - except BulkWriteError, exc: + self.coll.bulk_write(requests) + except BulkWriteError as exc: result = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(1, result['nInserted']) + self.assertEqual(1, result["nInserted"]) - self.coll.remove() + self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() + big = "x" * (1024 * 1024 * 4) + write_result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, self.coll.count()) + self.assertEqual(6, write_result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): - big = 'x' * self.coll.database.connection.max_bson_size - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) + big = "x" * client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] try: - batch.execute() - except BulkWriteError, exc: - result = exc.details + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) + self.assertEqual(2, details["nInserted"]) - self.coll.remove() + self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() + big = "x" * (1024 * 1024 * 4) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, 
self.coll.count()) - - def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. - n_docs = 2100 - batch = self.coll.initialize_unordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) - - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - # Same with ordered bulk. - self.coll.remove() - batch = self.coll.initialize_ordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - def test_multiple_execution(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({}) - batch.execute() - self.assertRaises(InvalidOperation, batch.execute) - - -class TestBulkWriteConcern(BulkTestBase): +class BulkAuthorizationTestBase(BulkTestBase): + @client_context.require_auth + @client_context.require_no_api_version def setUp(self): - super(TestBulkWriteConcern, self).setUp() - client = get_client() - ismaster = client.test.command('ismaster') - self.is_repl = bool(ismaster.get('setName')) - self.w = len(ismaster.get("hosts", [])) - self.coll = client.pymongo_test.test - self.coll.remove() - - def test_fsync_and_j(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - self.assertRaises( - OperationFailure, - batch.execute, {'fsync': True, 'j': True}) - - def test_j_without_journal(self): - client = self.coll.database.connection - if not server_started_with_nojournal(client): - raise SkipTest("Need mongod started with --nojournal") - - # Using j=True without journaling is a hard failure. - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({}) - self.assertRaises(OperationFailure, batch.execute, {'j': True}) + super().setUp() + client_context.create_user(self.db.name, "readonly", "pw", ["read"]) + self.db.command( + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) - def test_write_concern_failure_ordered(self): + def tearDown(self): + self.db.command("dropRole", "noremove") + remove_all_users(self.db) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.insert({'a': 2}) - # Using w > 1 with no replication is a hard failure. - if not self.is_repl: - self.assertRaises(OperationFailure, - batch.execute, {'w': 5, 'wtimeout': 1}) +class TestBulkUnacknowledged(BulkTestBase): + def tearDown(self): + self.coll.delete_many({}) - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - else: - try: - batch.execute({'w': self.w + 1, 'wtimeout': 1}) - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': []}, - result) - - # When talking to legacy servers there will be a - # write concern error for each operation. 
- self.assertTrue(len(result['writeConcernErrors']) > 0) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], basestring)) - - self.coll.remove() - self.coll.ensure_index('a', unique=True) - - # Fail due to write concern support as well - # as duplicate key error on ordered batch. - try: - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().replace_one({'b': 1}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - batch.execute({'w': self.w + 1, 'wtimeout': 1}) - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [{'index': 1, '_id': '...'}], - 'writeErrors': [ - {'index': 2, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'a': 1}}]}, - result) - - self.assertEqual(2, len(result['writeConcernErrors'])) - failed = result['writeErrors'][0] - self.assertTrue("duplicate" in failed['errmsg']) - finally: - self.coll.drop_index([('a', 1)]) + def test_no_results_ordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) - def test_write_concern_failure_unordered(self): + def predicate(): + return self.coll.count_documents({}) == 2 - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, 'b': 1}}) - batch.insert({'a': 2}) + wait_until(predicate, "insert 2 documents") - # Using w > 1 with no replication is a hard failure. - if not self.is_repl: - self.assertRaises(OperationFailure, - batch.execute, {'w': 5, 'wtimeout': 1}) - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - else: - try: - batch.execute({'w': self.w + 1, 'wtimeout': 1}) - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(0, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) - - self.coll.remove() - self.coll.ensure_index('a', unique=True) - - # Fail due to write concern support as well - # as duplicate key error on unordered batch. - try: - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, - 'b': 1}}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - batch.execute({'w': self.w + 1, 'wtimeout': 1}) - except BulkWriteError, exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(1, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation. 
- self.assertTrue(len(result['writeConcernErrors']) > 1) - - failed = result['writeErrors'][0] - self.assertEqual(2, failed['index']) - self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], basestring)) - self.assertEqual(1, failed['op']['a']) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], basestring)) - - upserts = result['upserted'] - self.assertEqual(1, len(upserts)) - self.assertEqual(1, upserts[0]['index']) - self.assertTrue(upserts[0].get('_id')) - finally: - self.coll.drop_index([('a', 1)]) - - -class TestBulkNoResults(BulkTestBase): + def predicate(): + return self.coll.find_one({"_id": 1}) is None - def setUp(self): - super(TestBulkNoResults, self).setUp() - self.coll = get_client().pymongo_test.test - self.coll.remove() + wait_until(predicate, 'removed {"_id": 1}') - def test_no_results_ordered_success(self): + def test_no_results_ordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should not be executed since the batch is ordered. + DeleteOne({"_id": 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + def predicate(): + return self.coll.count_documents({}) == 3 + + wait_until(predicate, "insert 3 documents") + self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - self.assertEqual(2, self.coll.count()) + def test_no_results_unordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) - def test_no_results_ordered_failure(self): + def predicate(): + return self.coll.count_documents({}) == 2 - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.insert({'_id': 1}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - self.assertEqual(3, self.coll.count()) + wait_until(predicate, "insert 2 documents") - def test_no_results_unordered_success(self): + def predicate(): + return self.coll.find_one({"_id": 1}) is None - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - self.assertEqual(2, self.coll.count()) + wait_until(predicate, 'removed {"_id": 1}') def test_no_results_unordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should be executed since the batch is unordered. 
+ DeleteOne({"_id": 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.insert({'_id': 1}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - self.assertEqual(2, self.coll.count()) - self.assertTrue(self.coll.find_one({'_id': 1}) is None) + def predicate(): + return self.coll.count_documents({}) == 2 + wait_until(predicate, "insert 2 documents") -class TestBulkAuthorization(BulkTestBase): + def predicate(): + return self.coll.find_one({"_id": 1}) is None - def setUp(self): - super(TestBulkAuthorization, self).setUp() - self.client = client = get_client() - if (not server_started_with_auth(client) - or not version.at_least(client, (2, 5, 3))): - raise SkipTest('Need at least MongoDB 2.5.3 with auth') - - db = client.pymongo_test - self.coll = db.test - self.coll.remove() - - db.add_user('dbOwner', 'pw', roles=['dbOwner']) - db.authenticate('dbOwner', 'pw') - db.add_user('readonly', 'pw', roles=['read']) - db.command( - 'createRole', 'noremove', - privileges=[{ - 'actions': ['insert', 'update', 'find'], - 'resource': {'db': 'pymongo_test', 'collection': 'test'} - }], - roles=[]) - - db.add_user('noremove', 'pw', roles=['noremove']) - db.logout() + wait_until(predicate, 'removed {"_id": 1}') + +class TestBulkAuthorization(BulkAuthorizationTestBase): def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - db = self.client.pymongo_test - db.authenticate('readonly', 'pw') - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - self.assertRaises(OperationFailure, bulk.execute) + cli = self.rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + coll.find_one() + with self.assertRaises(OperationFailure): + coll.bulk_write([InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - db = self.client.pymongo_test - db.authenticate('noremove', 'pw') - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - bulk.find({'x': 2}).upsert().replace_one({'x': 2}) - bulk.find({}).remove() # Prohibited. - bulk.insert({'x': 3}) # Never attempted. - self.assertRaises(OperationFailure, bulk.execute) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) + cli = self.rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + coll.find_one() + requests = [ + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. 
+        ]
+        with self.assertRaises(OperationFailure):
+            coll.bulk_write(requests)  # type: ignore[arg-type]
+        self.assertEqual({1, 2}, set(self.coll.distinct("x")))
+
+
+class TestBulkWriteConcern(BulkTestBase):
+    w: Optional[int]
+    secondary: MongoClient
+
+    def setUp(self):
+        super().setUp()
+        self.w = client_context.w
+        self.secondary = None
+        if self.w is not None and self.w > 1:
+            for member in (client_context.hello)["hosts"]:
+                if member != (client_context.hello)["primary"]:
+                    self.secondary = self.single_client(*partition_node(member))
+                    break
     def tearDown(self):
-        db = self.client.pymongo_test
-        db.logout()
-        db.authenticate('dbOwner', 'pw')
-        db.command('dropRole', 'noremove')
-        remove_all_users(db)
-        db.logout()
+        if self.secondary:
+            self.secondary.close()
+
+    def cause_wtimeout(self, requests, ordered):
+        if not client_context.test_commands_enabled:
+            self.skipTest("Test commands must be enabled.")
+
+        # Use the rsSyncApplyStop failpoint to pause replication on a
+        # secondary which will cause a wtimeout error.
+        self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn")
+
+        try:
+            coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1))
+            return coll.bulk_write(requests, ordered=ordered)
+        finally:
+            self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off")
+
+    @client_context.require_version_max(7, 1)  # PYTHON-4560
+    @client_context.require_replica_set
+    @client_context.require_secondaries_count(1)
+    def test_write_concern_failure_ordered(self):
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})])
+        self.assertTrue(result.acknowledged)
+
+        requests: list[Any] = [InsertOne({"a": 1}), InsertOne({"a": 2})]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 0,
+                "nInserted": 2,
+                "nRemoved": 0,
+                "upserted": [],
+                "writeErrors": [],
+            },
+            details,
+        )
+
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 0)
+
+        failed = details["writeConcernErrors"][0]
+        self.assertEqual(64, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+
+        self.coll.delete_many({})
+        self.coll.create_index("a", unique=True)
+        self.addCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to write concern support as well
+        # as duplicate key error on ordered batch.
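+        # The ordered batch stops at the duplicate key error (index 2), so
+        # only the first insert and the upsert are applied; both of the
+        # applied operations also report a wtimeout write concern error.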
+        requests = [
+            InsertOne({"a": 1}),
+            ReplaceOne({"a": 3}, {"b": 1}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 1,
+                "nInserted": 1,
+                "nRemoved": 0,
+                "upserted": [{"index": 1, "_id": "..."}],
+                "writeErrors": [
+                    {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}}
+                ],
+            },
+            details,
+        )
+
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+        failed = details["writeErrors"][0]
+        self.assertIn("duplicate", failed["errmsg"])
+
+    @client_context.require_version_max(7, 1)  # PYTHON-4560
+    @client_context.require_replica_set
+    @client_context.require_secondaries_count(1)
+    def test_write_concern_failure_unordered(self):
+        self.skipTest("Skipping until PYTHON-4865 is resolved.")
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})], ordered=False)
+        self.assertTrue(result.acknowledged)
+
+        requests = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 2}),
+        ]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(0, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+
+        self.coll.delete_many({})
+        self.coll.create_index("a", unique=True)
+        self.addCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to write concern support as well
+        # as duplicate key error on unordered batch.
+        requests: list = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(1, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+
+        failed = details["writeErrors"][0]
+        self.assertEqual(2, failed["index"])
+        self.assertEqual(11000, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+        self.assertEqual(1, failed["op"]["a"])
+
+        failed = details["writeConcernErrors"][0]
+        self.assertEqual(64, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+
+        upserts = details["upserted"]
+        self.assertEqual(1, len(upserts))
+        self.assertEqual(1, upserts[0]["index"])
+        self.assertTrue(upserts[0].get("_id"))
 if __name__ == "__main__":
diff --git a/test/test_change_stream.py b/test/test_change_stream.py
new file mode 100644
index 0000000000..ad51f91873
--- /dev/null
+++ b/test/test_change_stream.py
@@ -0,0 +1,1138 @@
+# Copyright 2017 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test the change_stream module."""
+from __future__ import annotations
+
+import asyncio
+import os
+import random
+import string
+import sys
+import threading
+import time
+import uuid
+from itertools import product
+from typing import no_type_check
+
+sys.path[0:0] = [""]
+
+from test import (
+    IntegrationTest,
+    PyMongoTestCase,
+    Version,
+    client_context,
+    unittest,
+)
+from test.unified_format import generate_test_classes
+from test.utils_shared import (
+    AllowListEventListener,
+    EventListener,
+    OvertCommandListener,
+    wait_until,
+)
+
+from bson import SON, ObjectId, Timestamp, encode
+from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary
+from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument
+from pymongo import MongoClient
+from pymongo.errors import (
+    InvalidOperation,
+    OperationFailure,
+    ServerSelectionTimeoutError,
+)
+from pymongo.message import _CursorAddress
+from pymongo.read_concern import ReadConcern
+from pymongo.synchronous.command_cursor import CommandCursor
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = True
+
+
+class TestChangeStreamBase(IntegrationTest):
+    RUN_ON_LOAD_BALANCER = True
+
+    def change_stream_with_client(self, client, *args, **kwargs):
+        """Create a change stream using the given client and return it."""
+        raise NotImplementedError
+
+    def change_stream(self, *args, **kwargs):
+        """Create a change stream using the default client and return it."""
+        return self.change_stream_with_client(self.client, *args, **kwargs)
+
+    def client_with_listener(self, *commands):
+        """Return a client with an AllowListEventListener."""
+        listener = AllowListEventListener(*commands)
+        client = self.rs_or_single_client(event_listeners=[listener])
+        return client, listener
+
+    def watched_collection(self, *args, **kwargs):
+        """Return a collection that is watched by self.change_stream()."""
+        # Construct a unique collection for each test.
+        collname = ".".join(self.id().rsplit(".", 2)[1:])
+        return self.db.get_collection(collname, *args, **kwargs)
+
+    def generate_invalidate_event(self, change_stream):
+        """Cause a change stream invalidate event."""
+        raise NotImplementedError
+
+    def generate_unique_collnames(self, numcolls):
+        """Generate numcolls collection names unique to a test."""
+        collnames = []
+        for idx in range(1, numcolls + 1):
+            collnames.append(self.id() + "_" + str(idx))
+        return collnames
+
+    def get_resume_token(self, invalidate=False):
+        """Get a resume token to use for starting a change stream."""
+        # Ensure targeted collection exists before starting.
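+        # (The insert below also creates the collection if it does not yet
+        # exist, giving the change stream a concrete namespace to watch.)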
+ coll = self.watched_collection(write_concern=WriteConcern("majority")) + coll.insert_one({}) + + if invalidate: + with self.change_stream([{"$match": {"operationType": "invalidate"}}]) as cs: + if isinstance(cs._target, MongoClient): + self.skipTest("cluster-level change streams cannot be invalidated") + self.generate_invalidate_event(cs) + return (cs.next())["_id"] + else: + with self.change_stream() as cs: + coll.insert_one({"data": 1}) + return (cs.next())["_id"] + + def get_start_at_operation_time(self): + """Get an operationTime. Advances the operation clock beyond the most + recently returned timestamp. + """ + optime = (self.client.admin.command("ping"))["operationTime"] + return Timestamp(optime.time, optime.inc + 1) + + def insert_one_and_check(self, change_stream, doc): + """Insert a document and check that it shows up in the change stream.""" + raise NotImplementedError + + def kill_change_stream_cursor(self, change_stream): + """Cause a cursor not found error on the next getMore.""" + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.watched_collection().database.client + client._close_cursor_now(cursor.cursor_id, address) + + +class APITestsMixin: + @no_type_check + def test_watch(self): + with self.change_stream( + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) + self.assertEqual(1000, change_stream._max_await_time_ms) + self.assertEqual(100, change_stream._batch_size) + self.assertIsInstance(change_stream._cursor, CommandCursor) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) + self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) + _ = change_stream.next() + resume_token = change_stream.resume_token + with self.assertRaises(TypeError): + self.change_stream(pipeline={}) + with self.assertRaises(TypeError): + self.change_stream(full_document={}) + # No Error. + with self.change_stream(resume_after=resume_token): + pass + + @no_type_check + def test_try_next(self): + # ChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + coll.drop() + coll.insert_one({}) + self.addCleanup(coll.drop) + with self.change_stream(max_await_time_ms=250) as stream: + self.assertIsNone(stream.try_next()) # No changes initially. + coll.insert_one({}) # Generate a change. + + # On sharded clusters, even majority-committed changes only show + # up once an event that sorts after it shows up on the other + # shard. So, we wait on try_next to eventually return changes. + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") + + @no_type_check + def test_try_next_runs_one_getmore(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + client.admin.command("ping") + listener.reset() + # ChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. 
+ coll.insert_one({"_id": 1}) + self.addCleanup(coll.drop) + with self.change_stream_with_client(client, max_await_time_ms=250) as stream: + self.assertEqual(listener.started_command_names(), ["aggregate"]) + listener.reset() + + # Confirm that only a single getMore is run even when no documents + # are returned. + self.assertIsNone(stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + self.assertIsNone(stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + + # Get at least one change before resuming. + coll.insert_one({"_id": 2}) + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") + listener.reset() + + # Cause the next request to initiate the resume process. + self.kill_change_stream_cursor(stream) + listener.reset() + + # The sequence should be: + # - getMore, fail + # - resume with aggregate command + # - no results, return immediately without another getMore + self.assertIsNone(stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) + listener.reset() + + # Stream still works after a resume. + coll.insert_one({"_id": 3}) + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) + self.assertIsNone(stream.try_next()) + + @no_type_check + def test_batch_size_is_honored(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + client.admin.command("ping") + listener.reset() + # ChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + coll.insert_one({"_id": 1}) + self.addCleanup(coll.drop) + # Expected batchSize. + expected = {"batchSize": 23} + with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: + # Confirm that batchSize is honored for initial batch. + cmd = listener.started_events[0].command + self.assertEqual(cmd["cursor"], expected) + listener.reset() + # Confirm that batchSize is honored by getMores. + self.assertIsNone(stream.try_next()) + cmd = listener.started_events[0].command + key = next(iter(expected)) + self.assertEqual(expected[key], cmd[key]) + + # $changeStream.startAtOperationTime was added in 4.0.0. 
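+    # (get_start_at_operation_time advances the returned timestamp by one
+    # increment, so only inserts made after that point appear in the stream.)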
+ @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_start_at_operation_time(self): + optime = self.get_start_at_operation_time() + + coll = self.watched_collection(write_concern=WriteConcern("majority")) + ndocs = 3 + coll.insert_many([{"data": i} for i in range(ndocs)]) + + with self.change_stream(start_at_operation_time=optime) as cs: + for _i in range(ndocs): + cs.next() + + @no_type_check + def _test_full_pipeline(self, expected_cs_stage): + client, listener = self.client_with_listener("aggregate") + with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: + pass + + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) + + @no_type_check + def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + self._test_full_pipeline({}) + + @no_type_check + def test_iteration(self): + with self.change_stream(batch_size=2) as change_stream: + num_inserted = 10 + self.watched_collection().insert_many([{} for _ in range(num_inserted)]) + inserts_received = 0 + for change in change_stream: + self.assertEqual(change["operationType"], "insert") + inserts_received += 1 + if inserts_received == num_inserted: + break + self._test_invalidate_stops_iteration(change_stream) + + @no_type_check + @client_context.require_sync + def _test_next_blocks(self, change_stream): + inserted_doc = {"_id": ObjectId()} + changes = [] + t = threading.Thread(target=lambda: changes.append(change_stream.next())) + t.start() + # Sleep for a bit to prove that the call to next() blocks. + time.sleep(1) + self.assertTrue(t.is_alive()) + self.assertFalse(changes) + self.watched_collection().insert_one(inserted_doc) + # Join with large timeout to give the server time to return the change, + # in particular for shard clusters. + t.join(30) + self.assertFalse(t.is_alive()) + self.assertEqual(1, len(changes)) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) + + @no_type_check + @client_context.require_sync + def test_next_blocks(self): + """Test that next blocks until a change is readable""" + # Use a short wait time to speed up the test. + with self.change_stream(max_await_time_ms=250) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @client_context.require_sync + def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + with self.watched_collection().aggregate( + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @client_context.require_sync + def test_concurrent_close(self): + """Ensure a ChangeStream can be closed from another thread.""" + # Use a short wait time to speed up the test. 
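+        # (max_await_time_ms bounds how long each getMore blocks on the
+        # server, so the iterating thread observes close() promptly.)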
+ with self.change_stream(max_await_time_ms=250) as change_stream: + + def iterate_cursor(): + try: + for _ in change_stream: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise + + t = threading.Thread(target=iterate_cursor) + t.start() + self.watched_collection().insert_one({}) + time.sleep(1) + change_stream.close() + t.join(3) + self.assertFalse(t.is_alive()) + + @no_type_check + def test_unknown_full_document(self): + """Must rely on the server to raise an error on unknown fullDocument.""" + try: + with self.change_stream(full_document="notValidatedByPyMongo"): + pass + except OperationFailure: + pass + + @no_type_check + def test_change_operations(self): + """Test each operation type.""" + expected_ns = { + "db": self.watched_collection().database.name, + "coll": self.watched_collection().name, + } + with self.change_stream() as change_stream: + # Insert. + inserted_doc = {"_id": ObjectId(), "foo": "bar"} + self.watched_collection().insert_one(inserted_doc) + change = change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) + # Update. + update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}} + self.watched_collection().update_one(inserted_doc, update_spec) + change = change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "update") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + + expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} + if client_context.version.at_least(4, 5, 0): + expected_update_description["truncatedArrays"] = [] + self.assertEqual( + expected_update_description, + { + k: v + for k, v in change["updateDescription"].items() + if k in expected_update_description + }, + ) + # Replace. + self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) + change = change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "replace") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) + # Delete. + self.watched_collection().delete_one({"foo": "bar"}) + change = change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "delete") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + # Invalidate. + self._test_get_invalidate_event(change_stream) + + @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_start_after(self): + resume_token = self.get_resume_token(invalidate=True) + + # resume_after cannot resume after invalidate. + with self.assertRaises(OperationFailure): + self.change_stream(resume_after=resume_token) + + # start_after can resume after invalidate. 
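+ # Unlike resumeAfter, startAfter creates a new change stream rather than
+ # resuming the old one, so the server accepts an invalidate event's token.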
+ with self.change_stream(start_after=resume_token) as change_stream:
+ self.watched_collection().insert_one({"_id": 2})
+ change = change_stream.next()
+ self.assertEqual(change["operationType"], "insert")
+ self.assertEqual(change["fullDocument"], {"_id": 2})
+
+ @no_type_check
+ @client_context.require_version_min(4, 2, 0)
+ def test_start_after_resume_process_with_changes(self):
+ resume_token = self.get_resume_token(invalidate=True)
+
+ with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream:
+ self.watched_collection().insert_one({"_id": 2})
+ change = change_stream.next()
+ self.assertEqual(change["operationType"], "insert")
+ self.assertEqual(change["fullDocument"], {"_id": 2})
+
+ self.assertIsNone(change_stream.try_next())
+ self.kill_change_stream_cursor(change_stream)
+
+ self.watched_collection().insert_one({"_id": 3})
+ change = change_stream.next()
+ self.assertEqual(change["operationType"], "insert")
+ self.assertEqual(change["fullDocument"], {"_id": 3})
+
+ @no_type_check
+ @client_context.require_version_min(4, 2, 0)
+ def test_start_after_resume_process_without_changes(self):
+ resume_token = self.get_resume_token(invalidate=True)
+
+ with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream:
+ self.assertIsNone(change_stream.try_next())
+ self.kill_change_stream_cursor(change_stream)
+
+ self.watched_collection().insert_one({"_id": 2})
+ change = change_stream.next()
+ self.assertEqual(change["operationType"], "insert")
+ self.assertEqual(change["fullDocument"], {"_id": 2})
+
+
+class ProseSpecTestsMixin:
+ @no_type_check
+ def _client_with_listener(self, *commands):
+ listener = AllowListEventListener(*commands)
+ client = PyMongoTestCase.unmanaged_rs_or_single_client(event_listeners=[listener])
+ self.addCleanup(client.close)
+ return client, listener
+
+ @no_type_check
+ def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3):
+ self.watched_collection().insert_many([{"data": k} for k in range(batch_size)])
+ for _ in range(batch_size):
+ change = next(change_stream)
+ return change
+
+ def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None):
+ """Predicts what the resume token should currently be for server
+ versions that don't support postBatchResumeToken. Assumes the stream
+ has never returned any changes if previous_change is None.
+ """
+ if previous_change is None:
+ agg_cmd = listener.started_events[0]
+ stage = agg_cmd.command["pipeline"][0]["$changeStream"]
+ return stage.get("resumeAfter") or stage.get("startAfter")
+
+ return previous_change["_id"]
+
+ def _get_expected_resume_token(self, stream, listener, previous_change=None):
+ """Predicts what the resume token should currently be for server
+ versions that support postBatchResumeToken. Assumes the stream has
+ never returned any changes if previous_change is None. Assumes
+ listener is an AllowListEventListener that listens for aggregate and
+ getMore commands.
+ """
+ if previous_change is None or stream._cursor._has_next():
+ token = self._get_expected_resume_token_legacy(stream, listener, previous_change)
+ if token is not None:
+ return token
+
+ response = listener.succeeded_events[-1].reply
+ return response["cursor"]["postBatchResumeToken"]
+
+ @no_type_check
+ def _test_raises_error_on_missing_id(self, expected_exception):
+ """ChangeStream will raise an exception if the server response is
+ missing the resume token.
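+ Here the pipeline projects out _id, which strips the token from each event.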
+ """ + with self.change_stream([{"$project": {"_id": 0}}]) as change_stream: + self.watched_collection().insert_one({}) + with self.assertRaises(expected_exception): + next(change_stream) + # The cursor should now be closed. + with self.assertRaises(StopIteration): + next(change_stream) + + @no_type_check + def _test_update_resume_token(self, expected_rt_getter): + """ChangeStream must continuously track the last seen resumeToken.""" + client, listener = self._client_with_listener("aggregate", "getMore") + coll = self.watched_collection(write_concern=WriteConcern("majority")) + with self.change_stream_with_client(client) as change_stream: + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) + for _ in range(3): + coll.insert_one({}) + change = next(change_stream) + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) + + # Prose test no. 1 + @client_context.require_version_min(4, 2, 0) + def test_update_resume_token(self): + self._test_update_resume_token(self._get_expected_resume_token) + + # Prose test no. 2 + @client_context.require_version_min(4, 2, 0) + def test_raises_error_on_missing_id_418plus(self): + # Server returns an error on 4.1.8+ + self._test_raises_error_on_missing_id(OperationFailure) + + # Prose test no. 3 + @no_type_check + def test_resume_on_error(self): + with self.change_stream() as change_stream: + self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + self.kill_change_stream_cursor(change_stream) + self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 4 + @no_type_check + @client_context.require_failCommand_fail_point + def test_no_resume_attempt_if_aggregate_command_fails(self): + # Set non-retryable error on aggregate command. + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} + client, listener = self._client_with_listener("aggregate", "getMore") + with self.fail_point(fail_point): + try: + _ = self.change_stream_with_client(client) + except OperationFailure: + pass + + # Driver should have attempted aggregate command only once. + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") + + # Prose test no. 5 - REMOVED + # Prose test no. 6 - SKIPPED + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensure compliance with this prose test. + + # Prose test no. 7 + @no_type_check + def test_initial_empty_batch(self): + with self.change_stream() as change_stream: + # The first batch should be empty. + self.assertFalse(change_stream._cursor._has_next()) + cursor_id = change_stream._cursor.cursor_id + self.assertTrue(cursor_id) + self.insert_one_and_check(change_stream, {}) + # Make sure we're still using the same cursor. + self.assertEqual(cursor_id, change_stream._cursor.cursor_id) + + # Prose test no. 8 + @no_type_check + def test_kill_cursors(self): + def raise_error(): + raise ServerSelectionTimeoutError("mock error") + + with self.change_stream() as change_stream: + self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + cursor = change_stream._cursor + self.kill_change_stream_cursor(change_stream) + cursor.close = raise_error + self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 
10 - SKIPPED + # This test is identical to prose test no. 3. + + # Prose test no. 11 + @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_resumetoken_empty_batch(self): + client, listener = self._client_with_listener("getMore") + with self.change_stream_with_client(client) as change_stream: + self.assertIsNone(change_stream.try_next()) + resume_token = change_stream.resume_token + + response = listener.succeeded_events[0].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) + + # Prose test no. 11 + @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_resumetoken_exhausted_batch(self): + client, listener = self._client_with_listener("getMore") + with self.change_stream_with_client(client) as change_stream: + self._populate_and_exhaust_change_stream(change_stream) + resume_token = change_stream.resume_token + + response = listener.succeeded_events[-1].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) + + # Prose test no. 13 + @no_type_check + def test_resumetoken_partially_iterated_batch(self): + # When batch has been iterated up to but not including the last element. + # Resume token should be _id of previous change document. + with self.change_stream() as change_stream: + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"data": k} for k in range(3)] + ) + for _ in range(2): + change = next(change_stream) + resume_token = change_stream.resume_token + + self.assertEqual(resume_token, change["_id"]) + + @no_type_check + def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): + # When the batch is not empty and hasn't been iterated at all. + # Resume token should be same as the resume option used. + resume_point = self.get_resume_token() + + # Insert some documents so that firstBatch isn't empty. + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"a": 1}, {"b": 2}, {"c": 3}] + ) + + # Resume token should be same as the resume option. + with self.change_stream(**{resume_option: resume_point}) as change_stream: + self.assertTrue(change_stream._cursor._has_next()) + resume_token = change_stream.resume_token + self.assertEqual(resume_token, resume_point) + + # Prose test no. 14 + @no_type_check + @client_context.require_no_mongos + def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): + self._test_resumetoken_uniterated_nonempty_batch("resume_after") + + # Prose test no. 14 + @no_type_check + @client_context.require_no_mongos + @client_context.require_version_min(4, 2, 0) + def test_resumetoken_uniterated_nonempty_batch_startafter(self): + self._test_resumetoken_uniterated_nonempty_batch("start_after") + + # Prose test no. 17 + @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_startafter_resume_uses_startafter_after_empty_getMore(self): + # Resume should use startAfter after no changes have been returned. 
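+ # That is, the resume attempt's $changeStream stage must include
+ # startAfter and must not include resumeAfter.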
+ resume_point = self.get_resume_token() + + client, listener = self._client_with_listener("aggregate") + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + change_stream.try_next() # No changes + self.kill_change_stream_cursor(change_stream) + change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 18 + @no_type_check + @client_context.require_version_min(4, 2, 0) + def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): + # Resume should use resumeAfter after some changes have been returned. + resume_point = self.get_resume_token() + + client, listener = self._client_with_listener("aggregate") + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + self.watched_collection().insert_one({}) + next(change_stream) # Changes + self.kill_change_stream_cursor(change_stream) + change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 19 + @no_type_check + def test_split_large_change(self): + server_version = client_context.version + if not server_version.at_least(6, 0, 9): + self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+") + if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0): + self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases") + self.db.drop_collection("test_split_large_change") + coll = self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + with coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + + +class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): + dbs: list + + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self) -> None: + super().setUp() + self.dbs = [self.db, self.client.pymongo_test_2] + + def tearDown(self): + for db in self.dbs: + self.client.drop_database(db) + super().tearDown() + + def change_stream_with_client(self, client, *args, **kwargs): + return client.watch(*args, **kwargs) + + def generate_invalidate_event(self, change_stream): + self.skipTest("cluster-level change streams cannot be invalidated") + + def _test_get_invalidate_event(self, change_stream): + # Cluster-level change streams don't get invalidated. + pass + + def _test_invalidate_stops_iteration(self, change_stream): + # Cluster-level change streams don't get invalidated. 
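+ # There is no single namespace whose drop could invalidate the stream.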
+ pass + + def _insert_and_check(self, change_stream, db, collname, doc): + coll = db[collname] + coll.insert_one(doc) + change = next(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + def insert_one_and_check(self, change_stream, doc): + db = random.choice(self.dbs) + collname = self.id() + self._insert_and_check(change_stream, db, collname, doc) + + def test_simple(self): + collnames = self.generate_unique_collnames(3) + with self.change_stream() as change_stream: + for db, collname in product(self.dbs, collnames): + self._insert_and_check(change_stream, db, collname, {"_id": collname}) + + @client_context.require_sync + def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + with self.client.admin.aggregate( + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + self._test_full_pipeline({"allChangesForCluster": True}) + + +class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self) -> None: + super().setUp() + + def change_stream_with_client(self, client, *args, **kwargs): + return client[self.db.name].watch(*args, **kwargs) + + def generate_invalidate_event(self, change_stream): + # Dropping the database invalidates the change stream. + change_stream._client.drop_database(self.db.name) + + def _test_get_invalidate_event(self, change_stream): + # Cache collection names. + dropped_colls = self.db.list_collection_names() + # Drop the watched database to get an invalidate event. + self.generate_invalidate_event(change_stream) + change = change_stream.next() + # 4.1+ returns "drop" events for each collection in dropped database + # and a "dropDatabase" event for the database itself. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + for _ in range(len(dropped_colls)): + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) + change = change_stream.next() + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) + # Get next change. + change = change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The ChangeStream should be dead. + with self.assertRaises(StopIteration): + change_stream.next() + + def _test_invalidate_stops_iteration(self, change_stream): + # Drop the watched database to get an invalidate event. + change_stream._client.drop_database(self.db.name) + # Check drop and dropDatabase events. + for change in change_stream: + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. 
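+ # Both the explicit next() call and the iterator protocol must raise.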
+ with self.assertRaises(StopIteration):
+ change_stream.next()
+ with self.assertRaises(StopIteration):
+ next(change_stream)
+
+ def _insert_and_check(self, change_stream, collname, doc):
+ coll = self.db[collname]
+ coll.insert_one(doc)
+ change = next(change_stream)
+ self.assertEqual(change["operationType"], "insert")
+ self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname})
+ self.assertEqual(change["fullDocument"], doc)
+
+ def insert_one_and_check(self, change_stream, doc):
+ self._insert_and_check(change_stream, self.id(), doc)
+
+ def test_simple(self):
+ collnames = self.generate_unique_collnames(3)
+ with self.change_stream() as change_stream:
+ for collname in collnames:
+ self._insert_and_check(
+ change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())}
+ )
+
+ def test_isolation(self):
+ # Ensure inserts to other dbs don't show up in our ChangeStream.
+ other_db = self.client.pymongo_test_temp
+ self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs")
+ collname = self.id()
+ with self.change_stream() as change_stream:
+ other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())})
+ self._insert_and_check(change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())})
+ self.client.drop_database(other_db)
+
+
+class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin):
+ @client_context.require_change_streams
+ def setUp(self):
+ super().setUp()
+ # Use a new collection for each test.
+ self.watched_collection().drop()
+ self.watched_collection().insert_one({})
+
+ def change_stream_with_client(self, client, *args, **kwargs):
+ return (
+ client[self.db.name]
+ .get_collection(self.watched_collection().name)
+ .watch(*args, **kwargs)
+ )
+
+ def generate_invalidate_event(self, change_stream):
+ # Dropping the collection invalidates the change stream.
+ change_stream._target.drop()
+
+ def _test_invalidate_stops_iteration(self, change_stream):
+ self.generate_invalidate_event(change_stream)
+ # Check the drop and invalidate events.
+ for change in change_stream:
+ self.assertIn(change["operationType"], ("drop", "invalidate"))
+ # Last change must be invalidate.
+ self.assertEqual(change["operationType"], "invalidate")
+ # Change stream must not allow further iteration.
+ with self.assertRaises(StopIteration):
+ change_stream.next()
+ with self.assertRaises(StopIteration):
+ next(change_stream)
+
+ def _test_get_invalidate_event(self, change_stream):
+ # Drop the watched collection to get an invalidate event.
+ change_stream._target.drop()
+ change = change_stream.next()
+ # 4.1+ returns a "drop" change document.
+ if change["operationType"] == "drop":
+ self.assertTrue(change["_id"])
+ self.assertEqual(
+ change["ns"],
+ {"db": change_stream._target.database.name, "coll": change_stream._target.name},
+ )
+ # Last change should be invalidate.
+ change = change_stream.next()
+ self.assertTrue(change["_id"])
+ self.assertEqual(change["operationType"], "invalidate")
+ self.assertNotIn("ns", change)
+ self.assertNotIn("fullDocument", change)
+ # The ChangeStream should be dead.
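+ # Once invalidated, the stream must not auto-resume; iteration raises instead.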
+ with self.assertRaises(StopIteration): + change_stream.next() + + def insert_one_and_check(self, change_stream, doc): + self.watched_collection().insert_one(doc) + change = next(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual( + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) + + def test_raw(self): + """Test with RawBSONDocument.""" + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) + with raw_coll.watch() as change_stream: + raw_doc = RawBSONDocument(encode({"_id": 1})) + self.watched_collection().insert_one(raw_doc) + change = next(change_stream) + self.assertIsInstance(change, RawBSONDocument) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) + + @client_context.require_version_min(4, 0) # Needed for start_at_operation_time. + def test_uuid_representations(self): + """Test with uuid document _ids and different uuid_representation.""" + optime = (self.db.command("ping"))["operationTime"] + self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) + for uuid_representation in ALL_UUID_REPRESENTATIONS: + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + with coll.watch(start_at_operation_time=optime, max_await_time_ms=1) as change_stream: + _ = change_stream.next() + resume_token_1 = change_stream.resume_token + _ = change_stream.next() + resume_token_2 = change_stream.resume_token + + # Should not error. + with coll.watch(resume_after=resume_token_1): + pass + with coll.watch(resume_after=resume_token_2): + pass + + def test_document_id_order(self): + """Test with document _ids that need their order preserved.""" + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} + for document_class in (dict, SON, RawBSONDocument): + options = self.watched_collection().codec_options.with_options( + document_class=document_class + ) + coll = self.watched_collection(codec_options=options) + with coll.watch() as change_stream: + coll.insert_one(random_doc) + _ = change_stream.next() + resume_token = change_stream.resume_token + + # The resume token is always a document. + self.assertIsInstance(resume_token, document_class) + # Should not error. + with coll.watch(resume_after=resume_token): + pass + coll.delete_many({}) + + def test_read_concern(self): + """Test readConcern is not validated by the driver.""" + # Read concern 'local' is not allowed for $changeStream. + coll = self.watched_collection(read_concern=ReadConcern("local")) + with self.assertRaises(OperationFailure): + coll.watch() + + # Does not error. 
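+ # The server itself enforces that $changeStream runs with majority read
+ # concern; the driver forwards whatever level was configured.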
+ coll = self.watched_collection(read_concern=ReadConcern("majority")) + with coll.watch(): + pass + + +class TestAllLegacyScenarios(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener + + @client_context.require_connection + def setUp(self): + super().setUp() + self.listener = AllowListEventListener("aggregate", "getMore") + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.listener.reset() + + def setUpCluster(self, scenario_dict): + assets = [ + (scenario_dict["database_name"], scenario_dict["collection_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), + ] + for db, coll in assets: + self.client.drop_database(db) + self.client[db].create_collection(coll) + + def setFailPoint(self, scenario_dict): + fail_point = scenario_dict.get("failPoint") + if fail_point is None: + return + elif not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + fail_cmd = SON([("configureFailPoint", "failCommand")]) + fail_cmd.update(fail_point) + client_context.client.admin.command(fail_cmd) + self.addCleanup( + client_context.client.admin.command, + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) + + def assert_list_contents_are_subset(self, superlist, sublist): + """Check that each element in sublist is a subset of the corresponding + element in superlist. + """ + self.assertEqual(len(superlist), len(sublist)) + for sup, sub in zip(superlist, sublist): + if isinstance(sub, dict): + self.assert_dict_is_subset(sup, sub) + continue + if isinstance(sub, (list, tuple)): + self.assert_list_contents_are_subset(sup, sub) + continue + self.assertEqual(sup, sub) + + def assert_dict_is_subset(self, superdict, subdict): + """Check that subdict is a subset of superdict.""" + exempt_fields = ["documentKey", "_id", "getMore"] + for key, value in subdict.items(): + if key not in superdict: + self.fail(f"Key {key} not found in {superdict}") + if isinstance(value, dict): + self.assert_dict_is_subset(superdict[key], value) + continue + if isinstance(value, (list, tuple)): + self.assert_list_contents_are_subset(superdict[key], value) + continue + if key in exempt_fields: + # Only check for presence of these exempt fields, but not value. + self.assertIn(key, superdict) + else: + self.assertEqual(superdict[key], value) + + def check_event(self, event, expectation_dict): + if event is None: + self.fail() + for key, value in expectation_dict.items(): + if isinstance(value, dict): + self.assert_dict_is_subset(getattr(event, key), value) + else: + self.assertEqual(getattr(event, key), value) + + def tearDown(self): + self.listener.reset() + + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_client.py b/test/test_client.py index bd3cbe8dd4..9d201c663b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2013-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,53 +13,175 @@ # limitations under the License. 
"""Test the mongo_client module.""" +from __future__ import annotations +import _thread as thread +import asyncio +import base64 +import contextlib +import copy import datetime +import gc +import logging import os -import threading +import re +import signal import socket +import struct +import subprocess import sys +import threading import time -import thread -import unittest +import uuid +from typing import Any, Iterable, Type, no_type_check +from unittest import mock, skipIf +from unittest.mock import patch +import pytest -sys.path[0:0] = [""] +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation +from pymongo.operations import _Op -from nose.plugins.skip import SkipTest +sys.path[0:0] = [""] +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + MockClientTest, + SkipTest, + UnitTest, + client_context, + client_knobs, + connected, + db_pwd, + db_user, + remove_all_users, + unittest, +) +from test.pymongo_mocks import MockClient +from test.test_binary import BinaryData +from test.utils import ( + assertRaisesExactly, + get_pool, + wait_until, +) +from test.utils_shared import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + delay, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, +) + +import bson +import pymongo +from bson import encode +from bson.codec_options import ( + CodecOptions, + DatetimeConversion, + TypeEncoder, + TypeRegistry, +) from bson.son import SON from bson.tz_util import utc -from pymongo.mongo_client import MongoClient -from pymongo.database import Database -from pymongo.pool import SocketInfo -from pymongo import thread_util, common -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - InvalidName, - OperationFailure, - PyMongoError) -from test import version, host, port, pair -from test.pymongo_mocks import MockClient -from test.utils import (assertRaisesExactly, - delay, - is_mongos, - remove_all_users, - server_is_master_with_slave, - server_started_with_auth, - TestRequestMixin, - _TestLazyConnectMixin, - lazy_client_trial, - NTHREADS, - get_pool) - - -def get_client(*args, **kwargs): - return MongoClient(host, port, *args, **kwargs) +from pymongo import event_loggers, message, monitoring +from pymongo.client_options import ClientOptions +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, MIN_SUPPORTED_WIRE_VERSION, has_c +from pymongo.compression_support import _have_snappy, _have_zstd +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool_options import _MAX_METADATA_SIZE, _METADATA, ENV_VAR_K8S, PoolOptions +from pymongo.read_preferences import ReadPreference +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import readable_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor, CursorType +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.pool import ( + Connection, +) +from pymongo.synchronous.settings import TOPOLOGY_TYPE 
+from pymongo.synchronous.topology import _ErrorContext +from pymongo.topology_description import TopologyDescription +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class ClientUnitTest(UnitTest): + """MongoClient tests that don't require a server.""" + + client: MongoClient + + def setUp(self) -> None: + self.client = self.rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog + + def test_keyword_arg_defaults(self): + client = self.simple_client( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) + options = client.options + pool_opts = options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + # socket.Socket.settimeout takes a float in seconds + self.assertEqual(20.0, pool_opts.connect_timeout) + self.assertEqual(None, pool_opts.wait_queue_timeout) + self.assertEqual(None, pool_opts._ssl_context) + self.assertEqual(None, options.replica_set_name) + self.assertEqual(ReadPreference.PRIMARY, client.read_preference) + self.assertAlmostEqual(12, client.options.server_selection_timeout) + + def test_connect_timeout(self): + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client( + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) -class TestClient(unittest.TestCase, TestRequestMixin): def test_types(self): self.assertRaises(TypeError, MongoClient, 1) self.assertRaises(TypeError, MongoClient, 1.14) @@ -69,1036 +191,2464 @@ def test_types(self): self.assertRaises(ConfigurationError, MongoClient, []) - def test_constants(self): - MongoClient.HOST = host - MongoClient.PORT = port - self.assertTrue(MongoClient()) + def test_max_pool_size_zero(self): + self.simple_client(maxPoolSize=0) - MongoClient.HOST = "somedomainthatdoesntexist.org" - MongoClient.PORT = 123456789 - assertRaisesExactly( - ConnectionFailure, MongoClient, connectTimeoutMS=600) - self.assertTrue(MongoClient(host, port)) + def test_uri_detection(self): + self.assertRaises(ConfigurationError, MongoClient, "/foo") + self.assertRaises(ConfigurationError, MongoClient, "://") + self.assertRaises(ConfigurationError, MongoClient, "foo/") - MongoClient.HOST = host - MongoClient.PORT = port - self.assertTrue(MongoClient()) + def test_get_db(self): + def make_db(base, name): + return base[name] - def assertIsInstance(self, obj, cls, msg=None): - """Backport from Python 2.7.""" - if not isinstance(obj, cls): - standardMsg = '%r is not an instance of %r' % (obj, cls) - self.fail(self._formatMessage(msg, standardMsg)) + self.assertRaises(InvalidName, make_db, self.client, "") + self.assertRaises(InvalidName, make_db, self.client, "te$t") + 
self.assertRaises(InvalidName, make_db, self.client, "te.t") + self.assertRaises(InvalidName, make_db, self.client, "te\\t") + self.assertRaises(InvalidName, make_db, self.client, "te/t") + self.assertRaises(InvalidName, make_db, self.client, "te st") + + self.assertIsInstance(self.client.test, Database) + self.assertEqual(self.client.test, self.client["test"]) + self.assertEqual(self.client.test, Database(self.client, "test")) + + def test_get_database(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + def test_getattr(self): + self.assertIsInstance(self.client["_does_not_exist"], Database) + + with self.assertRaises(AttributeError) as context: + self.client._does_not_exist + + # Message should be: + # "AttributeError: MongoClient has no attribute '_does_not_exist'. To + # access the _does_not_exist database, use client['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) - def test_init_disconnected(self): - c = MongoClient(host, port, _connect=False) + def test_iteration(self): + client = self.client + msg = "'MongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) - self.assertIsInstance(c.is_primary, bool) - self.assertIsInstance(c.is_mongos, bool) - self.assertIsInstance(c.max_pool_size, int) - self.assertIsInstance(c.use_greenlets, bool) - self.assertIsInstance(c.nodes, frozenset) - self.assertIsInstance(c.auto_start_request, bool) - self.assertEqual(dict, c.get_document_class()) - self.assertIsInstance(c.tz_aware, bool) - self.assertIsInstance(c.max_bson_size, int) - self.assertIsInstance(c.min_wire_version, int) - self.assertIsInstance(c.max_wire_version, int) - self.assertIsInstance(c.max_write_batch_size, int) - self.assertEqual(None, c.host) - self.assertEqual(None, c.port) - - c.pymongo_test.test.find_one() # Auto-connect. - self.assertEqual(host, c.host) - self.assertEqual(port, c.port) - - if version.at_least(c, (2, 5, 4, -1)): - self.assertTrue(c.max_wire_version > 0) - else: - self.assertEqual(c.max_wire_version, 0) - self.assertTrue(c.min_wire_version >= 0) + def test_get_default_database(self): + c = self.rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), + connect=False, + ) + self.assertEqual(Database(c, "foo"), c.get_default_database()) + # Test that default doesn't override the URI value. 
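+ # The URI already names "foo", so the "bar" fallback must be ignored.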
+ self.assertEqual(Database(c, "foo"), c.get_default_database("bar")) + + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, + ) + self.assertEqual(Database(c, "foo"), c.get_default_database("foo")) - bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, _connect=False) - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) + def test_get_default_database_error(self): + # URI with no database. + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_default_database) - def test_init_disconnected_with_auth(self): - uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, _connect=False) - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) + def test_get_default_database_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + client_context.host, + client_context.port, + ) + c = self.rs_or_single_client(uri, connect=False) + self.assertEqual(Database(c, "foo"), c.get_default_database()) - def test_connect(self): - # Check that the exception is a ConnectionFailure, not a subclass like - # AutoReconnect - assertRaisesExactly( - ConnectionFailure, MongoClient, - "somedomainthatdoesntexist.org", connectTimeoutMS=600) + def test_get_database_default(self): + c = self.rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), + connect=False, + ) + self.assertEqual(Database(c, "foo"), c.get_database()) - assertRaisesExactly( - ConnectionFailure, MongoClient, host, 123456789) + def test_get_database_default_error(self): + # URI with no database. + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_database) - self.assertTrue(MongoClient(host, port)) + def test_get_database_default_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + client_context.host, + client_context.port, + ) + c = self.rs_or_single_client(uri, connect=False) + self.assertEqual(Database(c, "foo"), c.get_database()) - def test_equality(self): - client = MongoClient(host, port) - self.assertEqual(client, MongoClient(host, port)) - # Explicitly test inequality - self.assertFalse(client != MongoClient(host, port)) + def test_primary_read_pref_with_tags(self): + # No tags allowed with "primary". 
+ with self.assertRaises(ConfigurationError): + self.single_client("mongodb://host/?readpreferencetags=dc:east") - def test_host_w_port(self): - self.assertTrue(MongoClient("%s:%d" % (host, port))) - assertRaisesExactly( - ConnectionFailure, MongoClient, "%s:1234567" % (host,), port) + with self.assertRaises(ConfigurationError): + self.single_client("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") - def test_repr(self): - # Making host a str avoids the 'u' prefix in Python 2, so the repr is - # the same in Python 2 and 3. - self.assertEqual(repr(MongoClient(str(host), port)), - "MongoClient('%s', %d)" % (host, port)) + def test_read_preference(self): + c = self.rs_or_single_client( + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) + self.assertEqual(c.read_preference, ReadPreference.NEAREST) - def test_getters(self): - self.assertEqual(MongoClient(host, port).host, host) - self.assertEqual(MongoClient(host, port).port, port) - self.assertEqual(set([(host, port)]), - MongoClient(host, port).nodes) - - def test_use_greenlets(self): - self.assertFalse(MongoClient(host, port).use_greenlets) - if thread_util.have_gevent: - self.assertTrue( - MongoClient( - host, port, use_greenlets=True).use_greenlets) + def test_metadata(self): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" + metadata["application"] = {"name": "foobar"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + client = self.simple_client("foo", 27017, appname="foobar", connect=False) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # No error + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) + # Bad "driver" options. + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") + self.assertRaises(TypeError, DriverInfo) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) + # Test appending to driver info. + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # Test truncating driver info metadata. 
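+ # Oversized driver/platform strings must be trimmed so the encoded
+ # handshake metadata document stays within the size limit.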
+ client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) + client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) - def test_get_db(self): - client = MongoClient(host, port) + @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) + def test_container_metadata(self): + metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo" + metadata["env"] = {} + metadata["env"]["container"] = {"orchestrator": "kubernetes"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) + + def test_kwargs_codec_options(self): + class MyFloatType: + def __init__(self, x): + self.__x = x + + @property + def x(self): + return self.__x + + class MyFloatAsIntEncoder(TypeEncoder): + python_type = MyFloatType + + def transform_python(self, value): + return int(value) + + # Ensure codec options are passed in correctly + document_class: Type[SON] = SON + type_registry = TypeRegistry([MyFloatAsIntEncoder()]) + tz_aware = True + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + tzinfo = utc + c = self.simple_client( + document_class=document_class, + type_registry=type_registry, + tz_aware=tz_aware, + uuidrepresentation=uuid_representation_label, + unicode_decode_error_handler=unicode_decode_error_handler, + tzinfo=tzinfo, + connect=False, + ) + self.assertEqual(c.codec_options.document_class, document_class) + self.assertEqual(c.codec_options.type_registry, type_registry) + self.assertEqual(c.codec_options.tz_aware, tz_aware) + self.assertEqual( + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual(c.codec_options.tzinfo, tzinfo) + + def test_uri_codec_options(self): + # Ensure codec options are passed in correctly + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" + uri = ( + "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" + "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" + % ( + client_context.host, + client_context.port, + uuid_representation_label, + unicode_decode_error_handler, + datetime_conversion, + ) + ) + c = self.simple_client(uri, connect=False) + self.assertEqual(c.codec_options.tz_aware, True) + self.assertEqual( + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) - def make_db(base, name): - return base[name] + # Change the passed datetime_conversion to a number and re-assert. 
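+ # DatetimeConversion values are integers, so the numeric form is an
+ # equally valid URI spelling.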
+ uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") + c = self.simple_client(uri, connect=False) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) - self.assertRaises(InvalidName, make_db, client, "") - self.assertRaises(InvalidName, make_db, client, "te$t") - self.assertRaises(InvalidName, make_db, client, "te.t") - self.assertRaises(InvalidName, make_db, client, "te\\t") - self.assertRaises(InvalidName, make_db, client, "te/t") - self.assertRaises(InvalidName, make_db, client, "te st") + def test_uri_option_precedence(self): + # Ensure kwarg options override connection string options. + uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" + c = self.simple_client( + uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" + ) + clopts = c.options + opts = clopts._options + + self.assertEqual(opts["tls"], False) + self.assertEqual(clopts.replica_set_name, "newname") + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + + def test_connection_timeout_ms_propagates_to_DNS_resolver(self): + # Patch the resolver. + from pymongo.synchronous.srv_resolver import _resolve + + patched_resolver = FunctionCallRecorder(_resolve) + pymongo.synchronous.srv_resolver._resolve = patched_resolver + + def reset_resolver(): + pymongo.synchronous.srv_resolver._resolve = _resolve + + self.addCleanup(reset_resolver) + + # Setup. + base_uri = "mongodb+srv://test5.test.build.10gen.cc" + connectTimeoutMS = 5000 + expected_kw_value = 5.0 + uri_with_timeout = base_uri + "/?connectTimeoutMS=6000" + expected_uri_value = 6.0 + + def test_scenario(args, kwargs, expected_value): + patched_resolver.reset() + self.simple_client(*args, **kwargs) + for _, kw in patched_resolver.call_list(): + self.assertAlmostEqual(kw["lifetime"], expected_value) + + # No timeout specified. + test_scenario((base_uri,), {}, CONNECT_TIMEOUT) + + # Timeout only specified in connection string. + test_scenario((uri_with_timeout,), {}, expected_uri_value) + + # Timeout only specified in keyword arguments. + kwarg = {"connectTimeoutMS": connectTimeoutMS} + test_scenario((base_uri,), kwarg, expected_kw_value) + + # Timeout specified in both kwargs and connection string. + test_scenario((uri_with_timeout,), kwarg, expected_kw_value) + + def test_uri_security_options(self): + # Ensure that we don't silently override security-related options. + with self.assertRaises(InvalidURI): + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) + + # Matching SSL and TLS options should not cause errors. + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c.options._options["tls"], False) + + # Conflicting tlsInsecure options should raise an error. + with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) - self.assertTrue(isinstance(client.test, Database)) - self.assertEqual(client.test, client["test"]) - self.assertEqual(client.test, Database(client, "test")) + # Conflicting legacy tlsInsecure options should also raise an error. 
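+ # tlsInsecure=true implies tlsAllowInvalidCertificates=true, so passing
+ # False for the latter contradicts it.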
+ with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) - def test_database_names(self): - client = MongoClient(host, port) + # Conflicting kwargs should raise InvalidURI + with self.assertRaises(InvalidURI): + self.simple_client(ssl=True, tls=False) + + def test_event_listeners(self): + c = self.simple_client(event_listeners=[], connect=False) + self.assertEqual(c.options.event_listeners, []) + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] + c = self.simple_client(event_listeners=listeners, connect=False) + self.assertEqual(c.options.event_listeners, listeners) + + def test_client_options(self): + c = self.simple_client(connect=False) + self.assertIsInstance(c.options, ClientOptions) + self.assertIsInstance(c.options.pool_options, PoolOptions) + self.assertEqual(c.options.server_selection_timeout, 30) + self.assertEqual(c.options.pool_options.max_idle_time_seconds, None) + self.assertIsInstance(c.options.retry_writes, bool) + self.assertIsInstance(c.options.retry_reads, bool) + + def test_validate_suggestion(self): + """Validate kwargs in constructor.""" + for typo in ["auth", "Auth", "AUTH"]: + expected = f"Unknown option: {typo}. Did you mean one of (authsource, authmechanism, authoidcallowedhosts) or maybe a camelCase version of one? Refer to docstring." + expected = re.escape(expected) + with self.assertRaisesRegex(ConfigurationError, expected): + MongoClient(**{typo: "standard"}) # type: ignore[arg-type] + + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_logging(self, mock_get_hosts): + normal_hosts = [ + "normal.host.com", + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + with self.assertLogs("pymongo", level="INFO") as cm: + for host in normal_hosts: + MongoClient(host, connect=False) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + MongoClient(host, connect=False) + MongoClient(multi_host, connect=False) + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] + self.assertEqual(len(logs), 7) + + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_warning(self, mock_get_hosts): + with self._caplog.at_level(logging.WARN): + normal_hosts = [ + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + for host in normal_hosts: + with self.assertWarns(UserWarning): + self.simple_client(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + with self.assertWarns(UserWarning): + self.simple_client(host) + with self.assertWarns(UserWarning): + self.simple_client(multi_host) + + +class TestClient(IntegrationTest): + def test_multiple_uris(self): + with self.assertRaises(ConfigurationError): + MongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + 
"mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) - client.pymongo_test.test.save({"dummy": u"object"}) - client.pymongo_test_mike.test.save({"dummy": u"object"}) + def test_max_idle_time_reaper_default(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper doesn't remove connections when maxIdleTimeMS not set + client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + self.assertIn(conn, server._pool.conns) + + def test_max_idle_time_reaper_removes_stale_minPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper removes idle socket and replaces it with a new one + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, two + # connections could be created and checked into the pool. + self.assertGreaterEqual(len(server._pool.conns), 1) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") + + def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper respects maxPoolSize when adding new connections. + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, + # maxPoolSize=1 should prevent two connections from being created. + self.assertEqual(1, len(server._pool.conns)) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") + + def test_max_idle_time_reaper_removes_stale(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper has removed idle socket and NOT replaced it + client = self.rs_or_single_client(maxIdleTimeMS=500) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn_one: + pass + # Assert that the pool does not close connections prematurely. 
+ time.sleep(0.300) + with server._pool.checkout() as conn_two: + pass + self.assertIs(conn_one, conn_two) + wait_until( + lambda: len(server._pool.conns) == 0, + "stale socket reaped and new one NOT added to the pool", + ) - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_mike" in dbs) + def test_min_pool_size(self): + with client_knobs(kill_cursor_frequency=0.1): + client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + self.assertEqual(0, len(server._pool.conns)) + + # Assert that pool started up at minPoolSize + client = self.rs_or_single_client(minPoolSize=10) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) - def test_drop_database(self): - client = MongoClient(host, port) - - self.assertRaises(TypeError, client.drop_database, 5) - self.assertRaises(TypeError, client.drop_database, None) - - raise SkipTest("This test often fails due to SERVER-2329") - - client.pymongo_test.test.save({"dummy": u"object"}) - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - client.drop_database("pymongo_test") - dbs = client.database_names() - self.assertTrue("pymongo_test" not in dbs) - - client.pymongo_test.test.save({"dummy": u"object"}) - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - client.drop_database(client.pymongo_test) - dbs = client.database_names() - self.assertTrue("pymongo_test" not in dbs) - - def test_copy_db(self): - c = MongoClient(host, port) - # Due to SERVER-2329, databases may not disappear - # from a master in a master-slave pair. - if server_is_master_with_slave(c): - raise SkipTest("SERVER-2329") - # We test copy twice; once starting in a request and once not. In - # either case the copy should succeed (because it starts a request - # internally) and should leave us in the same state as before the copy. 
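test_min_pool_size checks two behaviors: the pool pre-fills to minPoolSize, and a closed socket is later replaced in the background. A toy, list-backed sketch of that maintenance pass, not PyMongo's actual pool:

class FakeConn:
    def __init__(self):
        self.closed = False

def maintain_pool(conns, min_size, max_size, connect):
    # Reap closed sockets, then top back up, never exceeding max_size.
    conns[:] = [c for c in conns if not c.closed]
    while len(conns) < min(min_size, max_size):
        conns.append(connect())

pool = []
maintain_pool(pool, min_size=10, max_size=100, connect=FakeConn)
assert len(pool) == 10                   # started up at minPoolSize
pool[0].closed = True
maintain_pool(pool, min_size=10, max_size=100, connect=FakeConn)
assert len(pool) == 10 and not any(c.closed for c in pool)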
- c.start_request() - - self.assertRaises(TypeError, c.copy_database, 4, "foo") - self.assertRaises(TypeError, c.copy_database, "foo", 4) - - self.assertRaises(InvalidName, c.copy_database, "foo", "$foo") - - c.pymongo_test.test.drop() - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - self.assertFalse("pymongo_test1" in c.database_names()) - self.assertFalse("pymongo_test2" in c.database_names()) - - c.pymongo_test.test.insert({"foo": "bar"}) - - c.copy_database("pymongo_test", "pymongo_test1") - # copy_database() didn't accidentally end the request - self.assertTrue(c.in_request()) - - self.assertTrue("pymongo_test1" in c.database_names()) - self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"]) - - c.end_request() - self.assertFalse(c.in_request()) - c.copy_database("pymongo_test", "pymongo_test2", - "%s:%d" % (host, port)) - # copy_database() didn't accidentally restart the request - self.assertFalse(c.in_request()) - - self.assertTrue("pymongo_test2" in c.database_names()) - self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"]) - - # See SERVER-6427 for mongos - if (version.at_least(c, (1, 3, 3, 1)) and - not is_mongos(c) and server_started_with_auth(c)): - - c.drop_database("pymongo_test1") - - c.admin.add_user("admin", "password") - c.admin.authenticate("admin", "password") - try: - c.pymongo_test.add_user("mike", "password") - - self.assertRaises(OperationFailure, c.copy_database, - "pymongo_test", "pymongo_test1", - username="foo", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) - - self.assertRaises(OperationFailure, c.copy_database, - "pymongo_test", "pymongo_test1", - username="mike", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) - - c.copy_database("pymongo_test", "pymongo_test1", - username="mike", password="password") - self.assertTrue("pymongo_test1" in c.database_names()) - self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"]) - finally: - # Cleanup - remove_all_users(c.pymongo_test) - c.admin.remove_user("admin") - c.disconnect() + # Assert that if a socket is closed, a new one takes its place + with server._pool.checkout() as conn: + conn.close_conn(None) + wait_until( + lambda: len(server._pool.conns) == 10, + "a closed socket gets replaced from the pool", + ) + self.assertNotIn(conn, server._pool.conns) + + def test_max_idle_time_checkout(self): + # Use high frequency to test _get_socket_no_auth. + with client_knobs(kill_cursor_frequency=99999999): + client = self.rs_or_single_client(maxIdleTimeMS=500) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + time.sleep(1) # Sleep so that the socket becomes stale. + + with server._pool.checkout() as new_con: + self.assertNotEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) + self.assertNotIn(conn, server._pool.conns) + self.assertIn(new_con, server._pool.conns) + + # Test that connections are reused if maxIdleTimeMS is not set. 
+ client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + time.sleep(1) + with server._pool.checkout() as new_con: + self.assertEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) - def test_iteration(self): - client = MongoClient(host, port) + def test_constants(self): + """This test uses MongoClient explicitly to make sure that host and + port are not overloaded. + """ + host, port = client_context.host, client_context.port + kwargs: dict = client_context.default_client_options.copy() + if client_context.auth_enabled: + kwargs["username"] = db_user + kwargs["password"] = db_pwd + + # Set bad defaults. + MongoClient.HOST = "somedomainthatdoesntexist.org" + MongoClient.PORT = 123456789 + with self.assertRaises(AutoReconnect): + c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs) + connected(c) - def iterate(): - [a for a in client] + c = self.simple_client(host, port, **kwargs) + # Override the defaults. No error. + connected(c) - self.assertRaises(TypeError, iterate) + # Set good defaults. + MongoClient.HOST = host + MongoClient.PORT = port - def test_disconnect(self): - c = MongoClient(host, port) - coll = c.pymongo_test.bar + # No error. + c = self.simple_client(**kwargs) + connected(c) - c.disconnect() - c.disconnect() + def test_init_disconnected(self): + host, port = client_context.host, client_context.port + c = self.rs_or_single_client(connect=False) + # is_primary causes client to block until connected + self.assertIsInstance(c.is_primary, bool) + c = self.rs_or_single_client(connect=False) + self.assertIsInstance(c.is_mongos, bool) + c = self.rs_or_single_client(connect=False) + self.assertIsInstance(c.options.pool_options.max_pool_size, int) + self.assertIsInstance(c.nodes, frozenset) - coll.count() + c = self.rs_or_single_client(connect=False) + self.assertEqual(c.codec_options, CodecOptions()) + c = self.rs_or_single_client(connect=False) + self.assertFalse(c.primary) + self.assertFalse(c.secondaries) + c = self.rs_or_single_client(connect=False) + self.assertIsInstance(c.topology_description, TopologyDescription) + self.assertEqual(c.topology_description, c._topology._description) + if client_context.is_rs: + # The primary's host and port are from the replica set config. 
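test_init_disconnected draws the line between state readable immediately on a connect=False client (codec_options, pool options) and properties that block until a server is selected (is_primary, address). A minimal sketch of that lazy-connect pattern, with assumed names:

class LazyClient:
    def __init__(self, connect=True):
        self._connected = False
        if connect:
            self._ensure_connected()

    def _ensure_connected(self):
        # Real code would run server selection here; the sketch flips a flag.
        self._connected = True

    @property
    def is_primary(self):
        self._ensure_connected()  # topology property: blocks until connected
        return True

c = LazyClient(connect=False)
assert not c._connected       # construction does no I/O
assert c.is_primary           # first topology access triggers the connect
assert c._connected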
+ self.assertIsNotNone(c.address) + else: + self.assertEqual(c.address, (host, port)) - c.disconnect() - c.disconnect() + bad_host = "somedomainthatdoesntexist.org" + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + c.pymongo_test.test.find_one() - coll.count() + def test_init_disconnected_with_auth(self): + uri = "mongodb://user:pass@somedomainthatdoesntexist" + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + c.pymongo_test.test.find_one() + + @client_context.require_replica_set + @client_context.require_no_load_balancer + @client_context.require_tls + def test_init_disconnected_with_srv(self): + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) - def test_from_uri(self): - c = MongoClient(host, port) + # address causes client to block until connected + self.assertIsNotNone(c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + c.close() - self.assertEqual(c, MongoClient("mongodb://%s:%d" % (host, port))) - self.assertTrue(MongoClient( - "mongodb://%s:%d" % (host, port), slave_okay=True).slave_okay) - self.assertTrue(MongoClient( - "mongodb://%s:%d/?slaveok=true;w=2" % (host, port)).slave_okay) + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + c.primary + self.assertIsNotNone(c._topology) + c.close() - def test_get_default_database(self): - c = MongoClient("mongodb://%s:%d/foo" % (host, port), _connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + c.secondaries + self.assertIsNotNone(c._topology) + c.close() - def test_get_default_database_error(self): - # URI with no database. - c = MongoClient("mongodb://%s:%d/" % (host, port), _connect=False) - self.assertRaises(ConfigurationError, c.get_default_database) + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + c.arbiters + self.assertIsNotNone(c._topology) - def test_get_default_database_with_authsource(self): - # Ensure we distinguish database name from authSource. 
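The removed comment above captures a rule the modern API keeps: the URI's path component names the default database, while authSource only selects where credentials are verified. A simplified illustration with the standard library, not PyMongo's URI parser:

from urllib.parse import parse_qs, urlsplit

uri = "mongodb://localhost:27017/foo?authSource=src"
parts = urlsplit(uri)
default_db = parts.path.lstrip("/") or None            # "foo"
auth_source = parse_qs(parts.query).get("authSource", [None])[0]

assert default_db == "foo"    # what get_default_database() returned
assert auth_source == "src"   # where the credentials are checked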
- uri = "mongodb://%s:%d/foo?authSource=src" % (host, port) - c = MongoClient(uri, _connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + def test_equality(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = self.rs_or_single_client(seed, connect=False) + self.assertEqual(client_context.client, c) + # Explicitly test inequality + self.assertFalse(client_context.client != c) - def test_auth_from_uri(self): - c = MongoClient(host, port) - # Sharded auth not supported before MongoDB 2.0 - if is_mongos(c) and not version.at_least(c, (2, 0, 0)): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(c): - raise SkipTest('Authentication is not enabled on server') - - c.admin.add_user("admin", "pass") - c.admin.authenticate("admin", "pass") - try: - c.pymongo_test.add_user("user", "pass", roles=['userAdmin', 'readWrite']) - - self.assertRaises(ConfigurationError, MongoClient, - "mongodb://foo:bar@%s:%d" % (host, port)) - self.assertRaises(ConfigurationError, MongoClient, - "mongodb://admin:bar@%s:%d" % (host, port)) - self.assertRaises(ConfigurationError, MongoClient, - "mongodb://user:pass@%s:%d" % (host, port)) - MongoClient("mongodb://admin:pass@%s:%d" % (host, port)) - - self.assertRaises(ConfigurationError, MongoClient, - "mongodb://admin:pass@%s:%d/pymongo_test" % - (host, port)) - self.assertRaises(ConfigurationError, MongoClient, - "mongodb://user:foo@%s:%d/pymongo_test" % - (host, port)) - MongoClient("mongodb://user:pass@%s:%d/pymongo_test" % - (host, port)) - - # Auth with lazy connection. - MongoClient( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), - _connect=False).pymongo_test.test.find_one() + c = self.rs_or_single_client("invalid.com", connect=False) + self.assertNotEqual(client_context.client, c) + self.assertTrue(client_context.client != c) - # Wrong password. - bad_client = MongoClient( - "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), - _connect=False) + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) - self.assertRaises(OperationFailure, - bad_client.pymongo_test.test.find_one) + # Seeds differ: + self.assertNotEqual(c1, c2) - finally: - # Clean up. - remove_all_users(c.pymongo_test) - remove_all_users(c.admin) + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], connect=False) - def test_lazy_auth_raises_operation_failure(self): - # Check if we have the prerequisites to run this test. 
- c = MongoClient(host, port) - if not server_started_with_auth(c): - raise SkipTest('Authentication is not enabled on server') + # Same seeds but out of order still compares equal: + self.assertEqual(c1, c2) - if is_mongos(c) and not version.at_least(c, (2, 0, 0)): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") + def test_hashable(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = self.rs_or_single_client(seed, connect=False) + self.assertIn(c, {client_context.client}) + c = self.rs_or_single_client("invalid.com", connect=False) + self.assertNotIn(c, {client_context.client}) - lazy_client = MongoClient( - "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), - _connect=False) + def test_host_w_port(self): + with self.assertRaises(ValueError): + host = client_context.host + connected( + MongoClient( + f"{host}:1234567", + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) - assertRaisesExactly( - OperationFailure, lazy_client.test.collection.find_one) + def test_repr(self): + # Used to test 'eval' below. + import bson + + client = MongoClient( # type: ignore[type-var] + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) - def test_unix_socket(self): - if not hasattr(socket, "AF_UNIX"): - raise SkipTest("UNIX-sockets are not supported on this system") - if (sys.platform == 'darwin' and - server_started_with_auth(MongoClient(host, port))): - raise SkipTest("SERVER-8492") + the_repr = repr(client) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + client = self.simple_client( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) + the_repr = repr(client) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("sockettimeoutms=None", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + def test_repr_srv_host(self): + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + client._connect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + client.close() - mongodb_socket = '/tmp/mongodb-27017.sock' - if not os.access(mongodb_socket, os.R_OK): - raise SkipTest("Socket file is not accessable") + def test_getters(self): + wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") + + def test_list_databases(self): + cmd_docs = (self.client.admin.command("listDatabases"))["databases"] + cursor = self.client.list_databases() + self.assertIsInstance(cursor, CommandCursor) + helper_docs = cursor.to_list() + self.assertGreater(len(helper_docs), 0) + self.assertEqual(len(helper_docs), len(cmd_docs)) 
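test_equality and test_hashable pin down the identity contract the lines above exercise: two clients are equal when built from the same seeds regardless of order, and hashing must agree with equality. A sketch of that contract using a frozenset of seeds (the field names are illustrative):

class SeededClient:
    def __init__(self, seeds):
        self._seeds = frozenset(seeds)

    def __eq__(self, other):
        if isinstance(other, SeededClient):
            return self._seeds == other._seeds
        return NotImplemented

    def __hash__(self):
        return hash(self._seeds)

assert SeededClient(["a", "b", "c"]) == SeededClient(["c", "a", "b"])
assert SeededClient(["a"]) != SeededClient(["b"])
assert SeededClient(["a"]) in {SeededClient(["a"])}  # usable as a set/dict key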
+ # PYTHON-3529 Some fields may change between calls, just compare names. + for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): + self.assertIs(type(helper_doc), dict) + self.assertEqual(helper_doc.keys(), cmd_doc.keys()) + client = self.rs_or_single_client(document_class=SON) + for doc in client.list_databases(): + self.assertIs(type(doc), dict) + + self.client.pymongo_test.test.insert_one({}) + cursor = self.client.list_databases(filter={"name": "admin"}) + docs = cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(docs[0]["name"], "admin") + + cursor = self.client.list_databases(nameOnly=True) + for doc in cursor: + self.assertEqual(["name"], list(doc)) + + def test_list_database_names(self): + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) + cmd_docs = (self.client.admin.command("listDatabases"))["databases"] + cmd_names = [doc["name"] for doc in cmd_docs] + + db_names = self.client.list_database_names() + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) + self.assertEqual(db_names, cmd_names) - self.assertTrue(MongoClient("mongodb://%s" % mongodb_socket)) + def test_drop_database(self): + with self.assertRaises(TypeError): + self.client.drop_database(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + self.client.drop_database(None) # type: ignore[arg-type] + + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test2.test.insert_one({"dummy": "object"}) + dbs = self.client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test2", dbs) + self.client.drop_database("pymongo_test") + + if client_context.is_rs: + wc_client = self.rs_or_single_client(w=len(client_context.nodes) + 1) + with self.assertRaises(WriteConcernError): + wc_client.drop_database("pymongo_test2") + + self.client.drop_database(self.client.pymongo_test2) + dbs = self.client.list_database_names() + self.assertNotIn("pymongo_test", dbs) + self.assertNotIn("pymongo_test2", dbs) + + def test_close(self): + test_client = self.rs_or_single_client() + coll = test_client.pymongo_test.bar + test_client.close() + with self.assertRaises(InvalidOperation): + coll.count_documents({}) + + def test_close_kills_cursors(self): + if sys.platform.startswith("java"): + # We can't figure out how to make this test reliable with Jython. + raise SkipTest("Can't test with Jython") + test_client = self.rs_or_single_client() + # Kill any cursors possibly queued up by previous tests. + gc.collect() + test_client._process_periodic_tasks() + + # Add some test data. + coll = test_client.pymongo_test.test_close_kills_cursors + docs_inserted = 1000 + coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + # Open a cursor and leave it open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(next(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + + # Open a command cursor and leave it open on the server. + cursor = coll.aggregate([], batchSize=10) + self.assertTrue(bool(next(cursor))) + del cursor + # Required for PyPy, Jython and other Python implementations that + # don't use reference counting garbage collection. + gc.collect() + + # Close the client and ensure the topology is closed. 
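test_close_kills_cursors leans on garbage-collected cursors merely enqueueing their server-side ids, with a periodic task draining the queue later; that is why the test calls gc.collect() and then _process_periodic_tasks(). A toy sketch of that queue-and-drain shape, with assumed names:

kill_cursors_queue = []

class ToyCursor:
    def __init__(self, cursor_id):
        self.cursor_id = cursor_id

    def __del__(self):
        # No network I/O in __del__: just remember the id for later.
        kill_cursors_queue.append(self.cursor_id)

def process_periodic_tasks():
    drained, kill_cursors_queue[:] = list(kill_cursors_queue), []
    return drained  # real code would send killCursors for these ids

cursor = ToyCursor(1234)
del cursor  # CPython's refcounting runs __del__ immediately here
assert process_periodic_tasks() == [1234]
assert not kill_cursors_queue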
+ self.assertTrue(test_client._topology._opened) + test_client.close() + self.assertFalse(test_client._topology._opened) + test_client = self.rs_or_single_client() + # The killCursors task should not need to re-open the topology. + test_client._process_periodic_tasks() + self.assertTrue(test_client._topology._opened) + + def test_close_stops_kill_cursors_thread(self): + client = self.rs_client() + client.test.test.find_one() + self.assertFalse(client._kill_cursors_executor._stopped) + + # Closing the client should stop the thread. + client.close() + self.assertTrue(client._kill_cursors_executor._stopped) + + # Reusing the closed client should raise an InvalidOperation error. + with self.assertRaises(InvalidOperation): + client.admin.command("ping") + # Thread is still stopped. + self.assertTrue(client._kill_cursors_executor._stopped) + + def test_uri_connect_option(self): + # Ensure that topology is not opened if connect=False. + client = self.rs_client(connect=False) + self.assertFalse(client._topology._opened) + + # Ensure kill cursors thread has not been started. + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) + # Using the client should open topology and start the thread. + client.admin.command("ping") + self.assertTrue(client._topology._opened) + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) + + def test_close_does_not_open_servers(self): + client = self.rs_client(connect=False) + topology = client._topology + self.assertEqual(topology._servers, {}) + client.close() + self.assertEqual(topology._servers, {}) + + def test_close_closes_sockets(self): + client = self.rs_client() + client.test.test.find_one() + topology = client._topology + client.close() + for server in topology._servers.values(): + self.assertFalse(server._pool.conns) + self.assertTrue(server._monitor._executor._stopped) + self.assertTrue(server._monitor._rtt_monitor._executor._stopped) + self.assertFalse(server._monitor._pool.conns) + self.assertFalse(server._monitor._rtt_monitor._pool.conns) + + def test_bad_uri(self): + with self.assertRaises(InvalidURI): + MongoClient("http://localhost") + + @client_context.require_auth + @client_context.require_no_fips + def test_auth_from_uri(self): + host, port = client_context.host, client_context.port + client_context.create_user("admin", "admin", "pass") + self.addCleanup(client_context.drop_user, "admin", "admin") + self.addCleanup(remove_all_users, self.client.pymongo_test) - client = MongoClient("mongodb://%s" % mongodb_socket) - client.pymongo_test.test.save({"dummy": "object"}) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) - # Confirm we can read via the socket - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) + with self.assertRaises(OperationFailure): + connected(self.rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) - # Confirm it fails with a missing socket - self.assertRaises(ConnectionFailure, MongoClient, - "mongodb:///tmp/none-existent.sock") + # No error. + connected(self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) - def test_fork(self): - # Test using a client before and after a fork. 
- if sys.platform == "win32": - raise SkipTest("Can't fork on windows") + # Wrong database. + uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) + with self.assertRaises(OperationFailure): + connected(self.rs_or_single_client_noauth(uri)) - try: - from multiprocessing import Process, Pipe - except ImportError: - raise SkipTest("No multiprocessing module") + # No error. + connected( + self.rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + ) - db = MongoClient(host, port).pymongo_test + # Auth with lazy connection. + ( + self.rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ) + ).pymongo_test.test.find_one() - # Failure occurs if the client is used before the fork - db.test.find_one() - db.connection.end_request() + # Wrong password. + bad_client = self.rs_or_single_client_noauth( + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) - def loop(pipe): - while True: - try: - db.test.insert({"a": "b"}) - for _ in db.test.find(): - pass - except: - pipe.send(True) - os._exit(1) + with self.assertRaises(OperationFailure): + bad_client.pymongo_test.test.find_one() - cp1, cc1 = Pipe() - cp2, cc2 = Pipe() + @client_context.require_auth + def test_username_and_password(self): + client_context.create_user("admin", "ad min", "pa/ss") + self.addCleanup(client_context.drop_user, "admin", "ad min") - p1 = Process(target=loop, args=(cc1,)) - p2 = Process(target=loop, args=(cc2,)) + c = self.rs_or_single_client_noauth(username="ad min", password="pa/ss") - p1.start() - p2.start() + # Username and password aren't in strings that will likely be logged. + self.assertNotIn("ad min", repr(c)) + self.assertNotIn("ad min", str(c)) + self.assertNotIn("pa/ss", repr(c)) + self.assertNotIn("pa/ss", str(c)) - p1.join(1) - p2.join(1) + # Auth succeeds. + c.server_info() - p1.terminate() - p2.terminate() + with self.assertRaises(OperationFailure): + (self.rs_or_single_client_noauth(username="ad min", password="foo")).server_info() - p1.join() - p2.join() + @client_context.require_auth + @client_context.require_no_fips + def test_lazy_auth_raises_operation_failure(self): + host = client_context.host + lazy_client = self.rs_or_single_client_noauth( + f"mongodb://user:wrong@{host}/pymongo_test", connect=False + ) - cc1.close() - cc2.close() + assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) - # recv will only have data if the subprocess failed - try: - cp1.recv() - self.fail() - except EOFError: - pass - try: - cp2.recv() - self.fail() - except EOFError: - pass + @client_context.require_no_tls + def test_unix_socket(self): + if not hasattr(socket, "AF_UNIX"): + raise SkipTest("UNIX-sockets are not supported on this system") - def test_document_class(self): - c = MongoClient(host, port) - db = c.pymongo_test - db.test.insert({"x": 1}) + mongodb_socket = "/tmp/mongodb-%d.sock" % (client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (client_context.port,) + if not os.access(mongodb_socket, os.R_OK): + raise SkipTest("Socket file is not accessible") - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) + uri = "mongodb://%s" % encoded_socket + # Confirm we can do operations via the socket. 
+ client = self.rs_or_single_client(uri) + client.pymongo_test.test.insert_one({"dummy": "object"}) + dbs = client.list_database_names() + self.assertIn("pymongo_test", dbs) - c.document_class = SON + self.assertIn(mongodb_socket, repr(client)) - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) + # Confirm it fails with a missing socket. + with self.assertRaises(ConnectionFailure): + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 + ) + connected(c) - c = MongoClient(host, port, document_class=SON) + def test_document_class(self): + c = self.client db = c.pymongo_test + db.test.insert_one({"x": 1}) - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) + self.assertEqual(dict, c.codec_options.document_class) + self.assertIsInstance(db.test.find_one(), dict) + self.assertNotIsInstance(db.test.find_one(), SON) - c.document_class = dict + c = self.rs_or_single_client(document_class=SON) - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) + db = c.pymongo_test - def test_timeouts(self): - client = MongoClient(host, port, connectTimeoutMS=10500) - self.assertEqual(10.5, get_pool(client).conn_timeout) - client = MongoClient(host, port, socketTimeoutMS=10500) - self.assertEqual(10.5, get_pool(client).net_timeout) + self.assertEqual(SON, c.codec_options.document_class) + self.assertIsInstance(db.test.find_one(), SON) - def test_network_timeout_validation(self): - c = get_client(socketTimeoutMS=10 * 1000) - self.assertEqual(10, c._MongoClient__net_timeout) + def test_timeouts(self): + client = self.rs_or_single_client( + connectTimeoutMS=10500, + socketTimeoutMS=10500, + maxIdleTimeMS=10500, + serverSelectionTimeoutMS=10500, + ) + self.assertEqual(10.5, (get_pool(client)).opts.connect_timeout) + self.assertEqual(10.5, (get_pool(client)).opts.socket_timeout) + self.assertEqual(10.5, (get_pool(client)).opts.max_idle_time_seconds) + self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) + self.assertEqual(10.5, client.options.server_selection_timeout) - c = get_client(socketTimeoutMS=None) - self.assertEqual(None, c._MongoClient__net_timeout) + def test_socket_timeout_ms_validation(self): + c = self.rs_or_single_client(socketTimeoutMS=10 * 1000) + self.assertEqual(10, (get_pool(c)).opts.socket_timeout) - self.assertRaises(ConfigurationError, - get_client, socketTimeoutMS=0) + c = connected(self.rs_or_single_client(socketTimeoutMS=None)) + self.assertEqual(None, (get_pool(c)).opts.socket_timeout) - self.assertRaises(ConfigurationError, - get_client, socketTimeoutMS=-1) + c = connected(self.rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, (get_pool(c)).opts.socket_timeout) - self.assertRaises(ConfigurationError, - get_client, socketTimeoutMS=1e10) + with self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS=-1): + pass - self.assertRaises(ConfigurationError, - get_client, socketTimeoutMS='foo') + with self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS=1e10): + pass - # network_timeout is gone from MongoClient, remains in deprecated - # Connection - self.assertRaises(ConfigurationError, - get_client, network_timeout=10) + with 
self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS="foo"): + pass - def test_network_timeout(self): - no_timeout = MongoClient(host, port) + def test_socket_timeout(self): + no_timeout = self.client timeout_sec = 1 - timeout = MongoClient( - host, port, socketTimeoutMS=1000 * timeout_sec) + timeout = self.rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) no_timeout.pymongo_test.drop_collection("test") - no_timeout.pymongo_test.test.insert({"x": 1}) + no_timeout.pymongo_test.test.insert_one({"x": 1}) # A $where clause that takes a second longer than the timeout where_func = delay(timeout_sec + 1) def get_x(db): - doc = db.test.find().where(where_func).next() + doc = next(db.test.find().where(where_func)) return doc["x"] + self.assertEqual(1, get_x(no_timeout.pymongo_test)) - self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test) + with self.assertRaises(NetworkTimeout): + get_x(timeout.pymongo_test) - def get_x_timeout(db, t): - doc = db.test.find(network_timeout=t).where(where_func).next() - return doc["x"] - self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None)) - self.assertRaises(ConnectionFailure, get_x_timeout, - no_timeout.pymongo_test, 0.1) + def test_server_selection_timeout(self): + client = MongoClient(serverSelectionTimeoutMS=100, connect=False) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() - def test_waitQueueTimeoutMS(self): - client = MongoClient(host, port, waitQueueTimeoutMS=2000) - self.assertEqual(get_pool(client).wait_queue_timeout, 2) + client = MongoClient(serverSelectionTimeoutMS=0, connect=False) - def test_waitQueueMultiple(self): - client = MongoClient(host, port, max_pool_size=3, waitQueueMultiple=2) - pool = get_pool(client) - self.assertEqual(pool.wait_queue_multiple, 2) - self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6) + self.assertAlmostEqual(0, client.options.server_selection_timeout) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False + ) + client.close() + + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() + + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) + client.close() + + # Test invalid timeout in URI ignored and set to default. 
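The comment just above marks the asymmetry this test establishes: serverSelectionTimeoutMS is validated strictly as a keyword argument (bad values raise) but leniently in the URI, where a bad value is dropped with a warning and the 30-second default survives, as the lines that follow verify. A sketch of that dual-path handling, with hypothetical helper names:

import warnings

_DEFAULT_SST = 30.0  # seconds

def _validate_sst(value):
    seconds = float(value) / 1000.0  # the option is expressed in milliseconds
    if seconds < 0:
        raise ValueError("serverSelectionTimeoutMS must be >= 0")
    return seconds

def sst_from_kwarg(value):
    return _validate_sst(value)  # strict: invalid input raises

def sst_from_uri(raw):
    try:
        return _validate_sst(raw)
    except ValueError:
        warnings.warn("Ignoring invalid serverSelectionTimeoutMS", stacklevel=2)
        return _DEFAULT_SST      # lenient: fall back to the default

assert sst_from_kwarg(100) == 0.1
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    assert sst_from_uri("-1") == 30.0
    assert sst_from_uri("") == 30.0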
+ client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + client.close() + + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + + def test_waitQueueTimeoutMS(self): + listener = CMAPListener() + client = self.rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) + + def test_socketKeepAlive(self): + pool = get_pool(self.client) + with pool.checkout() as conn: + keepalive = conn.conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + self.assertTrue(keepalive) + + @no_type_check def test_tz_aware(self): - self.assertRaises(ConfigurationError, MongoClient, tz_aware='foo') + self.assertRaises(ValueError, MongoClient, tz_aware="foo") - aware = MongoClient(host, port, tz_aware=True) - naive = MongoClient(host, port) + aware = self.rs_or_single_client(tz_aware=True) + self.addCleanup(aware.close) + naive = self.client aware.pymongo_test.drop_collection("test") - now = datetime.datetime.utcnow() - aware.pymongo_test.test.insert({"x": now}) + now = datetime.datetime.now(tz=datetime.timezone.utc) + aware.pymongo_test.test.insert_one({"x": now}) - self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) + self.assertEqual(None, (naive.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual(utc, (aware.pymongo_test.test.find_one())["x"].tzinfo) self.assertEqual( - aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) + (aware.pymongo_test.test.find_one())["x"].replace(tzinfo=None), + (naive.pymongo_test.test.find_one())["x"], + ) + @client_context.require_ipv6 def test_ipv6(self): - try: - client = MongoClient("[::1]") - except: - # Either mongod was started without --ipv6 - # or the OS doesn't support it (or both). - raise SkipTest("No IPv6") - - # Try a few simple things - MongoClient("mongodb://[::1]:%d" % (port,)) - MongoClient("mongodb://[::1]:%d/?w=0" % (port,)) - MongoClient("[::1]:%d,localhost:%d" % (port, port)) - - client = MongoClient("localhost:%d,[::1]:%d" % (port, port)) - client.pymongo_test.test.save({"dummy": u"object"}) - client.pymongo_test_bernie.test.save({"dummy": u"object"}) - - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) - - def test_fsync_lock_unlock(self): - c = get_client() - if is_mongos(c): - raise SkipTest('fsync/lock not supported by mongos') - if not version.at_least(c, (2, 0)) and server_started_with_auth(c): - raise SkipTest('Requires server >= 2.0 to test with auth') - - res = c.admin.command('getCmdLineOpts') - if '--master' in res['argv'] and version.at_least(c, (2, 3, 0)): - raise SkipTest('SERVER-7714') - - self.assertFalse(c.is_locked) - # async flushing not supported on windows... 
- if sys.platform not in ('cygwin', 'win32'): - c.fsync(async=True) - self.assertFalse(c.is_locked) - c.fsync(lock=True) - self.assertTrue(c.is_locked) - locked = True - c.unlock() - for _ in xrange(5): - locked = c.is_locked - if not locked: - break - time.sleep(1) - self.assertFalse(locked) + if client_context.tls: + if not HAVE_IPADDRESS: + raise SkipTest("Need the ipaddress module to test with SSL") - def test_contextlib(self): - if sys.version_info < (2, 6): - raise SkipTest("With statement requires Python >= 2.6") + if client_context.auth_enabled: + auth_str = f"{db_user}:{db_pwd}@" + else: + auth_str = "" + + uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) + if client_context.is_rs: + uri += "/?replicaSet=" + (client_context.replica_set_name or "") + + client = self.rs_or_single_client_noauth(uri) + client.pymongo_test.test.insert_one({"dummy": "object"}) + client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) - import contextlib + dbs = client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) - client = get_client(auto_start_request=False) + def test_contextlib(self): + client = self.rs_or_single_client() client.pymongo_test.drop_collection("test") - client.pymongo_test.test.insert({"foo": "bar"}) + client.pymongo_test.test.insert_one({"foo": "bar"}) # The socket used for the previous commands has been returned to the # pool - self.assertEqual(1, len(get_pool(client).sockets)) - - # We need exec here because if the Python version is less than 2.6 - # these with-statements won't even compile. - exec """ -with contextlib.closing(client): - self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) -self.assertEqual(None, client._MongoClient__member) -""" - - exec """ -with get_client() as client: - self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) -self.assertEqual(None, client._MongoClient__member) -""" - - def test_with_start_request(self): - client = get_client() - pool = get_pool(client) + self.assertEqual(1, len((get_pool(client)).conns)) + + # contextlib async support was added in Python 3.10 + if _IS_SYNC or sys.version_info >= (3, 10): + with contextlib.closing(client): + self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + client = self.rs_or_single_client() + with client as client: + self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + + @client_context.require_sync + def test_interrupt_signal(self): + if sys.platform.startswith("java"): + # We can't figure out how to raise an exception on a thread that's + # blocked on a socket, whether that's the main thread or a worker, + # without simply killing the whole thread in Jython. This suggests + # PYTHON-294 can't actually occur in Jython. 
+ raise SkipTest("Can't test interrupts in Jython") + if is_greenthread_patched(): + raise SkipTest("Can't reliably test interrupts with green threads") - # No request started - self.assertNoRequest(pool) - self.assertDifferentSock(pool) - - # Start a request - request_context_mgr = client.start_request() - self.assertTrue( - isinstance(request_context_mgr, object) - ) - - self.assertNoSocketYet(pool) - self.assertSameSock(pool) - self.assertRequestSocket(pool) - - # End request - request_context_mgr.__exit__(None, None, None) - self.assertNoRequest(pool) - self.assertDifferentSock(pool) - - # Test the 'with' statement - if sys.version_info >= (2, 6): - # We need exec here because if the Python version is less than 2.6 - # these with-statements won't even compile. - exec """ -with client.start_request() as request: - self.assertEqual(client, request.connection) - self.assertNoSocketYet(pool) - self.assertSameSock(pool) - self.assertRequestSocket(pool) -""" - - # Request has ended - self.assertNoRequest(pool) - self.assertDifferentSock(pool) - - def test_auto_start_request(self): - for bad_horrible_value in (None, 5, 'hi!'): - self.assertRaises( - (TypeError, ConfigurationError), - lambda: get_client(auto_start_request=bad_horrible_value) - ) + # Test fix for PYTHON-294 -- make sure MongoClient closes its + # socket if it gets an interrupt while waiting to recv() from it. + db = self.client.pymongo_test - # auto_start_request should default to False - client = get_client() - self.assertFalse(client.auto_start_request) + # A $where clause which takes 1.5 sec to execute + where = delay(1.5) + + # Need exactly 1 document so find() will execute its $where clause once + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) - client = get_client(auto_start_request=True) - self.assertTrue(client.auto_start_request) + old_signal_handler = None + try: + # Platform-specific hacks for raising a KeyboardInterrupt on the + # main thread while find() is in-progress: On Windows, SIGALRM is + # unavailable so we use a second thread. In our Evergreen setup on + # Linux, the thread technique causes an error in the test at + # conn.recv(): TypeError: 'int' object is not callable + # We don't know what causes this, so we hack around it. + + if sys.platform == "win32": + + def interrupter(): + # Raises KeyboardInterrupt in the main thread + time.sleep(0.25) + thread.interrupt_main() + + thread.start_new_thread(interrupter, ()) + else: + # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT + # for one second in the future, but easy to schedule SIGALRM. + def sigalarm(num, frame): + raise KeyboardInterrupt + + old_signal_handler = signal.signal(signal.SIGALRM, sigalarm) + signal.alarm(1) + + raised = False + try: + # Will be interrupted by a KeyboardInterrupt. + next(db.foo.find({"$where": where})) # type: ignore[call-overload] + except KeyboardInterrupt: + raised = True + + # Can't use self.assertRaises() because it doesn't catch system + # exceptions + self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") + + # Raises AssertionError due to PYTHON-294 -- Mongo's response to + # the previous find() is still waiting to be read on the socket, + # so the request id's don't match. + self.assertEqual({"_id": 1}, next(db.foo.find())) # type: ignore[call-overload] + finally: + if old_signal_handler: + signal.signal(signal.SIGALRM, old_signal_handler) - # Assure we acquire a request socket. 
+ def test_operation_failure(self): + # Ensure MongoClient doesn't close socket after it gets an error + # response to getLastError. PYTHON-395. We need a new client here + # to avoid race conditions caused by replica set failover or idle + # socket reaping. + client = self.single_client() client.pymongo_test.test.find_one() - self.assertTrue(client.in_request()) pool = get_pool(client) - self.assertRequestSocket(pool) - self.assertSameSock(pool) + socket_count = len(pool.conns) + self.assertGreaterEqual(socket_count, 1) + old_conn = next(iter(pool.conns)) + client.pymongo_test.test.drop() + client.pymongo_test.test.insert_one({"_id": "foo"}) + with self.assertRaises(OperationFailure): + client.pymongo_test.test.insert_one({"_id": "foo"}) + + self.assertEqual(socket_count, len(pool.conns)) + new_con = next(iter(pool.conns)) + self.assertEqual(old_conn, new_con) + + def test_lazy_connect_w0(self): + # Ensure that connect-on-demand works when the first operation is + # an unacknowledged write. This exercises _writable_max_wire_version(). - client.end_request() - self.assertNoRequest(pool) - self.assertDifferentSock(pool) + # Use a separate collection to avoid races where we're still + # completing an operation on a collection while the next test begins. + client_context.client.drop_database("test_lazy_connect_w0") + self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") - # Trigger auto_start_request - client.pymongo_test.test.find_one() - self.assertRequestSocket(pool) - self.assertSameSock(pool) + client = self.rs_or_single_client(connect=False, w=0) + client.test_lazy_connect_w0.test.insert_one({}) + + def predicate(): + return client.test_lazy_connect_w0.test.count_documents({}) == 1 + + wait_until(predicate, "find one document") + + client = self.rs_or_single_client(connect=False, w=0) + client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) + + def predicate(): + return (client.test_lazy_connect_w0.test.find_one()).get("x") == 1 + + wait_until(predicate, "update one document") + + client = self.rs_or_single_client(connect=False, w=0) + client.test_lazy_connect_w0.test.delete_one({}) + + def predicate(): + return client.test_lazy_connect_w0.test.count_documents({}) == 0 - def test_nested_request(self): - # auto_start_request is False - client = get_client() + wait_until(predicate, "delete one document") + + @client_context.require_no_mongos + def test_exhaust_network_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. 
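test_lazy_connect_w0 must poll because w=0 writes are unacknowledged: insert_one returns before the server applies anything, so the assertions only hold eventually. A sketch of a wait_until-style polling helper matching how these tests use it (the real helper lives in the test utilities):

import time

def wait_until(predicate, description, timeout=10.0):
    # Poll until the predicate holds or the deadline passes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return
        time.sleep(0.05)
    raise AssertionError(f"timed out waiting for {description}")

results = []
results.append(1)  # stands in for an eventually-visible w=0 write
wait_until(lambda: len(results) == 1, "one document to appear")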
+ client = self.rs_or_single_client(maxPoolSize=1, retryReads=False) + collection = client.pymongo_test.test pool = get_pool(client) - self.assertFalse(client.in_request()) - - # Start and end request - client.start_request() - self.assertInRequestAndSameSock(client, pool) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - - # Double-nesting - client.start_request() - client.start_request() - client.end_request() - self.assertInRequestAndSameSock(client, pool) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - - # Extra end_request calls have no effect - count stays at zero - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - - client.start_request() - self.assertInRequestAndSameSock(client, pool) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - - def test_request_threads(self): - client = get_client(auto_start_request=False) + pool._check_interval_seconds = None # Never check. + + # Ensure a socket. + connected(client) + + # Cause a network error. + conn = one(pool.conns) + conn.conn.close() + cursor = collection.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(ConnectionFailure): + next(cursor) + + self.assertTrue(conn.closed) + + # The semaphore was decremented despite the error. + self.assertEqual(0, pool.requests) + + @client_context.require_auth + def test_auth_network_error(self): + # Make sure there's no semaphore leak if we get a network error + # when authenticating a new socket with cached credentials. + + # Get a client with one socket so we detect if it's leaked. + c = connected( + self.rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False) + ) + + # Cause a network error on the actual socket. + pool = get_pool(c) + conn = one(pool.conns) + conn.conn.close() + + # Connection.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. + with self.assertRaises(AutoReconnect): + c.test.collection.find_one() + + # No semaphore leak, the pool is allowed to make a new socket. + c.test.collection.find_one() + + @client_context.require_no_replica_set + def test_connect_to_standalone_using_replica_set_name(self): + client = self.single_client(replicaSet="anything", serverSelectionTimeoutMS=100) + with self.assertRaises(AutoReconnect): + client.test.test.find_one() + + @client_context.require_replica_set + def test_stale_getmore(self): + # A cursor is created, but its member goes down and is removed from + # the topology before the getMore message is sent. Test that + # MongoClient._run_operation_with_response handles the error. 
+ with self.assertRaises(AutoReconnect): + client = self.rs_client(connect=False, serverSelectionTimeoutMS=100) + client._run_operation( + operation=message._GetMore( + "pymongo_test", + "collection", + 101, + 1234, + client.codec_options, + ReadPreference.PRIMARY, + None, + client, + None, + None, + False, + None, + ), + unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, + address=("not-a-member", 27017), + ) + + def test_heartbeat_frequency_ms(self): + class HeartbeatStartedListener(ServerHeartbeatListener): + def __init__(self): + self.results = [] + + def started(self, event): + self.results.append(event) + + def succeeded(self, event): + pass + + def failed(self, event): + pass + + old_init = ServerHeartbeatStartedEvent.__init__ + heartbeat_times = [] + + def init(self, *args): + old_init(self, *args) + heartbeat_times.append(time.time()) + + try: + ServerHeartbeatStartedEvent.__init__ = init # type: ignore + listener = HeartbeatStartedListener() + uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( + client_context.host, + client_context.port, + ) + self.single_client(uri, event_listeners=[listener]) + wait_until( + lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" + ) + + # Default heartbeatFrequencyMS is 10 sec. Check the interval was + # closer to 0.5 sec with heartbeatFrequencyMS configured. + self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + + finally: + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore + + def test_small_heartbeat_frequency_ms(self): + uri = "mongodb://example/?heartbeatFrequencyMS=499" + with self.assertRaises(ConfigurationError) as context: + MongoClient(uri) + + self.assertIn("heartbeatFrequencyMS", str(context.exception)) + + def test_compression(self): + def compression_settings(client): + pool_options = client.options.pool_options + return pool_options._compression_settings + + uri = "mongodb://localhost:27017/?compressors=zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, 4) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + # According to the connection string spec, unsupported values + # just raise a warning and are ignored. 
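Per the connection-string-spec behavior quoted above, compressor parsing never hard-fails: unknown names and out-of-range zlib levels are dropped with a warning, as the lines that follow verify. A condensed sketch of that filtering; the function names and supported set are assumptions:

import warnings

_KNOWN = {"zlib", "snappy", "zstd"}

def parse_compressors(raw, available=("zlib",)):
    keep = []
    for name in raw.split(","):
        if name in _KNOWN and name in available:
            keep.append(name)
        else:
            warnings.warn(f"Unsupported compressor: {name!r}", stacklevel=2)
    return keep

def parse_zlib_level(raw):
    level = int(raw)
    if not -1 <= level <= 9:
        warnings.warn("zlibCompressionLevel must be -1..9, ignoring", stacklevel=2)
        return -1  # the default the tests above expect
    return level

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    assert parse_compressors("foobar,zlib") == ["zlib"]
    assert parse_zlib_level("10") == -1
    assert parse_zlib_level("4") == 4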
+ uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + if not _have_snappy(): + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy"]) + uri = "mongodb://localhost:27017/?compressors=snappy,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) + + if not _have_zstd(): + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd"]) + uri = "mongodb://localhost:27017/?compressors=zstd,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) + + options = client_context.default_client_options + if "compressors" in options and "zlib" in options["compressors"]: + for level in range(-1, 10): + client = self.single_client(zlibcompressionlevel=level) + # No error + client.pymongo_test.test.find_one() + + @client_context.require_sync + def test_reset_during_update_pool(self): + client = self.rs_or_single_client(minPoolSize=10) + client.admin.command("ping") pool = get_pool(client) - self.assertNotInRequestAndDifferentSock(client, pool) - - started_request, ended_request = threading.Event(), threading.Event() - checked_request = threading.Event() - thread_done = [False] - - # Starting a request in one thread doesn't put the other thread in a - # request - def f(): - self.assertNotInRequestAndDifferentSock(client, pool) - client.start_request() - self.assertInRequestAndSameSock(client, pool) - started_request.set() - checked_request.wait() - checked_request.clear() - self.assertInRequestAndSameSock(client, pool) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - ended_request.set() - checked_request.wait() - thread_done[0] = True - - t = threading.Thread(target=f) - t.setDaemon(True) + generation = pool.gen.get_overall() + + # Continuously reset the pool. 
+ class ResetPoolThread(threading.Thread): + def __init__(self, pool): + super().__init__() + self.running = True + self.pool = pool + + def stop(self): + self.running = False + + def _run(self): + while self.running: + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) + client._topology.handle_error(pool.address, ctx) + time.sleep(0.001) + + def run(self): + self._run() + + t = ResetPoolThread(pool) t.start() - # It doesn't matter in what order the main thread or t initially get - # to started_request.set() / wait(); by waiting here we ensure that t - # has called client.start_request() before we assert on the next line. - started_request.wait() - self.assertNotInRequestAndDifferentSock(client, pool) - checked_request.set() - ended_request.wait() - self.assertNotInRequestAndDifferentSock(client, pool) - checked_request.set() - t.join() - self.assertNotInRequestAndDifferentSock(client, pool) - self.assertTrue(thread_done[0], "Thread didn't complete") - def test_interrupt_signal(self): - if sys.platform.startswith('java'): - # We can't figure out how to raise an exception on a thread that's - # blocked on a socket, whether that's the main thread or a worker, - # without simply killing the whole thread in Jython. This suggests - # PYTHON-294 can't actually occur in Jython. - raise SkipTest("Can't test interrupts in Jython") + # Ensure that update_pool completes without error even when the pool + # is reset concurrently. + try: + while True: + for _ in range(10): + client._topology.update_pool() + if generation != pool.gen.get_overall(): + break + finally: + t.stop() + t.join() + client.admin.command("ping") + + def test_background_connections_do_not_hold_locks(self): + min_pool_size = 10 + client = self.rs_or_single_client( + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) + # Create a single connection in the pool. + client.admin.command("ping") - # Test fix for PYTHON-294 -- make sure MongoClient closes its - # socket if it gets an interrupt while waiting to recv() from it. - c = get_client() - db = c.pymongo_test + # Cause new connections stall for a few seconds. + pool = get_pool(client) + original_connect = pool.connect - # A $where clause which takes 1.5 sec to execute - where = delay(1.5) + def stall_connect(*args, **kwargs): + time.sleep(2) + return original_connect(*args, **kwargs) - # Need exactly 1 document so find() will execute its $where clause once - db.drop_collection('foo') - db.foo.insert({'_id': 1}) + pool.connect = stall_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") - def interrupter(): - # Raises KeyboardInterrupt in the main thread - time.sleep(0.25) - thread.interrupt_main() + # Wait for the background thread to start creating connections + wait_until(lambda: len(pool.conns) > 1, "start creating connections") - thread.start_new_thread(interrupter, ()) + # Assert that application operations do not block. + for _ in range(10): + start = time.monotonic() + client.admin.command("ping") + total = time.monotonic() - start + # Each ping command should not take more than 2 seconds + self.assertLess(total, 2) - raised = False - try: - # Will be interrupted by a KeyboardInterrupt. 
- db.foo.find({'$where': where}).next() - except KeyboardInterrupt: - raised = True - - # Can't use self.assertRaises() because it doesn't catch system - # exceptions - self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") - - # Raises AssertionError due to PYTHON-294 -- Mongo's response to the - # previous find() is still waiting to be read on the socket, so the - # request id's don't match. - self.assertEqual( - {'_id': 1}, - db.foo.find().next() + def test_background_connections_log_on_error(self): + with self.assertLogs("pymongo.client", level="ERROR") as cm: + client = self.rs_or_single_client(minPoolSize=1) + # Create a single connection in the pool. + client.admin.command("ping") + + # Cause new connections to fail. + pool = get_pool(client) + + def fail_connect(*args, **kwargs): + raise Exception("failed to connect") + + pool.connect = fail_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + pool.reset_without_pause() + + wait_until( + lambda: "failed to connect" in "".join(cm.output), "start creating connections" + ) + self.assertIn("MongoClient background task encountered an error", "".join(cm.output)) + + @client_context.require_replica_set + def test_direct_connection(self): + # direct_connection=True should result in Single topology. + client = self.rs_or_single_client(directConnection=True) + client.admin.command("ping") + self.assertEqual(len(client.nodes), 1) + self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) + + # direct_connection=False should result in RS topology. + client = self.rs_or_single_client(directConnection=False) + client.admin.command("ping") + self.assertGreaterEqual(len(client.nodes), 1) + self.assertIn( + client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], ) - def test_operation_failure_without_request(self): - # Ensure MongoClient doesn't close socket after it gets an error - # response to getLastError. PYTHON-395. - c = get_client() - pool = get_pool(c) - self.assertEqual(1, len(pool.sockets)) - old_sock_info = iter(pool.sockets).next() - c.pymongo_test.test.drop() - c.pymongo_test.test.insert({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - c.pymongo_test.test.insert, {'_id': 'foo'}) + # directConnection=True should error when multiple hosts are passed as a list.
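To make the assertion below concrete, here is the same behavior outside the test harness (a sketch; the host names are placeholders, and the single-seed case assumes a reachable deployment):

```python
from pymongo import MongoClient
from pymongo.errors import ConfigurationError

# A single seed with directConnection=True pins the client to that exact
# server (Single topology), even if it is a replica-set member.
client = MongoClient("localhost", 27017, directConnection=True)

# Multiple seeds contradict a "direct" connection, so the constructor
# rejects the combination up front.
try:
    MongoClient(["host1:27017", "host2:27017"], directConnection=True)
except ConfigurationError as exc:
    print(exc)  # cannot use directConnection with multiple seeds
```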
+ with self.assertRaises(ConfigurationError): + MongoClient(["host1", "host2"], directConnection=True) - self.assertEqual(1, len(pool.sockets)) - new_sock_info = iter(pool.sockets).next() - self.assertEqual(old_sock_info, new_sock_info) + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") + def test_continuous_network_errors(self): + def server_description_count(): + i = 0 + for obj in gc.get_objects(): + try: + if isinstance(obj, ServerDescription): + i += 1 + except ReferenceError: + pass + return i + + gc.collect() + with client_knobs(min_heartbeat_interval=0.002): + client = self.simple_client( + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 + ) + initial_count = server_description_count() + with self.assertRaises(ServerSelectionTimeoutError): + client.test.test.find_one() + gc.collect() + final_count = server_description_count() + client.close() + # If a bug like PYTHON-2433 is reintroduced then too many + # ServerDescriptions will be kept alive and this test will fail: + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) + + @client_context.require_failCommand_fail_point + def test_network_error_message(self): + client = self.single_client(retryReads=False) + client.admin.command("ping") # connect + with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + assert client.address is not None + expected = "{}:{}: ".format(*(client.address)) + with self.assertRaisesRegex(AutoReconnect, expected): + client.pymongo_test.test.find_one({}) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") + def test_process_periodic_tasks(self): + client = self.rs_or_single_client() + coll = client.db.collection + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find(batch_size=2) + cursor.next() + c_id = cursor.cursor_id + self.assertIsNotNone(c_id) + client.close() + # Add cursor to kill cursors queue + del cursor + wait_until( + lambda: client._kill_cursors_queue, + "waited for cursor to be added to queue", + ) + client._process_periodic_tasks() # This must not raise or print any exceptions + with self.assertRaises(InvalidOperation): + coll.insert_many([{} for _ in range(5)]) + + def test_service_name_from_kwargs(self): + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + client._connect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + client._connect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() + client = MongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", + connect=False, + ) + client._connect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() + + def test_srv_max_hosts_kwarg(self): + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + client._connect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client._connect() + self.assertEqual(len(client.topology_description.server_descriptions()), 1) + client = 
self.simple_client( + "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 + ) + client._connect() + self.assertEqual(len(client.topology_description.server_descriptions()), 2) + + @unittest.skipIf( + client_context.load_balancer, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + @client_context.require_sync + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. + outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + + def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" + if expected_env is not None: + metadata["env"] = expected_env + + if "AWS_REGION" not in env_vars: + os.environ["AWS_REGION"] = "" + client = self.rs_or_single_client(serverSelectionTimeoutMS=10000) + client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + + def test_handshake_01_aws(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) - def test_operation_failure_with_request(self): - # Ensure MongoClient doesn't close socket after it gets an error - # response to getLastError. PYTHON-395. - c = get_client(auto_start_request=True) - pool = get_pool(c) + def test_handshake_02_azure(self): + self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + def test_handshake_03_gcp(self): + self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) - # Pool reserves a socket for this thread. 
- c.pymongo_test.test.find_one() - self.assertTrue(isinstance(pool._get_request_state(), SocketInfo)) + def test_handshake_04_vercel(self): + self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) - old_sock_info = pool._get_request_state() - c.pymongo_test.test.drop() - c.pymongo_test.test.insert({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - c.pymongo_test.test.insert, {'_id': 'foo'}) + def test_handshake_05_multiple(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. + self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) - # OperationFailure doesn't affect the request socket - self.assertEqual(old_sock_info, pool._get_request_state()) + def test_handshake_06_region_too_long(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) - def test_alive(self): - self.assertTrue(get_client().alive()) + def test_handshake_07_memory_invalid_int(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, + {"name": "aws.lambda"}, + ) - client = MongoClient('doesnt exist', _connect=False) - self.assertFalse(client.alive()) + def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) - def test_wire_version(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='b:2', # Pass a secondary. - replicaSet='rs', - _connect=False) + def test_handshake_09_container_with_provider(self): + self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) - c.set_wire_version_range('a:1', 1, 5) - c.db.collection.find_one() # Connect. - self.assertEqual(c.min_wire_version, 1) - self.assertEqual(c.max_wire_version, 5) + def test_dict_hints(self): + self.db.t.find(hint={"x": 1}) - c.set_wire_version_range('a:1', 10, 11) - c.disconnect() - self.assertRaises(ConfigurationError, c.db.collection.find_one) + def test_dict_hints_sort(self): + result = self.db.t.find() + result.sort({"x": 1}) - def test_max_wire_version(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='b:2', # Pass a secondary. - replicaSet='rs', - _connect=False) - - c.set_max_write_batch_size('a:1', 1) - c.set_max_write_batch_size('b:2', 2) - - # Starts with default max batch size. - self.assertEqual(1000, c.max_write_batch_size) - c.db.collection.find_one() # Connect. - # Uses primary's max batch size. - self.assertEqual(c.max_write_batch_size, 1) - - # b becomes primary. - c.mock_primary = 'b:2' - c.disconnect() - self.assertEqual(1000, c.max_write_batch_size) - c.db.collection.find_one() # Connect. 
- self.assertEqual(c.max_write_batch_size, 2) - - def test_wire_version_mongos_ha(self): - c = MockClient( - standalones=[], - members=[], - mongoses=['a:1', 'b:2', 'c:3'], - host='a:1,b:2,c:3', - _connect=False) - - c.set_wire_version_range('a:1', 2, 5) - c.set_wire_version_range('b:2', 2, 2) - c.set_wire_version_range('c:3', 1, 1) - c.db.collection.find_one() # Connect. - - # Which member did we use? - used_host = '%s:%s' % (c.host, c.port) - expected_min, expected_max = c.mock_wire_versions[used_host] - self.assertEqual(expected_min, c.min_wire_version) - self.assertEqual(expected_max, c.max_wire_version) - - c.set_wire_version_range('a:1', 0, 0) - c.set_wire_version_range('b:2', 0, 0) - c.set_wire_version_range('c:3', 0, 0) - c.disconnect() - c.db.collection.find_one() - used_host = '%s:%s' % (c.host, c.port) - expected_min, expected_max = c.mock_wire_versions[used_host] - self.assertEqual(expected_min, c.min_wire_version) - self.assertEqual(expected_max, c.max_wire_version) - - def test_replica_set(self): - client = MongoClient(host, port) - name = client.pymongo_test.command('ismaster').get('setName') - if not name: - raise SkipTest('Not connected to a replica set') - - MongoClient(host, port, replicaSet=name) # No error. + self.db.t.find(sort={"x": 1}) - self.assertRaises( - ConfigurationError, - MongoClient, host, port, replicaSet='bad' + name) + def test_dict_hints_create_index(self): + self.db.t.create_index({"x": pymongo.ASCENDING}) - def test_lazy_connect_w0(self): - client = get_client(_connect=False) - client.pymongo_test.test.insert({}, w=0) + def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + client_context.client.pymongo_test.drop_collection("java_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("java_uuid") + + def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") + + def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = self.single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + def test_uuid_queries(self): + db = client_context.client.pymongo_test + coll = db.test + coll.drop() + + uu = uuid.uuid4() + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, 
coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) + self.assertEqual(2, coll.count_documents({})) + docs = coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, coll.count_documents(predicate)) + docs = coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + coll.drop() + + +class TestExhaustCursor(IntegrationTest): + """Test that clients properly handle errors from exhaust cursors.""" + + def setUp(self): + super().setUp() + if client_context.is_mongos: + raise SkipTest("mongos doesn't support exhaust, SERVER-2627") + + def test_exhaust_query_server_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. + client = connected(self.rs_or_single_client(maxPoolSize=1)) + + collection = client.pymongo_test.test + pool = get_pool(client) + conn = one(pool.conns) + + # This will cause OperationFailure in all mongo versions since + # the value for $orderby must be a document. + cursor = collection.find( + SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST + ) + + with self.assertRaises(OperationFailure): + cursor.next() + self.assertFalse(conn.closed) + + # The socket was checked in and the semaphore was decremented. + self.assertIn(conn, pool.conns) + self.assertEqual(0, pool.requests) + + def test_exhaust_getmore_server_error(self): + # When doing a getmore on an exhaust cursor, the socket stays checked + # out on success but it's checked in on error to avoid semaphore leaks. + client = self.rs_or_single_client(maxPoolSize=1) + collection = client.pymongo_test.test + collection.drop() + + collection.insert_many([{} for _ in range(200)]) + self.addCleanup(client_context.client.pymongo_test.test.drop) + + pool = get_pool(client) + pool._check_interval_seconds = None # Never check. + conn = one(pool.conns) + + cursor = collection.find(cursor_type=CursorType.EXHAUST) + + # Initial query succeeds. + cursor.next() + + # Cause a server error on getmore. + def receive_message(request_id): + # Discard the actual server response. + Connection.receive_message(conn, request_id) + + # responseFlags bit 1 is QueryFailure. + msg = struct.pack("= _NAMESPACE_DOC_BYTES: + num_models += 1 + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @client_context.require_version_min(8, 0, 0, -24) + def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + num_models, models = self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + + # No batch splitting required. 
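The single-event assertion that follows rests on byte accounting: a bulkWrite message may grow until `maxMessageSizeBytes` is exhausted, with every operation and every distinct namespace contributing a fixed overhead (the test module's `_OPERATION_DOC_BYTES` and `_NAMESPACE_DOC_BYTES`). A rough, self-contained sketch of that arithmetic, using illustrative constants rather than the real server-reported values:

```python
# Illustrative numbers only; the real limits come from the server's hello
# response, and the overhead constants live in the test module.
max_message_size_bytes = 48_000_000
op_overhead = 57      # stand-in for _OPERATION_DOC_BYTES
ns_overhead = 217     # stand-in for _NAMESPACE_DOC_BYTES

doc_size = 1_000_000  # size of each {"a": "bbb..."} insert document

# Number of inserts that exactly fill one batch for a single namespace.
ops_per_batch = (max_message_size_bytes - ns_overhead) // (doc_size + op_overhead)

# One more small insert still fits if it reuses "db.coll", but a new,
# oversized namespace adds its own nsInfo entry and forces a second batch.
print(ops_per_batch)
```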
+ result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @client_context.require_version_min(8, 0, 0, -24) + def test_11_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + num_models, models = self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + self.addCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @client_context.require_version_min(8, 0, 0, -24) + def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): + client = self.rs_or_single_client() + + # Document too large. + b_repeated = "b" * self.max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + # Namespace too large. 
+ c_repeated = "c" * self.max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(DocumentTooLarge) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + @client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_13_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = self.rs_or_single_client(auto_encryption_opts=opts) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + @client_context.require_version_min(8, 0, 0, -24) + def test_upserted_result(self): + client = self.rs_or_single_client() + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. 
+ + @client_context.require_version_min(8, 0, 0, -24) + def test_15_unacknowledged_write_across_batches(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + client.db.command({"create": "db.coll"}) + + b_repeated = "b" * (self.max_bson_object_size - 500) + models = [ + InsertOne(namespace="db.coll", document={"a": b_repeated}) + for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1) + ] + + listener.reset() + + res = client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0)) + self.assertEqual(False, res.acknowledged) + + events = listener.started_events + self.assertEqual(2, len(events)) + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size), + len(events[0].command["ops"]), + ) + self.assertEqual(1, len(events[1].command["ops"])) + self.assertEqual(events[0].operation_id, events[1].operation_id) + self.assertEqual({"w": 0}, events[0].command["writeConcern"]) + self.assertEqual({"w": 0}, events[1].command["writeConcern"]) + + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size) + 1, + collection.count_documents({}), + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites +class TestClientBulkWriteCSOT(IntegrationTest): + def setUp(self): + super().setUp() + self.max_write_batch_size = client_context.max_write_batch_size + self.max_bson_object_size = client_context.max_bson_size + self.max_message_size_bytes = client_context.max_message_size_bytes + + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_failCommand_fail_point + @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True) + def test_timeout_in_multi_batch_bulk_write(self): + if sys.platform != "linux" and "CI" in os.environ: + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and MacOS") + _OVERHEAD = 500 + + internal_client = self.rs_or_single_client(timeoutMS=None) + + collection = internal_client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010}, + } + with self.fail_point(fail_command): + models = [] + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + event_listeners=[listener], + readConcernLevel="majority", + readPreference="primary", + timeoutMS=2000, + w="majority", + ) + # Initialize the client with a larger timeout to help make the test less flaky. + with pymongo.timeout(10): + client.admin.command("ping") + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, NetworkTimeout) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) diff --git a/test/test_client_context.py b/test/test_client_context.py new file mode 100644 index 0000000000..9c1b21ee78
--- /dev/null +++ b/test/test_client_context.py @@ -0,0 +1,57 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test import SkipTest, UnitTest, client_context, unittest + +_IS_SYNC = True + + +class TestClientContext(UnitTest): + def test_must_connect(self): + if not os.environ.get("PYMONGO_MUST_CONNECT"): + raise SkipTest("PYMONGO_MUST_CONNECT is not set") + + self.assertTrue( + client_context.connected, + "client context must be connected when " + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), + ) + + def test_enableTestCommands_is_disabled(self): + if not os.environ.get("DISABLE_TEST_COMMANDS"): + raise SkipTest("DISABLE_TEST_COMMANDS is not set") + + self.assertFalse( + client_context.test_commands_enabled, + "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.", + ) + + def test_free_threading_is_enabled(self): + if "free-threading build" not in sys.version: + raise SkipTest("this test requires the Python free-threading build") + + # If the GIL is enabled then pymongo or one of our deps does not support free-threading. + self.assertFalse(sys._is_gil_enabled()) # type: ignore[attr-defined] + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_client_metadata.py b/test/test_client_metadata.py new file mode 100644 index 0000000000..a94c5aa25e --- /dev/null +++ b/test/test_client_metadata.py @@ -0,0 +1,232 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test import IntegrationTest +from test.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import MongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = True + +# Location of JSON test specifications. 
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(IntegrationTest): + def setUp(self): + super().setUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addCleanup(self.server.stop) + + def send_ping_and_get_metadata( + self, client: MongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + def check_metadata_added( + self, + client: MongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + time.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = self.send_ping_and_get_metadata( + client, True + ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + def test_append_metadata(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_append_metadata_platform_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_append_metadata_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_append_metadata_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + 
driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, None) + + def test_multiple_successive_metadata_updates(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_multiple_successive_metadata_updates_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + + def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + def test_duplicate_driver_name_no_op(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + time.sleep(0.005) + # add same metadata again + self.check_metadata_added(client, "Framework", None, None) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_code.py b/test/test_code.py index 8569f96366..23f0af5cef 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -1,4 +1,5 @@ -# Copyright 2009-2014 MongoDB, Inc. +# +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +14,14 @@ # limitations under the License. 
"""Tests for the Code wrapper.""" +from __future__ import annotations -import unittest import sys + sys.path[0:0] = [""] +from test import unittest + from bson.code import Code @@ -26,17 +30,15 @@ def test_types(self): self.assertRaises(TypeError, Code, 5) self.assertRaises(TypeError, Code, None) self.assertRaises(TypeError, Code, "aoeu", 5) - self.assertRaises(TypeError, Code, u"aoeu", 5) self.assertTrue(Code("aoeu")) - self.assertTrue(Code(u"aoeu")) self.assertTrue(Code("aoeu", {})) - self.assertTrue(Code(u"aoeu", {})) def test_read_only(self): c = Code("blah") def set_c(): - c.scope = 5 + c.scope = 5 # type: ignore + self.assertRaises(AttributeError, set_c) def test_code(self): @@ -44,21 +46,28 @@ def test_code(self): a_code = Code("hello world") self.assertTrue(a_code.startswith("hello")) self.assertTrue(a_code.endswith("world")) - self.assertTrue(isinstance(a_code, Code)) - self.assertFalse(isinstance(a_string, Code)) - self.assertEqual(a_code.scope, {}) - a_code.scope["my_var"] = 5 - self.assertEqual(a_code.scope, {"my_var": 5}) + self.assertIsInstance(a_code, Code) + self.assertNotIsInstance(a_string, Code) + self.assertIsNone(a_code.scope) + with_scope = Code("hello world", {"my_var": 5}) + self.assertEqual({"my_var": 5}, with_scope.scope) + empty_scope = Code("hello world", {}) + self.assertEqual({}, empty_scope.scope) + another_scope = Code(with_scope, {"new_var": 42}) + self.assertEqual(str(with_scope), str(another_scope)) + self.assertEqual({"new_var": 42, "my_var": 5}, another_scope.scope) + # No error. + Code("héllø world¡") def test_repr(self): - c = Code("hello world") + c = Code("hello world", {}) self.assertEqual(repr(c), "Code('hello world', {})") c.scope["foo"] = "bar" self.assertEqual(repr(c), "Code('hello world', {'foo': 'bar'})") c = Code("hello world", {"blah": 3}) self.assertEqual(repr(c), "Code('hello world', {'blah': 3})") c = Code("\x08\xFF") - self.assertEqual(repr(c), "Code(%s, {})" % (repr("\x08\xFF"),)) + self.assertEqual(repr(c), "Code({}, None)".format(repr("\x08\xFF"))) def test_equality(self): b = Code("hello") @@ -67,14 +76,17 @@ def test_equality(self): self.assertEqual(c, Code("hello", {"foo": 5})) self.assertNotEqual(c, Code("hello", {"foo": 6})) self.assertEqual(b, Code("hello")) - self.assertEqual(b, Code("hello", {})) + self.assertEqual(b, Code("hello", None)) self.assertNotEqual(b, Code("hello ")) self.assertNotEqual("hello", Code("hello")) # Explicitly test inequality self.assertFalse(c != Code("hello", {"foo": 5})) self.assertFalse(b != Code("hello")) - self.assertFalse(b != Code("hello", {})) + self.assertFalse(b != Code("hello", None)) + + def test_hash(self): + self.assertRaises(TypeError, hash, Code("hello world")) def test_scope_preserved(self): a = Code("hello") @@ -88,8 +100,7 @@ def test_scope_preserved(self): def test_scope_kwargs(self): self.assertEqual({"a": 1}, Code("", a=1).scope) self.assertEqual({"a": 1}, Code("", {"a": 2}, a=1).scope) - self.assertEqual({"a": 1, "b": 2, "c": 3}, - Code("", {"b": 2}, a=1, c=3).scope) + self.assertEqual({"a": 1, "b": 2, "c": 3}, Code("", {"b": 2}, a=1, c=3).scope) if __name__ == "__main__": diff --git a/test/test_collation.py b/test/test_collation.py new file mode 100644 index 0000000000..903f24a228 --- /dev/null +++ b/test/test_collation.py @@ -0,0 +1,282 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collation module.""" +from __future__ import annotations + +import functools +import warnings +from test import IntegrationTest, client_context, unittest +from test.utils_shared import EventListener, OvertCommandListener +from typing import Any + +from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) +from pymongo.errors import ConfigurationError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestCollationObject(unittest.TestCase): + def test_constructor(self): + self.assertRaises(TypeError, Collation, locale=42) + # Fill in a locale to test the other options. + _Collation = functools.partial(Collation, "en_US") + # No error. + _Collation(caseFirst=CollationCaseFirst.UPPER) + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") + self.assertRaises(TypeError, _Collation, alternate=5) + self.assertRaises(TypeError, _Collation, maxVariable=2) + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") + + # No errors. + Collation("en_US", future_option="bar", another_option=42) + collation = Collation( + "en_US", + caseLevel=True, + caseFirst=CollationCaseFirst.UPPER, + strength=CollationStrength.QUATERNARY, + numericOrdering=True, + alternate=CollationAlternate.SHIFTED, + maxVariable=CollationMaxVariable.SPACE, + normalization=True, + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) + + +class TestCollation(IntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + + @client_context.require_connection + def setUp(self) -> None: + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + def tearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None + self.listener.reset() + super().tearDown() + + def last_command_started(self): + return self.listener.started_events[-1].command + + def assertCollationInLastCommand(self): + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) + + def test_create_collection(self): + self.db.test.drop() + self.db.create_collection("test", collation=self.collation) + self.assertCollationInLastCommand() + + # Test passing collation as a dict as well. 
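Since the next hunk passes the collation as a dict, it is worth noting that `Collation.document` is exactly that dict form, so the two spellings are interchangeable (a minimal sketch):

```python
from pymongo.collation import Collation, CollationStrength

collation = Collation("en_US", strength=CollationStrength.SECONDARY)
# .document is the wire-format dict that any collation= parameter accepts.
assert collation.document == {"locale": "en_US", "strength": 2}
```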
+ self.db.test.drop() + self.listener.reset() + self.db.create_collection("test", collation=self.collation.document) + self.assertCollationInLastCommand() + + def test_index_model(self): + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) + + def test_create_index(self): + self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.started_events[0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) + + def test_aggregate(self): + self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) + self.assertCollationInLastCommand() + + def test_count_documents(self): + self.db.test.count_documents({}, collation=self.collation) + self.assertCollationInLastCommand() + + def test_distinct(self): + self.db.test.distinct("foo", collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + self.db.test.find(collation=self.collation).distinct("foo") + self.assertCollationInLastCommand() + + def test_find_command(self): + self.db.test.insert_one({"is this thing on?": True}) + self.listener.reset() + next(self.db.test.find(collation=self.collation)) + self.assertCollationInLastCommand() + + def test_explain_command(self): + self.listener.reset() + self.db.test.find(collation=self.collation).explain() + # The collation should be part of the explained command. + self.assertEqual( + self.collation.document, self.last_command_started()["explain"]["collation"] + ) + + def test_delete(self): + self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + self.listener.reset() + self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + def test_update(self): + self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + def test_find_and(self): + self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) + self.assertCollationInLastCommand() + + self.listener.reset() + self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) + self.assertCollationInLastCommand() + + def test_bulk_write(self): + self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, 
upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command + + def check_ops(ops): + for op in ops: + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) + else: + self.assertEqual(self.collation.document, op["collation"]) + + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) + + def test_indexes_same_keys_different_collations(self): + self.db.test.drop() + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) + indexes = self.db.test.index_information() + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + self.db.test.drop_index("fieldname_1") + indexes = self.db.test.index_information() + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) + + def test_unacknowledged_write(self): + unacknowledged = WriteConcern(w=0) + collection = self.db.get_collection("test", write_concern=unacknowledged) + with self.assertRaises(ConfigurationError): + collection.update_one( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + with self.assertRaises(ConfigurationError): + collection.bulk_write([update_one]) + + def test_cursor_collation(self): + self.db.test.insert_one({"hello": "world"}) + next(self.db.test.find().collation(self.collation)) + self.assertCollationInLastCommand() diff --git a/test/test_collection.py b/test/test_collection.py index 433cc5b78d..18be309f22 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1,6 +1,4 @@ -# -*- coding: utf-8 -*- - -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,67 +13,83 @@ # limitations under the License. 
"""Test the collection module.""" +from __future__ import annotations -import itertools +import asyncio +import contextlib import re import sys -import threading -import time -import unittest -import warnings +from codecs import utf_8_decode +from collections import defaultdict +from test.utils import get_pool, is_mongos +from typing import Any, Iterable, no_type_check -from nose.plugins.skip import SkipTest +from pymongo.synchronous.database import Database sys.path[0:0] = [""] -from bson.binary import Binary -from bson.regex import Regex -from bson.code import Code -from bson.dbref import DBRef +from test import ( # TODO: fix sync imports in PYTHON-4528 + IntegrationTest, + UnitTest, + client_context, + unittest, +) +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + OvertCommandListener, + wait_until, +) +from test.version import Version + +from bson import encode +from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from bson.py3compat import b -from bson.son import SON, RE_TYPE -from pymongo import (ASCENDING, DESCENDING, GEO2D, - GEOHAYSTACK, GEOSPHERE, HASHED) -from pymongo import message as message_module -from pymongo.collection import Collection -from pymongo.command_cursor import CommandCursor -from pymongo.mongo_replica_set_client import MongoReplicaSetClient +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex +from bson.son import SON +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT +from pymongo.bulk_shared import BulkWriteError +from pymongo.cursor_shared import CursorType +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.operations import * +from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference -from pymongo.son_manipulator import SONManipulator -from pymongo.errors import (DocumentTooLarge, - DuplicateKeyError, - InvalidDocument, - InvalidName, - InvalidOperation, - OperationFailure, - WTimeoutError) -from test.test_client import get_client -from test.utils import (is_mongos, joinall, enable_text_search, get_pool, - oid_generated_on_client) -from test import (qcheck, - version) - -have_uuid = True -try: - import uuid -except ImportError: - have_uuid = False - - -class TestCollection(unittest.TestCase): +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.synchronous.collection import Collection, ReturnDocument +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern - def setUp(self): - self.client = get_client() - self.db = self.client.pymongo_test - ismaster = self.db.command('ismaster') - self.setname = ismaster.get('setName') - self.w = len(ismaster.get('hosts', [])) or 1 +_IS_SYNC = True - def tearDown(self): - self.db.drop_collection("test_large_limit") - self.db = None - self.client = None + +class TestCollectionNoConnect(UnitTest): + """Test Collection features on a client that does not connect.""" + + db: Database + client: MongoClient + + def setUp(self) -> None: + super().setUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test def test_collection(self): 
self.assertRaises(TypeError, Collection, self.db, 5) @@ -95,193 +109,229 @@ def make_col(base, name): self.assertRaises(InvalidName, make_col, self.db.test, "tes..t") self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t") - self.assertTrue(isinstance(self.db.test, Collection)) + def test_getattr(self): + coll = self.db.test + self.assertIsInstance(coll["_does_not_exist"], Collection) + + with self.assertRaises(AttributeError) as context: + coll._does_not_exist + + # Message should be: + # "AttributeError: Collection has no attribute '_does_not_exist'. To + # access the test._does_not_exist collection, use + # database['test._does_not_exist']." + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + coll2 = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertEqual(coll2.write_concern, WriteConcern(w=0)) + self.assertNotEqual(coll.write_concern, coll2.write_concern) + coll3 = coll2.subcoll + self.assertEqual(coll2.write_concern, coll3.write_concern) + coll4 = coll2["subcoll"] + self.assertEqual(coll2.write_concern, coll4.write_concern) + + def test_iteration(self): + coll = self.db.coll + msg = "'Collection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, msg): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, msg): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) + + +class TestCollection(IntegrationTest): + w: int + + def setUp(self): + super().setUp() + self.w = client_context.w # type: ignore + + def tearDown(self): + self.db.test.drop() + self.db.drop_collection("test_large_limit") + super().tearDown() + + @contextlib.contextmanager + def write_concern_collection(self): + if client_context.is_rs: + with self.assertRaises(WriteConcernError): + # Unsatisfiable write concern. + yield Collection( + self.db, + "test", + write_concern=WriteConcern(w=len(client_context.nodes) + 1), + ) + else: + yield self.db.test + + def test_equality(self): + self.assertIsInstance(self.db.test, Collection) self.assertEqual(self.db.test, self.db["test"]) self.assertEqual(self.db.test, Collection(self.db, "test")) self.assertEqual(self.db.test.mike, self.db["test.mike"]) self.assertEqual(self.db.test["mike"], self.db["test.mike"]) - self.db.drop_collection('test') - self.assertFalse('test' in self.db.collection_names()) + def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + + def test_create(self): + # No Exception. 
+ db = client_context.client.pymongo_test + db.create_test_no_wc.drop() + + def lambda_test(): + return "create_test_no_wc" not in db.list_collection_names() + + def lambda_test_2(): + return "create_test_no_wc" in db.list_collection_names() + + wait_until( + lambda_test, + "drop create_test_no_wc collection", + ) + db.create_collection("create_test_no_wc") + wait_until( + lambda_test_2, + "create create_test_no_wc collection", + ) + # SERVER-33317 + if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): + with self.assertRaises(OperationFailure): + db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) + + def test_drop_nonexistent_collection(self): + self.db.drop_collection("test") + self.assertNotIn("test", self.db.list_collection_names()) # No exception - self.db.drop_collection('test') + self.db.drop_collection("test") - def test_create_index(self): + def test_create_indexes(self): db = self.db - self.assertRaises(TypeError, db.test.create_index, 5) - self.assertRaises(TypeError, db.test.create_index, {"hello": 1}) - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, cache_for='foo') - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, ttl='foo') - self.assertRaises(ValueError, db.test.create_index, []) + with self.assertRaises(TypeError): + db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.create_indexes(["foo"]) # type: ignore[list-item] + self.assertRaises(TypeError, IndexModel, 5) + self.assertRaises(ValueError, IndexModel, []) db.test.drop_indexes() - db.test.insert({}) - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 1) + db.test.insert_one({}) + self.assertEqual(len(db.test.index_information()), 1) - db.test.create_index("hello") - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + db.test.create_indexes([IndexModel("hello")]) + db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) # Tuple instead of list. 
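As an aside on the replacement API used throughout this hunk: `create_indexes` takes `IndexModel` objects and returns the list of created index names, which is what the rewritten assertions check (a usage sketch; `db` stands for any `Database`):

```python
from pymongo import ASCENDING, DESCENDING
from pymongo.operations import IndexModel

models = [
    IndexModel("hello"),  # auto-named "hello_1"
    IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world"),
]
names = db.test.create_indexes(models)
assert names == ["hello_1", "hello_world"]
```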
- db.test.create_index((("world", ASCENDING),)) + db.test.create_indexes([IndexModel((("world", ASCENDING),))]) - count = 0 - for _ in db.system.indexes.find({"ns": u"pymongo_test.test"}): - count += 1 - self.assertEqual(count, 4) + self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - ix = db.test.create_index([("hello", DESCENDING), - ("world", ASCENDING)], name="hello_world") - self.assertEqual(ix, "hello_world") + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) + self.assertEqual(names, ["hello_world"]) db.test.drop_indexes() - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 1) - db.test.create_index("hello") - self.assertTrue(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_indexes([IndexModel("hello")]) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 1) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) - self.assertTrue(u"hello_-1_world_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assertEqual(len(db.test.index_information()), 1) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) + info = db.test.index_information() + for name in names: + self.assertIn(name, info) db.test.drop() - db.test.insert({'a': 1}) - db.test.insert({'a': 1}) - self.assertRaises(DuplicateKeyError, db.test.create_index, - 'a', unique=True) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + db.test.create_indexes([IndexModel("a", unique=True)]) + + with self.write_concern_collection() as coll: + coll.create_indexes([IndexModel("hello")]) - def test_ensure_index(self): + @client_context.require_version_max(4, 3, -1) + def test_create_indexes_commitQuorum_requires_44(self): db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") - self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1}) - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, cache_for='foo') - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, ttl='foo') + @client_context.require_no_standalone + @client_context.require_version_min(4, 4, -1) + def test_create_indexes_commitQuorum(self): + self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") - db.test.drop_indexes() - self.assertEqual("hello_1", db.test.create_index("hello")) - self.assertEqual("hello_1", db.test.create_index("hello")) + def test_create_index(self): + db = self.db - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) + with self.assertRaises(TypeError): + db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.create_index([]) db.test.drop_indexes() - self.assertEqual("foo", - db.test.ensure_index("goodbye", name="foo")) - self.assertEqual(None, db.test.ensure_index("goodbye", name="foo")) + db.test.insert_one({}) + self.assertEqual(len(db.test.index_information()), 1) + + 
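+        # Index names are derived from the key pattern, e.g.
+        # [("hello", DESCENDING), ("world", ASCENDING)] -> "hello_-1_world_1".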
db.test.create_index("hello") + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + + # Tuple instead of list. + db.test.create_index((("world", ASCENDING),)) + + self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) + ix = db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") + self.assertEqual(ix, "hello_world") - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) + db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_index("hello") + self.assertIn("hello_1", db.test.index_information()) - db.drop_collection("test") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.create_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye", cache_for=1)) - time.sleep(1.2) - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.create_index("goodbye", cache_for=1)) - time.sleep(1.2) - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - # Make sure the expiration time is updated. - self.assertEqual(None, - db.test.ensure_index("goodbye")) - - # Clean up indexes for later tests db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + self.assertIn("hello_-1_world_1", db.test.index_information()) - def test_deprecated_ttl_index_kwarg(self): - db = self.db + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertIn("hello_-1_world_1", db.test.index_information()) - # In Python 2.6+ we could use the catch_warnings context - # manager to test this warning nicely. As we can't do that - # we must test raising errors before the ignore filter is applied. 
- warnings.simplefilter("error", DeprecationWarning) - try: - self.assertRaises(DeprecationWarning, lambda: - db.test.ensure_index("goodbye", ttl=10)) - finally: - warnings.resetwarnings() - warnings.simplefilter("ignore") - - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye", ttl=10)) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - def test_ensure_unique_index_threaded(self): - coll = self.db.test_unique_threaded - coll.drop() - coll.insert(({'foo': i} for i in xrange(10000))) - - class Indexer(threading.Thread): - def run(self): - try: - coll.ensure_index('foo', unique=True) - coll.insert({'foo': 'bar'}) - coll.insert({'foo': 'bar'}) - except OperationFailure: - pass - - threads = [] - for _ in xrange(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for i in xrange(10): - threads[i].start() - - joinall(threads) - - self.assertEqual(10001, coll.count()) - coll.drop() - - def test_index_on_binary(self): - db = self.db - db.drop_collection("test") - db.test.save({"bin": Binary(b("def"))}) - db.test.save({"bin": Binary(b("abc"))}) - db.test.save({"bin": Binary(b("ghi"))}) + db.test.drop() + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + db.test.create_index("a", unique=True) - self.assertEqual(db.test.find({"bin": Binary(b("abc"))}) - .explain()["nscanned"], 3) + with self.write_concern_collection() as coll: + coll.create_index([("hello", DESCENDING)]) - db.test.create_index("bin") - self.assertEqual(db.test.find({"bin": Binary(b("abc"))}) - .explain()["nscanned"], 1) + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] def test_drop_index(self): db = self.db @@ -289,239 +339,219 @@ def test_drop_index(self): db.test.create_index("hello") name = db.test.create_index("goodbye") - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 3) + self.assertEqual(len(db.test.index_information()), 3) self.assertEqual(name, "goodbye_1") db.test.drop_index(name) - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 2) - self.assertTrue(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + + # Drop it again. 
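+        # Dropping a nonexistent index raises OperationFailure on servers
+        # older than 8.3; newer servers treat the drop as a no-op.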
+ if client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + db.test.drop_index(name) + else: + db.test.drop_index(name) + self.assertEqual(len(db.test.index_information()), 2) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() db.test.create_index("hello") name = db.test.create_index("goodbye") - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 3) + self.assertEqual(len(db.test.index_information()), 3) self.assertEqual(name, "goodbye_1") db.test.drop_index([("goodbye", ASCENDING)]) - self.assertEqual(db.system.indexes.find({"ns": u"pymongo_test.test"}) - .count(), 2) - self.assertTrue(u"hello_1" in - [a["name"] for a in db.system.indexes - .find({"ns": u"pymongo_test.test"})]) + self.assertEqual(len(db.test.index_information()), 2) + self.assertIn("hello_1", db.test.index_information()) - def test_reindex(self): + with self.write_concern_collection() as coll: + coll.drop_index("hello_1") + + @client_context.require_no_mongos + @client_context.require_test_commands + def test_index_management_max_time_ms(self): + coll = self.db.test + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") + try: + with self.assertRaises(ExecutionTimeout): + coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_indexes(maxTimeMS=1) + finally: + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + + def test_list_indexes(self): db = self.db - db.drop_collection("test") - db.test.insert({"foo": "bar", "who": "what", "when": "how"}) - db.test.create_index("foo") - db.test.create_index("who") - db.test.create_index("when") - info = db.test.index_information() + db.test.drop() + db.test.insert_one({}) # create collection - def check_result(result): - self.assertEqual(4, result['nIndexes']) - indexes = result['indexes'] - names = [idx['name'] for idx in indexes] - for name in names: - self.assertTrue(name in info) - for key in info: - self.assertTrue(key in names) - - reindexed = db.test.reindex() - if 'raw' in reindexed: - # mongos - for result in reindexed['raw'].itervalues(): - check_result(result) - else: - check_result(reindexed) + def map_indexes(indexes): + return {index["name"]: index for index in indexes} + + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 1) + self.assertIn("_id_", map_indexes(indexes)) + + db.test.create_index("hello") + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 2) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) + + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 3) + index_map = map_indexes(indexes) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) + self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) + + # List indexes on a collection that does not exist. + indexes = (db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + # List indexes on a database that does not exist. 
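+        # Merely referencing a namespace never creates it, so the driver
+        # returns an empty cursor instead of raising.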
+        indexes = (self.client.db_does_not_exist.coll_does_not_exist.list_indexes()).to_list()
+        self.assertEqual(len(indexes), 0)

     def test_index_info(self):
         db = self.db
-        db.test.drop_indexes()
-        db.test.remove({})
-        db.test.save({})  # create collection
+        db.test.drop()
+        db.test.insert_one({})  # create collection
         self.assertEqual(len(db.test.index_information()), 1)
-        self.assertTrue("_id_" in db.test.index_information())
+        self.assertIn("_id_", db.test.index_information())

         db.test.create_index("hello")
         self.assertEqual(len(db.test.index_information()), 2)
-        self.assertEqual(db.test.index_information()["hello_1"]["key"],
-                         [("hello", ASCENDING)])
+        self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)])

-        db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
-                             unique=True)
-        self.assertEqual(db.test.index_information()["hello_1"]["key"],
-                         [("hello", ASCENDING)])
+        db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True)
+        self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)])
         self.assertEqual(len(db.test.index_information()), 3)
-        self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)],
-                         db.test.index_information()["hello_-1_world_1"]["key"]
-                         )
-        self.assertEqual(True,
-                         db.test.index_information()["hello_-1_world_1"]["unique"])
+        self.assertEqual(
+            [("hello", DESCENDING), ("world", ASCENDING)],
+            (db.test.index_information())["hello_-1_world_1"]["key"],
+        )
+        self.assertEqual(True, (db.test.index_information())["hello_-1_world_1"]["unique"])

     def test_index_geo2d(self):
         db = self.db
         db.test.drop_indexes()
-        self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)]))
-        index_info = db.test.index_information()['loc_2d']
-        self.assertEqual([('loc', '2d')], index_info['key'])
+        self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)]))
+        index_info = (db.test.index_information())["loc_2d"]
+        self.assertEqual([("loc", "2d")], index_info["key"])

+    # geoSearch was deprecated in 4.4 and removed in 5.0
+    @client_context.require_version_max(4, 5)
+    @client_context.require_no_mongos
     def test_index_haystack(self):
-        if is_mongos(self.db.connection):
-            raise SkipTest("geoSearch is not supported by mongos")
         db = self.db
-        db.test.drop_indexes()
-        db.test.remove()
-        _id = db.test.insert({
-            "pos": {"long": 34.2, "lat": 33.3},
-            "type": "restaurant"
-        })
-        db.test.insert({
-            "pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"
-        })
-        db.test.insert({
-            "pos": {"long": 59.1, "lat": 87.2}, "type": "office"
-        })
-        db.test.create_index(
-            [("pos", GEOHAYSTACK), ("type", ASCENDING)],
-            bucket_size=1
-        )
-
-        results = db.command(SON([
-            ("geoSearch", "test"),
-            ("near", [33, 33]),
-            ("maxDistance", 6),
-            ("search", {"type": "restaurant"}),
-            ("limit", 30),
-        ]))['results']
+        db.test.drop()
+        _id = (
+            db.test.insert_one({"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"})
+        ).inserted_id
+        db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"})
+        db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"})
+        db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1)
+
+        results = (
+            db.command(
+                SON(
+                    [
+                        ("geoSearch", "test"),
+                        ("near", [33, 33]),
+                        ("maxDistance", 6),
+                        ("search", {"type": "restaurant"}),
+                        ("limit", 30),
+                    ]
+                )
+            )
+        )["results"]

         self.assertEqual(2, len(results))
-        self.assertEqual({
-            "_id": _id,
-            "pos": {"long": 34.2, "lat": 33.3},
-            "type": "restaurant"
-        }, results[0])
+        self.assertEqual(
+            {"_id": _id, "pos": 
{"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) + @client_context.require_no_mongos def test_index_text(self): - if not version.at_least(self.client, (2, 3, 2)): - raise SkipTest("Text search requires server >=2.3.2.") + db = self.db + db.test.drop_indexes() + self.assertEqual("t_text", db.test.create_index([("t", TEXT)])) + index_info = (db.test.index_information())["t_text"] + self.assertIn("weights", index_info) - if is_mongos(self.client): - raise SkipTest("setParameter does not work through mongos") + db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) - enable_text_search(self.client) + # MongoDB 2.6 text search. Create 'score' field in projection. + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) - db = self.db - db.test.drop_indexes() - self.assertEqual("t_text", db.test.create_index([("t", "text")])) - index_info = db.test.index_information()["t_text"] - self.assertTrue("weights" in index_info) - - if version.at_least(self.client, (2, 5, 5)): - db.test.insert([ - {'t': 'spam eggs and spam'}, - {'t': 'spam'}, - {'t': 'egg sausage and bacon'}]) - - # MongoDB 2.6 text search. Create 'score' field in projection. - cursor = db.test.find( - {'$text': {'$search': 'spam'}}, - {'score': {'$meta': 'textScore'}}) - - # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) - results = list(cursor) - self.assertTrue(results[0]['score'] >= results[1]['score']) + # Sort by 'score' field. + cursor.sort([("score", {"$meta": "textScore"})]) + results = cursor.to_list() + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) db.test.drop_indexes() def test_index_2dsphere(self): - if not version.at_least(self.client, (2, 3, 2)): - raise SkipTest("2dsphere indexing requires server >=2.3.2.") - db = self.db db.test.drop_indexes() - self.assertEqual("geo_2dsphere", - db.test.create_index([("geo", GEOSPHERE)])) + self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) - poly = {"type": "Polygon", - "coordinates": [[[40,5], [40,6], [41,6], [41,5], [40,5]]]} - query = {"geo": {"$within": {"$geometry": poly}}} + for dummy, info in (db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": + break + else: + self.fail("2dsphere index not found.") - cursor = db.test.find(query).explain()['cursor'] - self.assertTrue('S2Cursor' in cursor or 'geo_2dsphere' in cursor) + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + query = {"geo": {"$within": {"$geometry": poly}}} + # This query will error without a 2dsphere index. 
+        db.test.find(query).to_list()

         db.test.drop_indexes()

     def test_index_hashed(self):
-        if not version.at_least(self.client, (2, 3, 2)):
-            raise SkipTest("hashed indexing requires server >=2.3.2.")
-
         db = self.db
         db.test.drop_indexes()
-        self.assertEqual("a_hashed",
-                         db.test.create_index([("a", HASHED)]))
+        self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)]))
+
+        for dummy, info in (db.test.index_information()).items():
+            field, idx_type = info["key"][0]
+            if field == "a" and idx_type == "hashed":
+                break
+        else:
+            self.fail("hashed index not found.")

-        self.assertEqual("BtreeCursor a_hashed",
-                         db.test.find({'a': 1}).explain()['cursor'])
         db.test.drop_indexes()

     def test_index_sparse(self):
         db = self.db
         db.test.drop_indexes()
-        db.test.create_index([('key', ASCENDING)], sparse=True)
-        self.assertTrue(db.test.index_information()['key_1']['sparse'])
+        db.test.create_index([("key", ASCENDING)], sparse=True)
+        self.assertTrue((db.test.index_information())["key_1"]["sparse"])

     def test_index_background(self):
         db = self.db
         db.test.drop_indexes()
-        db.test.create_index([('keya', ASCENDING)])
-        db.test.create_index([('keyb', ASCENDING)], background=False)
-        db.test.create_index([('keyc', ASCENDING)], background=True)
-        self.assertFalse('background' in db.test.index_information()['keya_1'])
-        self.assertFalse(db.test.index_information()['keyb_1']['background'])
-        self.assertTrue(db.test.index_information()['keyc_1']['background'])
+        db.test.create_index([("keya", ASCENDING)])
+        db.test.create_index([("keyb", ASCENDING)], background=False)
+        db.test.create_index([("keyc", ASCENDING)], background=True)
+        self.assertNotIn("background", (db.test.index_information())["keya_1"])
+        self.assertFalse((db.test.index_information())["keyb_1"]["background"])
+        self.assertTrue((db.test.index_information())["keyc_1"]["background"])

     def _drop_dups_setup(self, db):
-        db.drop_collection('test')
-        db.test.insert({'i': 1})
-        db.test.insert({'i': 2})
-        db.test.insert({'i': 2})  # duplicate
-        db.test.insert({'i': 3})
-
-    def test_index_drop_dups(self):
-        # Try dropping duplicates
-        db = self.db
-        self._drop_dups_setup(db)
-
-        if version.at_least(db.connection, (1, 9, 2)):
-            # No error, just drop the duplicate
-            db.test.create_index(
-                [('i', ASCENDING)],
-                unique=True,
-                drop_dups=True
-            )
-        else:
-            # https://jira.mongodb.org/browse/SERVER-2054 "Creating an index
-            # with dropDups shouldn't assert". On Mongo < 1.9.2, the duplicate
-            # is dropped & the index created, but an error is thrown.
- def test_create(): - db.test.create_index( - [('i', ASCENDING)], - unique=True, - drop_dups=True - ) - self.assertRaises(DuplicateKeyError, test_create) - - # Duplicate was dropped - self.assertEqual(3, db.test.count()) - - # Index was created, plus the index on _id - self.assertEqual(2, len(db.test.index_information())) + db.drop_collection("test") + db.test.insert_one({"i": 1}) + db.test.insert_one({"i": 2}) + db.test.insert_one({"i": 2}) # duplicate + db.test.insert_one({"i": 3}) def test_index_dont_drop_dups(self): # Try *not* dropping duplicates @@ -529,445 +559,647 @@ def test_index_dont_drop_dups(self): self._drop_dups_setup(db) # There's a duplicate - def test_create(): - db.test.create_index( - [('i', ASCENDING)], - unique=True, - drop_dups=False - ) - self.assertRaises(DuplicateKeyError, test_create) + def _test_create(): + db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + + with self.assertRaises(DuplicateKeyError): + _test_create() # Duplicate wasn't dropped - self.assertEqual(4, db.test.count()) + self.assertEqual(4, db.test.count_documents({})) # Index wasn't created, only the default index on _id self.assertEqual(1, len(db.test.index_information())) + # Get the plan dynamically because the explain format will change. + def get_plan_stage(self, root, stage): + if root.get("stage") == stage: + return root + elif "inputStage" in root: + return self.get_plan_stage(root["inputStage"], stage) + elif "inputStages" in root: + for i in root["inputStages"]: + stage = self.get_plan_stage(i, stage) + if stage: + return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. + return self.get_plan_stage(root["queryPlan"], stage) + elif "shards" in root: + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) + if stage: + return stage + return {} + + def test_index_filter(self): + db = self.db + db.drop_collection("test") + + # Test bad filter spec on create. + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"$and": 5}) + + self.assertEqual( + "x_1", + db.test.create_index([("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}), + ) + db.test.insert_one({"x": 5, "a": 2}) + db.test.insert_one({"x": 6, "a": 1}) + + # Operations that use the partial index. + explain = db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + # Operations that do not use the partial index. 
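+        # Predicates not covered by the partialFilterExpression ({"a": {"$lte": 1.5}})
+        # fall back to a full collection scan.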
+ explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + explain = db.test.find({"x": 6}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + # Test drop_indexes. + db.test.drop_index("x_1") + explain = db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + def test_field_selection(self): db = self.db db.drop_collection("test") doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}} - db.test.insert(doc) + db.test.insert_one(doc) # Test field inclusion - doc = db.test.find({}, ["_id"]).next() - self.assertEqual(doc.keys(), ["_id"]) - doc = db.test.find({}, ["a"]).next() - l = doc.keys() + doc = next(db.test.find({}, ["_id"])) + self.assertEqual(list(doc), ["_id"]) + doc = next(db.test.find({}, ["a"])) + l = list(doc) l.sort() self.assertEqual(l, ["_id", "a"]) - doc = db.test.find({}, ["b"]).next() - l = doc.keys() + doc = next(db.test.find({}, ["b"])) + l = list(doc) l.sort() self.assertEqual(l, ["_id", "b"]) - doc = db.test.find({}, ["c"]).next() - l = doc.keys() + doc = next(db.test.find({}, ["c"])) + l = list(doc) l.sort() self.assertEqual(l, ["_id", "c"]) - doc = db.test.find({}, ["a"]).next() + doc = next(db.test.find({}, ["a"])) self.assertEqual(doc["a"], 1) - doc = db.test.find({}, ["b"]).next() + doc = next(db.test.find({}, ["b"])) self.assertEqual(doc["b"], 5) - doc = db.test.find({}, ["c"]).next() + doc = next(db.test.find({}, ["c"])) self.assertEqual(doc["c"], {"d": 5, "e": 10}) # Test inclusion of fields with dots - doc = db.test.find({}, ["c.d"]).next() + doc = next(db.test.find({}, ["c.d"])) self.assertEqual(doc["c"], {"d": 5}) - doc = db.test.find({}, ["c.e"]).next() + doc = next(db.test.find({}, ["c.e"])) self.assertEqual(doc["c"], {"e": 10}) - doc = db.test.find({}, ["b", "c.e"]).next() + doc = next(db.test.find({}, ["b", "c.e"])) self.assertEqual(doc["c"], {"e": 10}) - doc = db.test.find({}, ["b", "c.e"]).next() - l = doc.keys() + doc = next(db.test.find({}, ["b", "c.e"])) + l = list(doc) l.sort() self.assertEqual(l, ["_id", "b", "c"]) - doc = db.test.find({}, ["b", "c.e"]).next() + doc = next(db.test.find({}, ["b", "c.e"])) self.assertEqual(doc["b"], 5) # Test field exclusion - doc = db.test.find({}, {"a": False, "b": 0}).next() - l = doc.keys() + doc = next(db.test.find({}, {"a": False, "b": 0})) + l = list(doc) l.sort() self.assertEqual(l, ["_id", "c"]) - doc = db.test.find({}, {"_id": False}).next() - l = doc.keys() - self.assertFalse("_id" in l) + doc = next(db.test.find({}, {"_id": False})) + l = list(doc) + self.assertNotIn("_id", l) def test_options(self): db = self.db db.drop_collection("test") - db.test.save({}) - self.assertEqual(db.test.options(), {}) - self.assertEqual(db.test.doesnotexist.options(), {}) - - db.drop_collection("test") - if version.at_least(db.connection, (1, 9)): - db.create_collection("test", capped=True, size=4096) - self.assertEqual(db.test.options(), {"capped": True, 'size': 4096}) - else: - db.create_collection("test", capped=True) - self.assertEqual(db.test.options(), {"capped": True}) + db.create_collection("test", capped=True, size=4096) + result = db.test.options() + self.assertEqual(result, {"capped": True, "size": 4096}) db.drop_collection("test") - def test_insert_find_one(self): - db = self.db - db.test.remove({}) - self.assertEqual(0, 
len(list(db.test.find()))) - doc = {"hello": u"world"} - id = db.test.insert(doc) - self.assertEqual(1, len(list(db.test.find()))) - self.assertEqual(doc, db.test.find_one()) - self.assertEqual(doc["_id"], id) - self.assertTrue(isinstance(id, ObjectId)) - - doc_class = None - # Work around http://bugs.jython.org/issue1728 - if (sys.platform.startswith('java') and - sys.version_info[:3] >= (2, 5, 2)): - doc_class = SON - - def remove_insert_find_one(doc): - db.test.remove({}) - db.test.insert(doc) - # SON equality is order sensitive. - return db.test.find_one(as_class=doc_class) == doc.to_dict() - - qcheck.check_unittest(self, remove_insert_find_one, - qcheck.gen_mongo_dict(3)) - - def test_generator_insert(self): + def test_insert_one(self): db = self.db - db.test.remove({}) - self.assertEqual(db.test.find().count(), 0) - db.test.insert(({'a': i} for i in xrange(5)), manipulate=False) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - db.test.insert(({'a': i} for i in xrange(5)), manipulate=True) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - def test_remove_one(self): - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.db.test.insert({"z": 1}) - self.assertEqual(3, self.db.test.count()) - - self.db.test.remove(multi=False) - self.assertEqual(2, self.db.test.count()) - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - def test_remove_all(self): - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.assertEqual(2, self.db.test.count()) - - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) + db.test.drop() - def test_find_w_fields(self): + document: dict[str, Any] = {"_id": 1000} + result = db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) + self.assertEqual(1, db.test.count_documents({})) + + document = {"foo": "bar"} + result = db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) + self.assertEqual(2, db.test.count_documents({})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertFalse(result.acknowledged) + # The insert failed duplicate key... 
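+        # With w=0 the write is unacknowledged, so the duplicate-key failure
+        # is only observable by polling the document count.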
+ + def async_lambda(): + return db.test.count_documents({}) == 2 + + wait_until(async_lambda, "forcing duplicate key error") + + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) + result = db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(result.inserted_id, None) + + def test_insert_many(self): db = self.db - db.test.remove({}) - - db.test.insert({"x": 1, "mike": "awesome", - "extra thing": "abcdefghijklmnopqrstuvwxyz"}) - self.assertEqual(1, db.test.count()) - doc = db.test.find({}).next() - self.assertTrue("x" in doc) - doc = db.test.find({}).next() - self.assertTrue("mike" in doc) - doc = db.test.find({}).next() - self.assertTrue("extra thing" in doc) - doc = db.test.find({}, ["x", "mike"]).next() - self.assertTrue("x" in doc) - doc = db.test.find({}, ["x", "mike"]).next() - self.assertTrue("mike" in doc) - doc = db.test.find({}, ["x", "mike"]).next() - self.assertFalse("extra thing" in doc) - doc = db.test.find({}, ["mike"]).next() - self.assertFalse("x" in doc) - doc = db.test.find({}, ["mike"]).next() - self.assertTrue("mike" in doc) - doc = db.test.find({}, ["mike"]).next() - self.assertFalse("extra thing" in doc) + db.test.drop() - def test_fields_specifier_as_dict(self): + docs: list = [{} for _ in range(5)] + result = db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, ObjectId) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [{"_id": i} for i in range(5)] + result = db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] + result = db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual([], result.inserted_ids) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] + result = db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertFalse(result.acknowledged) + self.assertEqual(20, db.test.count_documents({})) + + def test_insert_many_generator(self): + coll = self.db.test + coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + def test_insert_many_invalid(self): db = self.db - db.test.remove({}) - db.test.insert({"x": [1, 2, 3], "mike": "awesome"}) + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many({}) - self.assertEqual([1, 2, 3], db.test.find_one()["x"]) - if version.at_least(db.connection, (1, 5, 1)): - self.assertEqual([2, 3], - db.test.find_one(fields={"x": {"$slice": - -2}})["x"]) - self.assertTrue("x" not in db.test.find_one(fields={"x": 0})) - self.assertTrue("mike" in db.test.find_one(fields={"x": 
0})) + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many([]) - def test_find_w_regex(self): - db = self.db - db.test.remove({}) - - db.test.insert({"x": "hello_world"}) - db.test.insert({"x": "hello_mike"}) - db.test.insert({"x": "hello_mikey"}) - db.test.insert({"x": "hello_test"}) - - self.assertEqual(db.test.find().count(), 4) - self.assertEqual(db.test.find({"x": - re.compile("^hello.*")}).count(), 4) - self.assertEqual(db.test.find({"x": - re.compile("ello")}).count(), 4) - self.assertEqual(db.test.find({"x": - re.compile("^hello$")}).count(), 0) - self.assertEqual(db.test.find({"x": - re.compile("^hello_mi.*$")}).count(), 2) + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(1) # type: ignore[arg-type] - def test_id_can_be_anything(self): - db = self.db + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) - db.test.remove({}) - auto_id = {"hello": "world"} - db.test.insert(auto_id) - self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + def test_delete_one(self): + self.db.test.drop() - numeric = {"_id": 240, "hello": "world"} - db.test.insert(numeric) - self.assertEqual(numeric["_id"], 240) + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"y": 1}) + self.db.test.insert_one({"z": 1}) - object = {"_id": numeric, "hello": "world"} - db.test.insert(object) - self.assertEqual(object["_id"], numeric) + result = self.db.test.delete_one({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(2, self.db.test.count_documents({})) - for x in db.test.find(): - self.assertEqual(x["hello"], u"world") - self.assertTrue("_id" in x) + result = self.db.test.delete_one({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(1, self.db.test.count_documents({})) - def test_iteration(self): - db = self.db + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = db.test.delete_one({"z": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 1 documents") - def iterate(): - [a for a in db.test] + def test_delete_many(self): + self.db.test.drop() - self.assertRaises(TypeError, iterate) + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"y": 1}) + self.db.test.insert_one({"y": 1}) + + result = self.db.test.delete_many({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(2, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(0, self.db.test.count_documents({"x": 1})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = db.test.delete_many({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 2 documents") + + def test_command_document_too_large(self): + large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) + coll = 
self.db.test
+        with self.assertRaises(DocumentTooLarge):
+            coll.insert_one({"data": large})
+        # update_one and update_many are the same
+        with self.assertRaises(DocumentTooLarge):
+            coll.replace_one({}, {"data": large})
+        with self.assertRaises(DocumentTooLarge):
+            coll.delete_one({"data": large})
+
+    def test_write_large_document(self):
+        max_size = client_context.max_bson_size
+        half_size = int(max_size / 2)
+        max_str = "x" * max_size
+        half_str = "x" * half_size
+        self.assertEqual(max_size, 16777216)
+
+        with self.assertRaises(OperationFailure):
+            self.db.test.insert_one({"foo": max_str})
+        with self.assertRaises(OperationFailure):
+            self.db.test.replace_one({}, {"foo": max_str}, upsert=True)
+        with self.assertRaises(OperationFailure):
+            self.db.test.insert_many([{"x": 1}, {"foo": max_str}])
+        self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}])
+
+        self.db.test.insert_one({"bar": "x"})
+        # Use w=0 here to test legacy doc size checking in all server versions
+        unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0))
+        with self.assertRaises(DocumentTooLarge):
+            unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)})
+        self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)})

-    def test_invalid_key_names(self):
+    def test_insert_bypass_document_validation(self):
         db = self.db
         db.test.drop()
+        db.create_collection("test", validator={"a": {"$exists": True}})
+        db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0))
+
+        # Test insert_one
+        with self.assertRaises(OperationFailure):
+            db.test.insert_one({"_id": 1, "x": 100})
+        result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True)
+        self.assertIsInstance(result, InsertOneResult)
+        self.assertEqual(1, result.inserted_id)
+        result = db.test.insert_one({"_id": 2, "a": 0})
+        self.assertIsInstance(result, InsertOneResult)
+        self.assertEqual(2, result.inserted_id)
+
+        db_w0.test.insert_one({"y": 1}, bypass_document_validation=True)
+
+        def async_lambda():
+            return db_w0.test.find_one({"y": 1})
+
+        wait_until(async_lambda, "find w:0 inserted document")
+
+        # Test insert_many
+        docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)]
+        with self.assertRaises(OperationFailure):
+            db.test.insert_many(docs)
+        result = db.test.insert_many(docs, bypass_document_validation=True)
+        self.assertIsInstance(result, InsertManyResult)
+        self.assertEqual(97, len(result.inserted_ids))
+        for doc in docs:
+            _id = doc["_id"]
+            self.assertIsInstance(_id, int)
+            self.assertIn(_id, result.inserted_ids)
+            self.assertEqual(1, db.test.count_documents({"x": doc["x"]}))
+        self.assertTrue(result.acknowledged)
+        docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)]
+        result = db.test.insert_many(docs)
+        self.assertIsInstance(result, InsertManyResult)
+        self.assertEqual(100, len(result.inserted_ids))
+        for doc in docs:
+            _id = doc["_id"]
+            self.assertIsInstance(_id, int)
+            self.assertIn(_id, result.inserted_ids)
+            self.assertEqual(1, db.test.count_documents({"a": doc["a"]}))
+        self.assertTrue(result.acknowledged)
+
+        with self.assertRaises(OperationFailure):
+            db_w0.test.insert_many(
+                [{"x": 1}, {"x": 2}],
+                bypass_document_validation=True,
+            )

-        db.test.insert({"hello": "world"})
-        db.test.insert({"hello": {"hello": "world"}})
-
-        self.assertRaises(InvalidDocument, db.test.insert, {"$hello": "world"})
-        self.assertRaises(InvalidDocument, db.test.insert,
-                          {"hello": {"$hello": "world"}})
-
-        db.test.insert({"he$llo": "world"})
-        db.test.insert({"hello": {"hello$": 
"world"}}) - - self.assertRaises(InvalidDocument, db.test.insert, - {".hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, - {"hello": {".hello": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, - {"hello.": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, - {"hello": {"hello.": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert, - {"hel.lo": "world"}) - self.assertRaises(InvalidDocument, db.test.insert, - {"hello": {"hel.lo": "world"}}) - - def test_insert_multiple(self): + def test_replace_bypass_document_validation(self): db = self.db - db.drop_collection("test") - doc1 = {"hello": u"world"} - doc2 = {"hello": u"mike"} - self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert([doc1, doc2]) - self.assertEqual(db.test.find().count(), 2) - self.assertEqual(doc1, db.test.find_one({"hello": u"world"})) - self.assertEqual(doc2, db.test.find_one({"hello": u"mike"})) - - self.assertEqual(2, len(ids)) - self.assertEqual(doc1["_id"], ids[0]) - self.assertEqual(doc2["_id"], ids[1]) - - id = db.test.insert([{"hello": 1}]) - self.assertTrue(isinstance(id, list)) - self.assertEqual(1, len(id)) - - self.assertRaises(InvalidOperation, db.test.insert, []) - - # Generator that raises StopIteration on first call to next(). - self.assertRaises(InvalidOperation, db.test.insert, (i for i in [])) - - def test_insert_multiple_with_duplicate(self): + db.test.drop() + db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test replace_one + db.test.insert_one({"a": 101}) + with self.assertRaises(OperationFailure): + db.test.replace_one({"a": 101}, {"y": 1}) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(1, db.test.count_documents({"a": 101})) + db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"a": 101})) + self.assertEqual(1, db.test.count_documents({"y": 1})) + db.test.replace_one({"y": 1}, {"a": 102}) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(0, db.test.count_documents({"a": 101})) + self.assertEqual(1, db.test.count_documents({"a": 102})) + + db.test.insert_one({"y": 1}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.replace_one({"y": 1}, {"x": 101}) + self.assertEqual(0, db.test.count_documents({"x": 101})) + self.assertEqual(1, db.test.count_documents({"y": 1})) + db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(1, db.test.count_documents({"x": 101})) + db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) + self.assertEqual(0, db.test.count_documents({"x": 101})) + self.assertEqual(1, db.test.count_documents({"a": 103})) + + db.test.insert_one({"y": 1}, bypass_document_validation=True) + db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + + def predicate(): + return db_w0.test.find_one({"x": 1}) + + wait_until(predicate, "find w:0 replaced document") + + def test_update_bypass_document_validation(self): db = self.db - db.drop_collection("test") - db.test.ensure_index([('i', ASCENDING)], unique=True) - - # No error - db.test.insert([{'i': i} for i in range(5, 10)], w=0) - db.test.remove() - - # No error - db.test.insert([{'i': 1}] * 2, w=0) - self.assertEqual(1, db.test.count()) - - self.assertRaises( - 
DuplicateKeyError, - lambda: db.test.insert([{'i': 2}] * 2), + db.test.drop() + db.test.insert_one({"z": 5}) + db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test update_one + with self.assertRaises(OperationFailure): + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) + self.assertEqual(0, db.test.count_documents({"z": -5})) + self.assertEqual(1, db.test.count_documents({"z": 5})) + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"z": 5})) + self.assertEqual(1, db.test.count_documents({"z": -5})) + db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) + self.assertEqual(1, db.test.count_documents({"z": 1})) + self.assertEqual(0, db.test.count_documents({"z": -5})) + + db.test.insert_one({"z": -10}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) + self.assertEqual(0, db.test.count_documents({"z": -9})) + self.assertEqual(1, db.test.count_documents({"z": -10})) + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) + self.assertEqual(1, db.test.count_documents({"z": -9})) + self.assertEqual(0, db.test.count_documents({"z": -10})) + db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) + self.assertEqual(0, db.test.count_documents({"z": -9})) + self.assertEqual(1, db.test.count_documents({"z": 0})) + + db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + def async_lambda(): + return db_w0.test.find_one({"y": 1, "x": 1}) + + wait_until(async_lambda, "find w:0 updated document") + + # Test update_many + db.test.insert_many([{"z": i} for i in range(3, 101)]) + db.test.insert_one({"y": 0}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": -100}}) + self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) + self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True ) - - db.drop_collection("test") - db.write_concern['w'] = 0 - db.test.ensure_index([('i', ASCENDING)], unique=True) - - # No error - db.test.insert([{'i': 1}] * 2) - self.assertEqual(1, db.test.count()) - - # Implied safe - self.assertRaises( - DuplicateKeyError, - lambda: db.test.insert([{'i': 2}] * 2, fsync=True), + self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) + db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False ) - - # Explicit safe - self.assertRaises( - DuplicateKeyError, - lambda: db.test.insert([{'i': 2}] * 2, w=1), + self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) + + db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": 1}}) + self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) + self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) + 
db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True ) - - # Misconfigured value for safe - self.assertRaises( - TypeError, - lambda: db.test.insert([{'i': 2}] * 2, safe=1), + self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}})) + db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False ) + self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) - def test_insert_iterables(self): - db = self.db + db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) - self.assertRaises(TypeError, db.test.insert, 4) - self.assertRaises(TypeError, db.test.insert, None) - self.assertRaises(TypeError, db.test.insert, True) + def async_lambda(): + return db_w0.test.count_documents({"m": 1, "x": 1}) == 2 - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert(({"hello": u"world"}, {"hello": u"world"})) - self.assertEqual(db.test.find().count(), 2) + wait_until(async_lambda, "find w:0 updated documents") - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert(itertools.imap(lambda x: {"hello": "world"}, - itertools.repeat(None, 10))) - self.assertEqual(db.test.find().count(), 10) - - def test_insert_manipulate_false(self): - # Test three aspects of insert with manipulate=False: - # 1. The return value is None or [None] as appropriate. - # 2. _id is not set on the passed-in document object. - # 3. _id is not sent to server. - if not version.at_least(self.db.connection, (2, 0)): - raise SkipTest('Need at least MongoDB 2.0') - - collection = self.db.test_insert_manipulate_false - collection.drop() - oid = ObjectId() - doc = {'a': oid} + def test_bypass_document_validation_bulk_write(self): + db = self.db + db.test.drop() + db.create_collection("test", validator={"a": {"$gte": 0}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] + db.test.bulk_write(ops, bypass_document_validation=True) + + self.assertEqual(3, db.test.count_documents({})) + self.assertEqual(1, db.test.count_documents({"a": -11})) + self.assertEqual(1, db.test.count_documents({"a": -1})) + self.assertEqual(1, db.test.count_documents({"a": -9})) + + # Assert that the operations would fail without bypass_doc_val + for op in ops: + with self.assertRaises(BulkWriteError): + db.test.bulk_write([op]) + + with self.assertRaises(OperationFailure): + db_w0.test.bulk_write(ops, bypass_document_validation=True) + + def test_find_by_default_dct(self): + db = self.db + db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + self.assertIsNotNone(db.test.find_one(dct)) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) - # The return value is None. - self.assertTrue(collection.insert(doc, manipulate=False) is None) - # insert() shouldn't set _id on the passed-in document object. 
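+        # find_one must not mutate the filter document passed to it.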
- self.assertEqual({'a': oid}, doc) - server_doc = collection.find_one() + def test_find_w_fields(self): + db = self.db + db.test.delete_many({}) + + db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) + self.assertEqual(1, db.test.count_documents({})) + doc = next(db.test.find({})) + self.assertIn("x", doc) + doc = next(db.test.find({})) + self.assertIn("mike", doc) + doc = next(db.test.find({})) + self.assertIn("extra thing", doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertIn("x", doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertIn("mike", doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertNotIn("extra thing", doc) + doc = next(db.test.find({}, ["mike"])) + self.assertNotIn("x", doc) + doc = next(db.test.find({}, ["mike"])) + self.assertIn("mike", doc) + doc = next(db.test.find({}, ["mike"])) + self.assertNotIn("extra thing", doc) + + @no_type_check + def test_fields_specifier_as_dict(self): + db = self.db + db.test.delete_many({}) - # _id is not sent to server, so it's generated server-side. - self.assertFalse(oid_generated_on_client(server_doc)) + db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) - # Bulk insert. The return value is a list of None. - self.assertEqual([None], collection.insert([{}], manipulate=False)) + self.assertEqual([1, 2, 3], (db.test.find_one())["x"]) + self.assertEqual([2, 3], (db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertNotIn("x", db.test.find_one(projection={"x": 0})) + self.assertIn("mike", db.test.find_one(projection={"x": 0})) - ids = collection.insert([{}, {}], manipulate=False) - self.assertEqual([None, None], ids) - collection.drop() + def test_find_w_regex(self): + db = self.db + db.test.delete_many({}) - def test_save(self): - self.db.drop_collection("test") + db.test.insert_one({"x": "hello_world"}) + db.test.insert_one({"x": "hello_mike"}) + db.test.insert_one({"x": "hello_mikey"}) + db.test.insert_one({"x": "hello_test"}) - # Save a doc with autogenerated id - id = self.db.test.save({"hello": "world"}) - self.assertEqual(self.db.test.find_one()["_id"], id) - self.assertTrue(isinstance(id, ObjectId)) - - # Save a doc with explicit id - self.db.test.save({"_id": "explicit_id", "hello": "bar"}) - doc = self.db.test.find_one({"_id": "explicit_id"}) - self.assertEqual(doc['_id'], 'explicit_id') - self.assertEqual(doc['hello'], 'bar') - - # Save docs with _id field already present (shouldn't create new docs) - self.assertEqual(2, self.db.test.count()) - self.db.test.save({'_id': id, 'hello': 'world'}) - self.assertEqual(2, self.db.test.count()) - self.db.test.save({'_id': 'explicit_id', 'hello': 'baz'}) - self.assertEqual(2, self.db.test.count()) - self.assertEqual( - 'baz', - self.db.test.find_one({'_id': 'explicit_id'})['hello'] - ) + self.assertEqual(len(db.test.find().to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("^hello.*")}).to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("ello")}).to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("^hello$")}).to_list()), 0) + self.assertEqual(len(db.test.find({"x": re.compile("^hello_mi.*$")}).to_list()), 2) - # Safe mode - self.db.test.create_index("hello", unique=True) - # No exception, even though we duplicate the first doc's "hello" value - self.db.test.save({'_id': 'explicit_id', 'hello': 'world'}, w=0) + def test_id_can_be_anything(self): + db = self.db - self.assertRaises( - DuplicateKeyError, - self.db.test.save, - {'_id': 
'explicit_id', 'hello': 'world'}) + db.test.delete_many({}) + auto_id = {"hello": "world"} + db.test.insert_one(auto_id) + self.assertIsInstance(auto_id["_id"], ObjectId) - def test_save_with_invalid_key(self): - self.db.drop_collection("test") - self.assertTrue(self.db.test.insert({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - expected = InvalidDocument - if version.at_least(self.client, (2, 5, 4, -1)): - expected = OperationFailure - self.assertRaises(expected, self.db.test.save, doc) + numeric = {"_id": 240, "hello": "world"} + db.test.insert_one(numeric) + self.assertEqual(numeric["_id"], 240) + + obj = {"_id": numeric, "hello": "world"} + db.test.insert_one(obj) + self.assertEqual(obj["_id"], numeric) + + for x in db.test.find(): + self.assertEqual(x["hello"], "world") + self.assertIn("_id", x) def test_unique_index(self): db = self.db - db.drop_collection("test") db.test.create_index("hello") - db.test.save({"hello": "world"}) - db.test.save({"hello": "mike"}) - db.test.save({"hello": "world"}) - self.assertFalse(db.error()) + # No error. + db.test.insert_one({"hello": "world"}) + db.test.insert_one({"hello": "world"}) db.drop_collection("test") db.test.create_index("hello", unique=True) - db.test.save({"hello": "world"}) - db.test.save({"hello": "mike"}) - db.test.save({"hello": "world"}, w=0) - self.assertTrue(db.error()) + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"hello": "world"}) + db.test.insert_one({"hello": "world"}) def test_duplicate_key_error(self): db = self.db @@ -975,105 +1207,106 @@ def test_duplicate_key_error(self): db.test.create_index("x", unique=True) - db.test.insert({"_id": 1, "x": 1}) - db.test.insert({"_id": 2, "x": 2}) - - # No error - db.test.insert({"_id": 1, "x": 1}, safe=False) - db.test.save({"_id": 1, "x": 1}, safe=False) - db.test.insert({"_id": 2, "x": 2}, safe=False) - db.test.save({"_id": 2, "x": 2}, safe=False) - db.test.insert({"_id": 1, "x": 1}, w=0) - db.test.save({"_id": 1, "x": 1}, w=0) - db.test.insert({"_id": 2, "x": 2}, w=0) - db.test.save({"_id": 2, "x": 2}, w=0) - - # But all those statements didn't do anything - self.assertEqual(2, db.test.count()) - - expected_error = OperationFailure - if version.at_least(db.connection, (1, 3)): - expected_error = DuplicateKeyError - - self.assertRaises(expected_error, - db.test.insert, {"_id": 1}) - self.assertRaises(expected_error, - db.test.insert, {"x": 1}) - - self.assertRaises(expected_error, - db.test.save, {"x": 2}) - self.assertRaises(expected_error, - db.test.update, {"x": 1}, - {"$inc": {"x": 1}}) + db.test.insert_one({"_id": 1, "x": 1}) - try: - db.test.insert({"_id": 1}) - except expected_error, exc: - # Just check that we set the error document. Fields - # vary by MongoDB version. - self.assertTrue(exc.details is not None) - else: - self.fail("%s was not raised" % (expected_error.__name__,)) + with self.assertRaises(DuplicateKeyError) as context: + db.test.insert_one({"x": 1}) - def test_wtimeout(self): - # Ensure setting wtimeout doesn't disable write concern altogether. - # See SERVER-12596. 
- collection = self.db.test - collection.remove() - collection.insert({'_id': 1}) + self.assertIsNotNone(context.exception.details) - collection.write_concern = {'w': 1, 'wtimeout': 1000} - self.assertRaises(DuplicateKeyError, collection.insert, {'_id': 1}) + with self.assertRaises(DuplicateKeyError) as context: + db.test.insert_one({"x": 1}) - collection.write_concern = {'wtimeout': 1000} - self.assertRaises(DuplicateKeyError, collection.insert, {'_id': 1}) + self.assertIsNotNone(context.exception.details) + self.assertEqual(1, db.test.count_documents({})) - def test_continue_on_error(self): + def test_write_error_text_handling(self): db = self.db - if not version.at_least(db.connection, (1, 9, 1)): - raise SkipTest("continue_on_error requires MongoDB >= 1.9.1") - db.drop_collection("test") - oid = db.test.insert({"one": 1}) - self.assertEqual(1, db.test.count()) - docs = [] - docs.append({"_id": oid, "two": 2}) - docs.append({"three": 3}) - docs.append({"four": 4}) - docs.append({"five": 5}) + db.test.create_index("text", unique=True) + + # Test workaround for SERVER-24007 + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) + + text = utf_8_decode(data, None, True) + db.test.insert_one({"text": text}) - db.test.insert(docs, manipulate=False, w=0) - self.assertEqual(11000, db.error()['code']) - self.assertEqual(1, db.test.count()) + # Should raise DuplicateKeyError, not InvalidBSON + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"text": text}) - db.test.insert(docs, manipulate=False, continue_on_error=True, w=0) - self.assertEqual(11000, db.error()['code']) - self.assertEqual(4, db.test.count()) + with self.assertRaises(DuplicateKeyError): + db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) - db.drop_collection("test") - oid = db.test.insert({"_id": oid, "one": 1}, w=0) - self.assertEqual(1, db.test.count()) - docs[0].pop("_id") - docs[2]["_id"] = oid + # Should raise BulkWriteError, not InvalidBSON + with self.assertRaises(BulkWriteError): + db.test.insert_many([{"text": text}]) + + def 
test_write_error_unicode(self): + coll = self.db.test + self.addCleanup(coll.drop) - db.test.insert(docs, manipulate=False, w=0) - self.assertEqual(11000, db.error()['code']) - self.assertEqual(3, db.test.count()) + coll.create_index("a", unique=True) + coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + coll.insert_one({"a": "unicode \U0001f40d"}) - db.test.insert(docs, manipulate=False, continue_on_error=True, w=0) - self.assertEqual(11000, db.error()['code']) - self.assertEqual(6, db.test.count()) + # Once more for good measure. + self.assertIn("E11000 duplicate key error", str(ctx.exception)) + + def test_wtimeout(self): + # Ensure setting wtimeout doesn't disable write concern altogether. + # See SERVER-12596. + collection = self.db.test + collection.drop() + collection.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) def test_error_code(self): try: - self.db.test.update({}, {"$thismodifierdoesntexist": 1}) - except OperationFailure, exc: - if version.at_least(self.db.connection, (1, 3)): - self.assertTrue(exc.code in (9, 10147, 16840, 17009)) - # Just check that we set the error document. Fields - # vary by MongoDB version. - self.assertTrue(exc.details is not None) + self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) + except OperationFailure as exc: + self.assertIn(exc.code, (9, 10147, 16840, 17009)) + # Just check that we set the error document. Fields + # vary by MongoDB version. 
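# Sketch of the error-inspection pattern these rewritten tests rely on:
# DuplicateKeyError (a subclass of OperationFailure) exposes .code and
# .details, the raw server error document. Client and collection names are
# illustrative assumptions.
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError

coll = MongoClient().test_db.demo
coll.drop()
coll.create_index("x", unique=True)
coll.insert_one({"x": 1})
try:
    coll.insert_one({"x": 1})
except DuplicateKeyError as exc:
    print(exc.code)     # 11000
    print(exc.details)  # full server error document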
+ self.assertIsNotNone(exc.details) else: self.fail("OperationFailure was not raised") @@ -1081,484 +1314,327 @@ def test_index_on_subfield(self): db = self.db db.drop_collection("test") - db.test.insert({"hello": {"a": 4, "b": 5}}) - db.test.insert({"hello": {"a": 7, "b": 2}}) - db.test.insert({"hello": {"a": 4, "b": 10}}) + db.test.insert_one({"hello": {"a": 4, "b": 5}}) + db.test.insert_one({"hello": {"a": 7, "b": 2}}) + db.test.insert_one({"hello": {"a": 4, "b": 10}}) db.drop_collection("test") db.test.create_index("hello.a", unique=True) - db.test.insert({"hello": {"a": 4, "b": 5}}) - db.test.insert({"hello": {"a": 7, "b": 2}}) - self.assertRaises(DuplicateKeyError, - db.test.insert, {"hello": {"a": 4, "b": 10}}) + db.test.insert_one({"hello": {"a": 4, "b": 5}}) + db.test.insert_one({"hello": {"a": 7, "b": 2}}) + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"hello": {"a": 4, "b": 10}}) - def test_safe_insert(self): + def test_replace_one(self): db = self.db db.drop_collection("test") - a = {"hello": "world"} - db.test.insert(a) - db.test.insert(a, w=0) - self.assertTrue("E11000" in db.error()["err"]) - - self.assertRaises(OperationFailure, db.test.insert, a) - - def test_update(self): - db = self.db - db.drop_collection("test") - - id1 = db.test.save({"x": 5}) - db.test.update({}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 6) - - id2 = db.test.save({"x": 1}) - db.test.update({"x": 6}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) - - def test_update_manipulate(self): - db = self.db - db.drop_collection("test") - db.test.insert({'_id': 1}) - db.test.update({'_id': 1}, {'a': 1}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 1}, - db.test.find_one()) - - class AddField(SONManipulator): - def transform_incoming(self, son, collection): - son['field'] = 'value' - return son - - db.add_son_manipulator(AddField()) - db.test.update({'_id': 1}, {'a': 2}, manipulate=False) - self.assertEqual( - {'_id': 1, 'a': 2}, - db.test.find_one()) - - db.test.update({'_id': 1}, {'a': 3}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 3, 'field': 'value'}, - db.test.find_one()) - - def test_update_nmodified(self): + with self.assertRaises(ValueError): + db.test.replace_one({}, {"$set": {"x": 1}}) + + id1 = (db.test.insert_one({"x": 1})).inserted_id + result = db.test.replace_one({"x": 1}, {"y": 1}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 1})) + self.assertEqual(0, db.test.count_documents({"x": 1})) + self.assertEqual((db.test.find_one(id1))["y"], 1) # type: ignore + + replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) + result = db.test.replace_one({"y": 1}, replacement, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"z": 1})) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual((db.test.find_one(id1))["z"], 1) # type: ignore + + result = db.test.replace_one({"x": 2}, {"y": 2}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + 
self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 2})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.replace_one({"x": 0}, {"y": 0}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_one(self): db = self.db db.drop_collection("test") - used_write_commands = (self.client.max_wire_version > 1) - - db.test.insert({'_id': 1}) - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(1, result['nModified']) - else: - self.assertFalse('nModified' in result) - # x is already 1. - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(0, result['nModified']) - else: - self.assertFalse('nModified' in result) - - def test_multi_update(self): + with self.assertRaises(ValueError): + db.test.update_one({}, {"x": 1}) + + id1 = (db.test.insert_one({"x": 5})).inserted_id + result = db.test.update_one({}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((db.test.find_one(id1))["x"], 6) # type: ignore + + id2 = (db.test.insert_one({"x": 1})).inserted_id + result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((db.test.find_one(id2))["x"], 1) # type: ignore + + result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_result(self): db = self.db - if not version.at_least(db.connection, (1, 1, 3, -1)): - raise SkipTest("multi-update requires MongoDB >= 1.1.3") - db.drop_collection("test") - db.test.save({"x": 4, "y": 3}) - db.test.save({"x": 5, "y": 5}) - db.test.save({"x": 4, "y": 4}) - - db.test.update({"x": 4}, {"$set": {"y": 5}}, multi=True) + result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) - self.assertEqual(3, db.test.count()) - for doc in db.test.find(): - self.assertEqual(5, doc["y"]) + result = db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) - 
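# Sketch of the UpdateResult contract exercised in these update_* tests
# (client/collection names are assumptions): acknowledged writes expose the
# counts, while a result from a w=0 collection raises InvalidOperation if
# you touch them, exactly as the assertions above check.
from pymongo import MongoClient
from pymongo.write_concern import WriteConcern

coll = MongoClient().test_db.demo
res = coll.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True)
print(res.matched_count, res.modified_count, res.upserted_id)

unack = coll.with_options(write_concern=WriteConcern(w=0))
res = unack.update_one({"x": 0}, {"$inc": {"x": 1}})
print(res.acknowledged)  # False; res.matched_count would raise InvalidOperation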
self.assertEqual(2, db.test.update({"x": 4}, {"$set": {"y": 6}}, - multi=True)["n"]) + result = db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) - def test_upsert(self): + def test_update_many(self): db = self.db db.drop_collection("test") - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - - self.assertEqual(1, db.test.count()) - self.assertEqual(2, db.test.find_one()["count"]) - - def test_safe_update(self): - db = self.db - v113minus = version.at_least(db.connection, (1, 1, 3, -1)) - v19 = version.at_least(db.connection, (1, 9)) - - db.drop_collection("test") - db.test.create_index("x", unique=True) - - db.test.insert({"x": 5}) - id = db.test.insert({"x": 4}) - - self.assertEqual( - None, db.test.update({"_id": id}, {"$inc": {"x": 1}}, w=0)) - - if v19: - self.assertTrue("E11000" in db.error()["err"]) - elif v113minus: - self.assertTrue(db.error()["err"].startswith("E11001")) - else: - self.assertTrue(db.error()["err"].startswith("E12011")) - - self.assertRaises(OperationFailure, db.test.update, - {"_id": id}, {"$inc": {"x": 1}}) - - self.assertEqual(1, db.test.update({"_id": id}, - {"$inc": {"x": 2}})["n"]) - - self.assertEqual(0, db.test.update({"_id": "foo"}, - {"$inc": {"x": 2}})["n"]) - - def test_update_with_invalid_keys(self): + with self.assertRaises(ValueError): + db.test.update_many({}, {"x": 1}) + + db.test.insert_one({"x": 4, "y": 3}) + db.test.insert_one({"x": 5, "y": 5}) + db.test.insert_one({"x": 4, "y": 4}) + + result = db.test.update_many({"x": 4}, {"$set": {"y": 5}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(2, result.matched_count) + self.assertIn(result.modified_count, (None, 2)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(3, db.test.count_documents({"y": 5})) + + result = db.test.update_many({"x": 5}, {"$set": {"y": 6}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 6})) + + result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_check_keys(self): self.db.drop_collection("test") - self.assertTrue(self.db.test.insert({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - - expected = InvalidDocument - if version.at_least(self.client, (2, 5, 4, -1)): - expected = OperationFailure - - # Replace - self.assertRaises(expected, - self.db.test.update, {"hello": "world"}, doc) - # Upsert - self.assertRaises(expected, - self.db.test.update, {"foo": "bar"}, doc, upsert=True) - - # Check that the last two ops didn't actually modify anything - 
self.assertTrue('a.b' not in self.db.test.find_one()) + self.assertTrue(self.db.test.insert_one({"hello": "world"})) # Modify shouldn't check keys... - self.assertTrue(self.db.test.update({"hello": "world"}, - {"$set": {"foo.bar": "baz"}}, - upsert=True)) + self.assertTrue( + self.db.test.update_one({"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True) + ) # I know this seems like testing the server but I'd like to be notified # by CI if the server's behavior changes here. doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) - self.assertRaises(OperationFailure, self.db.test.update, - {"hello": "world"}, doc, upsert=True) + with self.assertRaises(OperationFailure): + self.db.test.update_one({"hello": "world"}, doc, upsert=True) # This is going to cause keys to be checked and raise InvalidDocument. # That's OK assuming the server's behavior in the previous assert # doesn't change. If the behavior changes checking the first key for # '$' in update won't be good enough anymore. doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) - self.assertRaises(expected, self.db.test.update, - {"hello": "world"}, doc, upsert=True) + with self.assertRaises(OperationFailure): + self.db.test.replace_one({"hello": "world"}, doc, upsert=True) # Replace with empty document - self.assertNotEqual(0, self.db.test.update({"hello": "world"}, - {})['n']) + self.assertNotEqual(0, (self.db.test.replace_one({"hello": "world"}, {})).matched_count) - def test_safe_save(self): + def test_acknowledged_delete(self): db = self.db db.drop_collection("test") - db.test.create_index("hello", unique=True) - - db.test.save({"hello": "world"}) - db.test.save({"hello": "world"}, w=0) - self.assertTrue("E11000" in db.error()["err"]) + db.test.insert_many([{"x": 1}, {"x": 1}]) + self.assertEqual(2, (db.test.delete_many({})).deleted_count) + self.assertEqual(0, (db.test.delete_many({})).deleted_count) - self.assertRaises(OperationFailure, db.test.save, - {"hello": "world"}) + @client_context.require_version_max(4, 9) + def test_manual_last_error(self): + coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) + coll.insert_one({"x": 1}) + self.db.command("getlasterror", w=1, wtimeout=1) - def test_safe_remove(self): + def test_count_documents(self): db = self.db db.drop_collection("test") - db.create_collection("test", capped=True, size=1000) - - db.test.insert({"x": 1}) - self.assertEqual(1, db.test.count()) - - self.assertEqual(None, db.test.remove({"x": 1}, w=0)) - self.assertEqual(1, db.test.count()) - - if version.at_least(db.connection, (1, 1, 3, -1)): - self.assertRaises(OperationFailure, db.test.remove, - {"x": 1}) - else: # Just test that it doesn't blow up - db.test.remove({"x": 1}) - + self.addCleanup(db.drop_collection, "test") + + self.assertEqual(db.test.count_documents({}), 0) + db.wrong.insert_many([{}, {}]) + self.assertEqual(db.test.count_documents({}), 0) + db.test.insert_many([{}, {}]) + self.assertEqual(db.test.count_documents({}), 2) + db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) + + def test_estimated_document_count(self): + db = self.db db.drop_collection("test") - db.test.insert({"x": 1}) - db.test.insert({"x": 1}) - self.assertEqual(2, db.test.remove({})["n"]) - self.assertEqual(0, db.test.remove({})["n"]) - - def test_last_error_options(self): - if not version.at_least(self.client, (1, 5, 1)): - raise 
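# For reference alongside the count() removals in this region:
# count_documents() runs a real (filterable) query, while
# estimated_document_count() reads collection metadata and takes no filter.
# A minimal sketch on a fresh collection; all names are assumptions.
from pymongo import MongoClient

coll = MongoClient().test_db.demo
coll.drop()
coll.insert_many([{"foo": "bar"}, {"foo": "baz"}])
print(coll.count_documents({"foo": "bar"}))  # exact, filtered: 1
print(coll.estimated_document_count())       # fast, metadata-based: 2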
SkipTest("getLastError options require MongoDB >= 1.5.1") - - self.db.test.save({"x": 1}, w=1, wtimeout=1) - self.db.test.insert({"x": 1}, w=1, wtimeout=1) - self.db.test.remove({"x": 1}, w=1, wtimeout=1) - self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1) - - ismaster = self.client.admin.command("ismaster") - if ismaster.get("setName"): - w = len(ismaster["hosts"]) + 1 - self.assertRaises(WTimeoutError, self.db.test.save, - {"x": 1}, w=w, wtimeout=1) - self.assertRaises(WTimeoutError, self.db.test.insert, - {"x": 1}, w=w, wtimeout=1) - self.assertRaises(WTimeoutError, self.db.test.update, - {"x": 1}, {"y": 2}, w=w, wtimeout=1) - self.assertRaises(WTimeoutError, self.db.test.remove, - {"x": 1}, w=w, wtimeout=1) - - try: - self.db.test.save({"x": 1}, w=w, wtimeout=1) - except WTimeoutError, exc: - # Just check that we set the error document. Fields - # vary by MongoDB version. - self.assertTrue(exc.details is not None) - else: - self.fail("WTimeoutError was not raised") - - # can't use fsync and j options together - self.assertRaises(OperationFailure, self.db.test.insert, - {"_id": 1}, j=True, fsync=True) + self.addCleanup(db.drop_collection, "test") - def test_manual_last_error(self): - self.db.test.save({"x": 1}, w=0) - self.db.command("getlasterror", w=1, wtimeout=1) + self.assertEqual(db.test.estimated_document_count(), 0) + db.wrong.insert_many([{}, {}]) + self.assertEqual(db.test.estimated_document_count(), 0) + db.test.insert_many([{}, {}]) + self.assertEqual(db.test.estimated_document_count(), 2) - def test_count(self): + def test_aggregate(self): db = self.db db.drop_collection("test") + db.test.insert_one({"foo": [1, 2]}) - self.assertEqual(db.test.count(), 0) - db.test.save({}) - db.test.save({}) - self.assertEqual(db.test.count(), 2) - db.test.save({'foo': 'bar'}) - db.test.save({'foo': 'baz'}) - self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1) - self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2) + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] - def test_aggregate(self): - if not version.at_least(self.db.connection, (2, 1, 0)): - raise SkipTest("The aggregate command requires MongoDB >= 2.1.0") + pipeline = {"$project": {"_id": False, "foo": True}} + result = db.test.aggregate([pipeline]) + self.assertIsInstance(result, CommandCursor) + self.assertEqual([{"foo": [1, 2]}], result.to_list()) + + # Test write concern. + with self.write_concern_collection() as coll: + coll.aggregate([{"$out": "output-collection"}]) + + def test_aggregate_raw_bson(self): db = self.db db.drop_collection("test") - db.test.save({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) - self.assertRaises(TypeError, db.test.aggregate, "wow") + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] pipeline = {"$project": {"_id": False, "foo": True}} - for result in [ - db.test.aggregate(pipeline), - db.test.aggregate([pipeline]), - db.test.aggregate((pipeline,))]: - - self.assertEqual(1.0, result['ok']) - self.assertEqual([{'foo': [1, 2]}], result['result']) - - def test_aggregate_with_compile_re(self): - # See SERVER-6470. 
- if not version.at_least(self.db.connection, (2, 3, 2)): - raise SkipTest( - "Retrieving a regex with aggregation requires " - "MongoDB >= 2.3.2") - - db = self.client.pymongo_test - db.test.drop() - db.test.insert({'r': re.compile('.*')}) - - result = db.test.aggregate([]) - self.assertTrue(isinstance(result['result'][0]['r'], RE_TYPE)) - result = db.test.aggregate([], compile_re=False) - self.assertTrue(isinstance(result['result'][0]['r'], Regex)) + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = coll.aggregate([pipeline]) + self.assertIsInstance(result, CommandCursor) + first_result = next(result) + self.assertIsInstance(first_result, RawBSONDocument) + self.assertEqual([1, 2], list(first_result["foo"])) def test_aggregation_cursor_validation(self): - if not version.at_least(self.db.connection, (2, 5, 1)): - raise SkipTest("Aggregation cursor requires MongoDB >= 2.5.1") db = self.db - projection = {'$project': {'_id': '$_id'}} - cursor = db.test.aggregate(projection, cursor={}) - self.assertTrue(isinstance(cursor, CommandCursor)) + projection = {"$project": {"_id": "$_id"}} + cursor = db.test.aggregate([projection], cursor={}) + self.assertIsInstance(cursor, CommandCursor) def test_aggregation_cursor(self): - if not version.at_least(self.db.connection, (2, 5, 1)): - raise SkipTest("Aggregation cursor requires MongoDB >= 2.5.1") db = self.db - if self.setname: - db = MongoReplicaSetClient(host=self.client.host, - port=self.client.port, - replicaSet=self.setname)[db.name] + if client_context.has_secondaries: # Test that getMore messages are sent to the right server. - db.read_preference = ReadPreference.SECONDARY + db = self.client.get_database( + db.name, + read_preference=ReadPreference.SECONDARY, + write_concern=WriteConcern(w=self.w), + ) for collection_size in (10, 1000): db.drop_collection("test") - db.test.insert([{'_id': i} for i in range(collection_size)], - w=self.w) + db.test.insert_many([{"_id": i} for i in range(collection_size)]) expected_sum = sum(range(collection_size)) # Use batchSize to ensure multiple getMore messages - cursor = db.test.aggregate( - {'$project': {'_id': '$_id'}}, - cursor={'batchSize': 5}) - - self.assertEqual( - expected_sum, - sum(doc['_id'] for doc in cursor)) - - def test_parallel_scan(self): - if is_mongos(self.db.connection): - raise SkipTest("mongos does not support parallel_scan") - if not version.at_least(self.db.connection, (2, 5, 5)): - raise SkipTest("Requires MongoDB >= 2.5.5") - db = self.db - db.drop_collection("test") - if self.setname: - db = MongoReplicaSetClient(host=self.client.host, - port=self.client.port, - replicaSet=self.setname)[db.name] - # Test that getMore messages are sent to the right server. - db.read_preference = ReadPreference.SECONDARY - coll = db.test - coll.insert(({'_id': i} for i in xrange(8000)), w=self.w) - docs = [] - threads = [threading.Thread(target=docs.extend, args=(cursor,)) - for cursor in coll.parallel_scan(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - self.assertEqual( - set(range(8000)), - set(doc['_id'] for doc in docs)) + cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) + + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor.to_list())) + + # Test that batchSize is handled properly. 
+ cursor = db.test.aggregate([], batchSize=5) + self.assertEqual(5, len(cursor._data)) + # Force a getMore + cursor._data.clear() + next(cursor) + # batchSize - 1 + self.assertEqual(4, len(cursor._data)) + # Exhaust the cursor. There shouldn't be any errors. + for _doc in cursor: + pass - def test_group(self): - db = self.db - db.drop_collection("test") + def test_aggregation_cursor_alive(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{} for _ in range(3)]) + self.addCleanup(self.db.test.delete_many, {}) + cursor = self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) + n = 0 + while True: + cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + def test_invalid_session_parameter(self): + def try_invalid_session(): + with self.db.test.aggregate([], {}): # type:ignore + pass - self.assertEqual([], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.save({"a": 2}) - db.test.save({"b": 5}) - db.test.save({"a": 1}) - - self.assertEqual([{"count": 3}], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertEqual([{"count": 1}], - db.test.group([], {"a": {"$gt": 1}}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.save({"a": 2, "b": 3}) - - self.assertEqual([{"a": 2, "count": 2}, - {"a": None, "count": 1}, - {"a": 1, "count": 1}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - # modifying finalize - self.assertEqual([{"a": 2, "count": 3}, - {"a": None, "count": 2}, - {"a": 1, "count": 2}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { obj.count++; }")) - - # returning finalize - self.assertEqual([2, 1, 1], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # keyf - self.assertEqual([2, 2], - db.test.group("function (obj) { if (obj.a == 2) " - "{ return {a: true} }; " - "return {b: true}; }", {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # no key - self.assertEqual([{"count": 4}], - db.test.group(None, {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertRaises(OperationFailure, db.test.group, - [], {}, {}, "5 ++ 5") - - def test_group_with_scope(self): - db = self.db - db.drop_collection("test") - db.test.save({"a": 1}) - db.test.save({"b": 1}) - - reduce_function = "function (obj, prev) { prev.count += inc_value; }" - - self.assertEqual(2, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 1}))[0]['count']) - self.assertEqual(4, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 2}))[0]['count']) - - self.assertEqual(1, - db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 0.5}))[0]['count']) - - if version.at_least(db.connection, (1, 1)): - self.assertEqual(2, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 1}), - )[0]['count']) - - self.assertEqual(4, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 2}), - )[0]['count']) - - self.assertEqual(1, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 0.5}), - )[0]['count']) + with self.assertRaisesRegex(ValueError, "must be a ClientSession"): + try_invalid_session() def test_large_limit(self): db = self.db 
db.drop_collection("test_large_limit") - db.test_large_limit.create_index([('x', 1)]) + db.test_large_limit.create_index([("x", 1)]) my_str = "mongomongo" * 1000 - for i in range(2000): - doc = {"x": i, "y": my_str} - db.test_large_limit.insert(doc) + db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) i = 0 y = 0 - for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]): + for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): i += 1 y += doc["x"] @@ -1568,78 +1644,87 @@ def test_large_limit(self): def test_find_kwargs(self): db = self.db db.drop_collection("test") + db.test.insert_many({"x": i} for i in range(10)) - for i in range(10): - db.test.insert({"x": i}) - - self.assertEqual(10, db.test.count()) + self.assertEqual(10, db.test.count_documents({})) - sum = 0 + total = 0 for x in db.test.find({}, skip=4, limit=2): - sum += x["x"] + total += x["x"] - self.assertEqual(9, sum) + self.assertEqual(9, total) def test_rename(self): db = self.db db.drop_collection("test") db.drop_collection("foo") - self.assertRaises(TypeError, db.test.rename, 5) - self.assertRaises(InvalidName, db.test.rename, "") - self.assertRaises(InvalidName, db.test.rename, "te$t") - self.assertRaises(InvalidName, db.test.rename, ".test") - self.assertRaises(InvalidName, db.test.rename, "test.") - self.assertRaises(InvalidName, db.test.rename, "tes..t") + with self.assertRaises(TypeError): + db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + db.test.rename("") + with self.assertRaises(InvalidName): + db.test.rename("te$t") + with self.assertRaises(InvalidName): + db.test.rename(".test") + with self.assertRaises(InvalidName): + db.test.rename("test.") + with self.assertRaises(InvalidName): + db.test.rename("tes..t") - self.assertEqual(0, db.test.count()) - self.assertEqual(0, db.foo.count()) + self.assertEqual(0, db.test.count_documents({})) + self.assertEqual(0, db.foo.count_documents({})) - for i in range(10): - db.test.insert({"x": i}) + db.test.insert_many({"x": i} for i in range(10)) - self.assertEqual(10, db.test.count()) + self.assertEqual(10, db.test.count_documents({})) db.test.rename("foo") - self.assertEqual(0, db.test.count()) - self.assertEqual(10, db.foo.count()) + self.assertEqual(0, db.test.count_documents({})) + self.assertEqual(10, db.foo.count_documents({})) x = 0 for doc in db.foo.find(): self.assertEqual(x, doc["x"]) x += 1 - db.test.insert({}) - self.assertRaises(OperationFailure, db.foo.rename, "test") + db.test.insert_one({}) + with self.assertRaises(OperationFailure): + db.foo.rename("test") db.foo.rename("test", dropTarget=True) - # doesn't really test functionality, just that the option is set correctly - def test_snapshot(self): - db = self.db - - self.assertRaises(TypeError, db.test.find, snapshot=5) - - list(db.test.find(snapshot=True)) - self.assertRaises(OperationFailure, list, - db.test.find(snapshot=True).sort("foo", 1)) + with self.write_concern_collection() as coll: + coll.rename("foo") + @no_type_check def test_find_one(self): db = self.db db.drop_collection("test") - id = db.test.save({"hello": "world", "foo": "bar"}) + _id = (db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id - self.assertEqual("world", db.test.find_one()["hello"]) - self.assertEqual(db.test.find_one(id), db.test.find_one()) + self.assertEqual("world", (db.test.find_one())["hello"]) + self.assertEqual(db.test.find_one(_id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) 
self.assertEqual(db.test.find_one({}), db.test.find_one()) - self.assertEqual(db.test.find_one({"hello": "world"}), - db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) + + self.assertIn("hello", db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", db.test.find_one(projection=["foo"])) + + self.assertIn("hello", db.test.find_one(projection=("hello",))) + self.assertNotIn("hello", db.test.find_one(projection=("foo",))) + + self.assertIn("hello", db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", db.test.find_one(projection={"foo"})) + + self.assertIn("hello", db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", db.test.find_one(projection=frozenset(["foo"]))) - self.assertTrue("hello" in db.test.find_one(fields=["hello"])) - self.assertTrue("hello" not in db.test.find_one(fields=["foo"])) - self.assertEqual(["_id"], db.test.find_one(fields=[]).keys()) + self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) + self.assertIn("hello", list(db.test.find_one(projection={}))) + self.assertIn("hello", list(db.test.find_one(projection=[]))) self.assertEqual(None, db.test.find_one({"hello": "foo"})) self.assertEqual(None, db.test.find_one(ObjectId())) @@ -1648,156 +1733,136 @@ def test_find_one_non_objectid(self): db = self.db db.drop_collection("test") - db.test.save({"_id": 5}) + db.test.insert_one({"_id": 5}) self.assertTrue(db.test.find_one(5)) self.assertFalse(db.test.find_one(6)) - def test_remove_non_objectid(self): - db = self.db - db.drop_collection("test") - - db.test.save({"_id": 5}) - - self.assertEqual(1, db.test.count()) - db.test.remove(5) - self.assertEqual(0, db.test.count()) - def test_find_one_with_find_args(self): db = self.db db.drop_collection("test") - db.test.save({"x": 1}) - db.test.save({"x": 2}) - db.test.save({"x": 3}) + db.test.insert_many([{"x": i} for i in range(1, 4)]) - self.assertEqual(1, db.test.find_one()["x"]) - self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"]) + self.assertEqual(1, (db.test.find_one())["x"]) + self.assertEqual(2, (db.test.find_one(skip=1, limit=2))["x"]) def test_find_with_sort(self): db = self.db db.drop_collection("test") - db.test.save({"x": 2}) - db.test.save({"x": 1}) - db.test.save({"x": 3}) + db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) - self.assertEqual(2, db.test.find_one()["x"]) - self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"]) - self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"]) + self.assertEqual(2, (db.test.find_one())["x"]) + self.assertEqual(1, (db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (db.test.find_one(sort=[("x", -1)]))["x"]) - def to_list(foo): - return [bar["x"] for bar in foo] + def to_list(things): + return [thing["x"] for thing in things] self.assertEqual([2, 1, 3], to_list(db.test.find())) self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)]))) self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)]))) - self.assertRaises(TypeError, db.test.find, sort=5) - self.assertRaises(TypeError, db.test.find, sort="hello") - self.assertRaises(ValueError, db.test.find, sort=["hello", 1]) - - def test_insert_adds_id(self): - doc = {"hello": "world"} - self.db.test.insert(doc) - self.assertTrue("_id" in doc) - - docs = [{"hello": "world"}, {"hello": "world"}] - self.db.test.insert(docs) - for doc in docs: - self.assertTrue("_id" in doc) - - def test_save_adds_id(self): - doc = {"hello": "jesse"} - self.db.test.save(doc) - 
self.assertTrue("_id" in doc) + with self.assertRaises(TypeError): + db.test.find(sort=5) + with self.assertRaises(TypeError): + db.test.find(sort="hello") + with self.assertRaises(TypeError): + db.test.find(sort=["hello", 1]) # TODO doesn't actually test functionality, just that it doesn't blow up def test_cursor_timeout(self): - list(self.db.test.find(timeout=False)) - list(self.db.test.find(timeout=True)) + self.db.test.find(no_cursor_timeout=True).to_list() + self.db.test.find(no_cursor_timeout=False).to_list() def test_exhaust(self): - if is_mongos(self.db.connection): - self.assertRaises(InvalidOperation, - self.db.test.find, exhaust=True) + if is_mongos(self.db.client): + with self.assertRaises(InvalidOperation): + next(self.db.test.find(cursor_type=CursorType.EXHAUST)) return - self.assertRaises(TypeError, self.db.test.find, exhaust=5) # Limit is incompatible with exhaust. - self.assertRaises(InvalidOperation, - self.db.test.find, exhaust=True, limit=5) - cur = self.db.test.find(exhaust=True) - self.assertRaises(InvalidOperation, cur.limit, 5) + with self.assertRaises(InvalidOperation): + next(self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5)) + cur = self.db.test.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + cur.limit(5) + cur.next() cur = self.db.test.find(limit=5) - self.assertRaises(InvalidOperation, cur.add_option, 64) + with self.assertRaises(InvalidOperation): + cur.add_option(64) cur = self.db.test.find() cur.add_option(64) - self.assertRaises(InvalidOperation, cur.limit, 5) + with self.assertRaises(InvalidOperation): + cur.limit(5) self.db.drop_collection("test") # Insert enough documents to require more than one batch - self.db.test.insert([{'i': i} for i in xrange(150)]) + self.db.test.insert_many([{"i": i} for i in range(150)]) - client = get_client(max_pool_size=1) - socks = get_pool(client).sockets - self.assertEqual(1, len(socks)) + client = self.rs_or_single_client(maxPoolSize=1) + pool = get_pool(client) # Make sure the socket is returned after exhaustion. - cur = client[self.db.name].test.find(exhaust=True) - cur.next() - self.assertEqual(0, len(socks)) - for doc in cur: + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) + next(cur) + self.assertEqual(0, len(pool.conns)) + for _ in cur: pass - self.assertEqual(1, len(socks)) + self.assertEqual(1, len(pool.conns)) # Same as previous but don't call next() - for doc in client[self.db.name].test.find(exhaust=True): + for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): pass - self.assertEqual(1, len(socks)) - - # If the Cursor instance is discarded before being - # completely iterated we have to close and - # discard the socket. - cur = client[self.db.name].test.find(exhaust=True) - cur.next() - self.assertEqual(0, len(socks)) - if sys.platform.startswith('java') or 'PyPy' in sys.version: - # Don't wait for GC or use gc.collect(), it's unreliable. - cur.close() + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. 
+ for _ in range(3): + next(cur) + else: + next(cur) + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + cur.close() cur = None + # Wait until the background thread returns the socket. + wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. - self.assertEqual(0, len(socks)) + self.assertEqual(0, len(pool.conns)) def test_distinct(self): - if not version.at_least(self.db.connection, (1, 1)): - raise SkipTest("distinct command requires MongoDB >= 1.1") - self.db.drop_collection("test") test = self.db.test - test.save({"a": 1}) - test.save({"a": 2}) - test.save({"a": 2}) - test.save({"a": 2}) - test.save({"a": 3}) + test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = test.distinct("a") distinct.sort() self.assertEqual([1, 2, 3], distinct) - distinct = test.find({'a': {'$gt': 1}}).distinct("a") + distinct = test.find({"a": {"$gt": 1}}).distinct("a") distinct.sort() + self.assertEqual([2, 3], distinct) + distinct = test.distinct("a", {"a": {"$gt": 1}}) + distinct.sort() self.assertEqual([2, 3], distinct) self.db.drop_collection("test") - test.save({"a": {"b": "a"}, "c": 12}) - test.save({"a": {"b": "b"}, "c": 12}) - test.save({"a": {"b": "c"}, "c": 12}) - test.save({"a": {"b": "c"}, "c": 12}) + test.insert_one({"a": {"b": "a"}, "c": 12}) + test.insert_one({"a": {"b": "b"}, "c": 12}) + test.insert_one({"a": {"b": "c"}, "c": 12}) + test.insert_one({"a": {"b": "c"}, "c": 12}) distinct = test.distinct("a.b") distinct.sort() @@ -1806,526 +1871,370 @@ def test_distinct(self): def test_query_on_query_field(self): self.db.drop_collection("test") - self.db.test.save({"query": "foo"}) - self.db.test.save({"bar": "foo"}) + self.db.test.insert_one({"query": "foo"}) + self.db.test.insert_one({"bar": "foo"}) - self.assertEqual(1, - self.db.test.find({"query": {"$ne": None}}).count()) - self.assertEqual(1, - len(list(self.db.test.find({"query": {"$ne": None}}))) - ) + self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(self.db.test.find({"query": {"$ne": None}}).to_list())) def test_min_query(self): self.db.drop_collection("test") - self.db.test.save({"x": 1}) - self.db.test.save({"x": 2}) + self.db.test.insert_many([{"x": 1}, {"x": 2}]) self.db.test.create_index("x") - self.assertEqual(1, len(list(self.db.test.find({"$min": {"x": 2}, - "$query": {}})))) - self.assertEqual(2, self.db.test.find({"$min": {"x": 2}, - "$query": {}})[0]["x"]) + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") - def test_insert_large_document(self): - max_size = self.db.connection.max_bson_size - half_size = int(max_size / 2) - if version.at_least(self.db.connection, (1, 7, 4)): - self.assertEqual(max_size, 16777216) - - expected = DocumentTooLarge - if version.at_least(self.client, (2, 5, 4, -1)): - # Document too large handled by the server - expected = OperationFailure - self.assertRaises(expected, self.db.test.insert, - {"foo": "x" * max_size}) - self.assertRaises(expected, self.db.test.save, - {"foo": "x" * max_size}) - self.assertRaises(expected, self.db.test.insert, - [{"x": 1}, {"foo": "x" * max_size}]) - self.db.test.insert([{"foo": "x" * half_size}, - {"foo": "x" * half_size}]) - - self.db.test.insert({"bar": "x"}) - # Use w=0 here to test legacy doc size checking in all server versions - self.assertRaises(DocumentTooLarge, self.db.test.update, - {"bar": "x"}, 
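# distinct() sketch matching the updated test_distinct: the key comes first
# and an optional filter second, and the same filter can instead be applied
# on a cursor via find(...).distinct(). Setup names are assumptions.
from pymongo import MongoClient

coll = MongoClient().test_db.demo
coll.drop()
coll.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 3}])
print(sorted(coll.distinct("a")))                     # [1, 2, 3]
print(sorted(coll.distinct("a", {"a": {"$gt": 1}})))  # [2, 3]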
{"bar": "x" * (max_size - 14)}, w=0) - # This will pass with OP_UPDATE or the update command. - self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 32)}) - - def test_insert_large_batch(self): - max_bson_size = self.client.max_bson_size - if version.at_least(self.client, (2, 5, 4, -1)): - # Write commands are limited to 16MB + 16k per batch - big_string = 'x' * int(max_bson_size / 2) - else: - big_string = 'x' * (max_bson_size - 100) + docs = cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(2, docs[0]["x"]) + + def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + self.db.test.drop() + n_docs = client_context.max_write_batch_size + 100 + self.db.test.insert_many([{} for _ in range(n_docs)]) + self.assertEqual(n_docs, self.db.test.count_documents({})) self.db.test.drop() - self.assertEqual(0, self.db.test.count()) - # Batch insert that requires 2 batches - batch = [{'x': big_string}, {'x': big_string}, - {'x': big_string}, {'x': big_string}] - self.assertTrue(self.db.test.insert(batch, w=1)) - self.assertEqual(4, self.db.test.count()) + def test_insert_many_large_batch(self): + # Tests legacy insert. + db = self.client.test_insert_large_batch + self.addCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) - batch[1]['_id'] = batch[0]['_id'] + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + db.collection_0.insert_many(successful_insert) + self.assertEqual(4, db.collection_0.count_documents({})) - # Test that inserts fail after first error, acknowledged. - self.db.test.drop() - self.assertRaises(DuplicateKeyError, self.db.test.insert, batch, w=1) - self.assertEqual(1, self.db.test.count()) + db.collection_0.drop() - # Test that inserts fail after first error, unacknowledged. - self.db.test.drop() - self.client.start_request() - try: - self.assertTrue(self.db.test.insert(batch, w=0)) - self.assertEqual(1, self.db.test.count()) - finally: - self.client.end_request() + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] - # 2 batches, 2 errors, acknowledged, continue on error - self.db.test.drop() - batch[3]['_id'] = batch[2]['_id'] - try: - self.db.test.insert(batch, continue_on_error=True, w=1) - except OperationFailure, e: - # Make sure we report the last error, not the first. - self.assertTrue(str(batch[2]['_id']) in str(e)) - else: - self.fail('OperationFailure not raised.') - # Only the first and third documents should be inserted. - self.assertEqual(2, self.db.test.count()) + with self.assertRaises(BulkWriteError): + db.collection_1.insert_many(insert_second_fails) - # 2 batches, 2 errors, unacknowledged, continue on error - self.db.test.drop() - self.client.start_request() - try: - self.assertTrue(self.db.test.insert(batch, continue_on_error=True, w=0)) - # Only the first and third documents should be inserted. - self.assertEqual(2, self.db.test.count()) - finally: - self.client.end_request() + self.assertEqual(1, db.collection_1.count_documents({})) - def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. 
- self.db.test.remove() - n_docs = 2100 - self.db.test.insert({} for _ in range(n_docs)) - self.assertEqual(n_docs, self.db.test.count()) - self.db.test.remove() - - # Starting in PyMongo 2.6 we no longer use message.insert for inserts, but - # message.insert is part of the public API. Do minimal testing here; there - # isn't really a better place. - def test_insert_message_creation(self): - send = self.db.connection._send_message - name = "%s.%s" % (self.db.name, "test") - - def do_insert(args): - send(message_module.insert(*args), args[3]) + db.collection_1.drop() - self.db.drop_collection("test") - self.db.test.insert({'_id': 0}, w=1) - self.assertTrue(1, self.db.test.count()) - - simple_args = (name, [{'_id': 0}], True, False, {}, False, 3) - gle_args = (name, [{'_id': 0}], True, True, {'w': 1}, False, 3) - coe_args = (name, [{'_id': 0}, {'_id': 1}], - True, True, {'w': 1}, True, 3) - - self.assertEqual(None, do_insert(simple_args)) - self.assertTrue(1, self.db.test.count()) - self.assertRaises(DuplicateKeyError, do_insert, gle_args) - self.assertTrue(1, self.db.test.count()) - self.assertRaises(DuplicateKeyError, do_insert, coe_args) - self.assertTrue(2, self.db.test.count()) - - if have_uuid: - doc = {'_id': 2, 'uuid': uuid.uuid4()} - uuid_sub_args = (name, [doc], - True, True, {'w': 1}, True, 6) - do_insert(uuid_sub_args) - coll = self.db.test - self.assertNotEqual(doc, coll.find_one({'_id': 2})) - coll.uuid_subtype = 6 - self.assertEqual(doc, coll.find_one({'_id': 2})) - - def test_map_reduce(self): - if not version.at_least(self.db.connection, (1, 1, 1)): - raise SkipTest("mapReduce command requires MongoDB >= 1.1.1") + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_second_fails) - db = self.db - db.drop_collection("test") + def async_lambda(): + return db.collection_2.count_documents({}) == 1 - db.test.insert({"id": 1, "tags": ["dog", "cat"]}) - db.test.insert({"id": 2, "tags": ["cat"]}) - db.test.insert({"id": 3, "tags": ["mouse", "cat", "dog"]}) - db.test.insert({"id": 4, "tags": []}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - - if version.at_least(self.db.connection, (1, 7, 4)): - db.test.insert({"id": 5, "tags": ["hampster"]}) - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - db.test.remove({"id": 5}) - - result = db.test.map_reduce(map, reduce, - out={'merge': 'mrunittests'}) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce(map, reduce, - out={'reduce': 'mrunittests'}) - - self.assertEqual(6, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(4, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(2, result.find_one({"_id": "mouse"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce( - map, - reduce, - 
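# Sketch of the ordered/unordered semantics that test_insert_many_large_batch
# checks: ordered inserts stop at the first error, while unordered inserts
# attempt every document and report all failures in one BulkWriteError whose
# .details mirrors the server response. Names here are illustrative.
from pymongo import MongoClient
from pymongo.errors import BulkWriteError

coll = MongoClient().test_db.demo
coll.drop()
docs = [{"_id": 1}, {"_id": 1}, {"_id": 2}]
try:
    coll.insert_many(docs, ordered=False)
except BulkWriteError as exc:
    print(exc.details["nInserted"])    # 2: the first and third documents got in
    print(exc.details["writeErrors"])  # one duplicate-key failure for doc 2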
out={'replace': 'mrunittests'} - ) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) + wait_until(async_lambda, "insert 1 document", timeout=60) - if (is_mongos(self.db.connection) - and not version.at_least(self.db.connection, (2, 1, 2))): - pass - else: - result = db.test.map_reduce(map, reduce, - out=SON([('replace', 'mrunittests'), - ('db', 'mrtestdb') - ])) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - self.client.drop_database('mrtestdb') - - full_result = db.test.map_reduce(map, reduce, - out='mrunittests', full_response=True) - self.assertEqual(6, full_result["counts"]["emit"]) - - result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2) - self.assertEqual(2, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(None, result.find_one({"_id": "mouse"})) - - if version.at_least(self.db.connection, (1, 7, 4)): - result = db.test.map_reduce(map, reduce, out={'inline': 1}) - self.assertTrue(isinstance(result, dict)) - self.assertTrue('results' in result) - self.assertTrue(result['results'][1]["_id"] in ("cat", - "dog", - "mouse")) - - result = db.test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(3, len(result)) - self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse")) - - full_result = db.test.inline_map_reduce(map, reduce, - full_response=True) - self.assertEqual(6, full_result["counts"]["emit"]) + db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, db.collection_3.count_documents({})) + + db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_two_failures, ordered=False) + + def async_lambda(): + return db.collection_4.count_documents({}) == 2 + + # Only the first and third documents are inserted. + wait_until(async_lambda, "insert 2 documents", timeout=60) + + db.collection_4.drop() def test_messages_with_unicode_collection_names(self): db = self.db - db[u"Employés"].insert({"x": 1}) - db[u"Employés"].update({"x": 1}, {"x": 2}) - db[u"Employés"].remove({}) - db[u"Employés"].find_one() - list(db[u"Employés"].find()) + db["Employés"].insert_one({"x": 1}) + db["Employés"].replace_one({"x": 1}, {"x": 2}) + db["Employés"].delete_many({}) + db["Employés"].find_one() + db["Employés"].find().to_list() - def test_drop_indexes_non_existant(self): + def test_drop_indexes_non_existent(self): self.db.drop_collection("test") self.db.test.drop_indexes() # This is really a bson test but easier to just reproduce it here... 
# (Shame on me) def test_bad_encode(self): - c = self.db.test - self.assertRaises(InvalidDocument, c.save, {"x": c}) - - def test_bad_dbref(self): c = self.db.test c.drop() + with self.assertRaises(InvalidDocument): + c.insert_one({"x": c}) - # Incomplete DBRefs. - self.assertRaises( - InvalidDocument, - c.insert, {'ref': {'$ref': 'collection'}}) - - self.assertRaises( - InvalidDocument, - c.insert, {'ref': {'$id': ObjectId()}}) - - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} - - # Starting with MongoDB 2.5.2 this is no longer possible - # from insert, update, or findAndModify. - if not version.at_least(self.db.connection, (2, 5, 2)): - # Force insert of ref without $id. - c.insert(ref_only, check_keys=False) - self.assertEqual(DBRef('collection', id=None), - c.find_one()['ref']) - - c.drop() + class BadGetAttr(dict): + def __getattr__(self, name): + pass - # DBRef without $ref is decoded as normal subdocument. - c.insert(id_only, check_keys=False) - self.assertEqual(id_only, c.find_one()) + bad = BadGetAttr([("foo", "bar")]) + c.insert_one({"bad": bad}) + self.assertEqual("bar", (c.find_one())["bad"]["foo"]) # type: ignore - def test_as_class(self): + def test_array_filters_validation(self): + # array_filters must be a list. + c = self.db.test + with self.assertRaises(TypeError): + c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + update = {"$set": {"a": 1}} + c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] + + def test_array_filters_unacknowledged(self): + c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(ConfigurationError): + c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + c_w0.find_one_and_update({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + + def test_find_one_and(self): c = self.db.test c.drop() - c.insert({"x": 1}) + c.insert_one({"_id": 1, "i": 1}) - doc = c.find().next() - self.assertTrue(isinstance(doc, dict)) - doc = c.find().next() - self.assertFalse(isinstance(doc, SON)) - doc = c.find(as_class=SON).next() - self.assertTrue(isinstance(doc, SON)) + self.assertEqual({"_id": 1, "i": 1}, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) - self.assertTrue(isinstance(c.find_one(), dict)) - self.assertFalse(isinstance(c.find_one(), SON)) - self.assertTrue(isinstance(c.find_one(as_class=SON), SON)) + self.assertEqual({"_id": 1, "i": 3}, c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, c.find_one({"_id": 1})) - self.assertEqual(1, c.find_one(as_class=SON)["x"]) - doc = c.find(as_class=SON).next() - self.assertEqual(1, doc["x"]) + self.assertEqual(None, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + 
self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) - def test_find_and_modify(self): - c = self.db.test c.drop() - c.insert({'_id': 1, 'i': 1}) - - # Test that we raise DuplicateKeyError when appropriate. - # MongoDB doesn't have a code field for DuplicateKeyError - # from commands before 2.2. - if version.at_least(self.db.connection, (2, 2)): - c.ensure_index('i', unique=True) - self.assertRaises(DuplicateKeyError, - c.find_and_modify, query={'i': 1, 'j': 1}, - update={'$set': {'k': 1}}, upsert=True) - c.drop_indexes() - - # Test correct findAndModify - self.assertEqual({'_id': 1, 'i': 1}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, remove=True)) - - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - # The return value changed in 2.1.2. See SERVER-6226. - if version.at_least(self.db.connection, (2, 1, 2)): - self.assertEqual(None, c.find_and_modify({'_id': 1}, - {'$inc': {'i': 1}}, - upsert=True)) - else: - self.assertEqual({}, c.find_and_modify({'_id': 1}, - {'$inc': {'i': 1}}, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - upsert=True, new=True)) - - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - fields=['i'])) - self.assertEqual({'_id': 1, 'i': 4}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1})) - - # Test with full_response=True - # No lastErrorObject from mongos until 2.0 - if (not is_mongos(self.db.connection) and - version.at_least(self.db.connection, (2, 0))): - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 1, 'i': 5}, result["value"]) - self.assertEqual(True, result["lastErrorObject"]["updatedExisting"]) - - result = c.find_and_modify({'_id': 2}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 2, 'i': 1}, result["value"]) - self.assertEqual(False, result["lastErrorObject"]["updatedExisting"]) - - class ExtendedDict(dict): - pass + for j in range(5): + c.insert_one({"j": j, "i": 0}) + + sort = [("j", DESCENDING)] + self.assertEqual(4, (c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) + + def test_find_one_and_write_concern(self): + listener = OvertCommandListener() + db = (self.single_client(event_listeners=[listener]))[self.db.name] + # non-default WriteConcern. + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) + # default WriteConcern. + c_default = db.get_collection("test", write_concern=WriteConcern()) + # Authenticate the client and throw out auth commands from the listener. 
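+        # (The ping forces the connection handshake and authentication to
+        # finish, so after the reset the only command events recorded are the
+        # find-one-and-* operations themselves.)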
+ db.command("ping") + listener.reset() + c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + # Test write concern errors. + if client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}) - self.assertFalse(isinstance(result, ExtendedDict)) - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}, - as_class=ExtendedDict) - self.assertTrue(isinstance(result, ExtendedDict)) + c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() - def test_find_and_modify_with_sort(self): - c = self.db.test - c.drop() - for j in xrange(5): - c.insert({'j': j, 'i': 0}) - - sort={'j': DESCENDING} - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort={'j': ASCENDING} - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort=[('j', DESCENDING)] - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort=[('j', ASCENDING)] - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort=SON([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort=SON([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - try: - from collections import OrderedDict - sort=OrderedDict([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort=OrderedDict([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - except ImportError: - pass - # Test that a standard dict with two keys is rejected. 
- sort={'j': DESCENDING, 'foo': DESCENDING} - self.assertRaises(TypeError, c.find_and_modify, {}, - {'$inc': {'i': 1}}, - sort=sort) + c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() def test_find_with_nested(self): - if not version.at_least(self.db.connection, (2, 0, 0)): - raise SkipTest("nested $and and $or requires MongoDB >= 2.0") c = self.db.test c.drop() - c.insert([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4] + c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] self.assertEqual( [2], - [i['i'] for i in c.find({ - '$and': [ - { - # This clause gives us [1,2,4] - '$or': [ - {'i': {'$lte': 2}}, - {'i': {'$gt': 3}}, - ], - }, + [ + i["i"] + for i in c.find( { - # This clause gives us [2,3] - '$or': [ - {'i': 2}, - {'i': 3}, + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, ] - }, - ] - })] + } + ) + ], ) self.assertEqual( [0, 1, 2], - [i['i'] for i in c.find({ - '$or': [ - { - # This clause gives us [2] - '$and': [ - {'i': {'$gte': 2}}, - {'i': {'$lt': 3}}, - ], - }, + [ + i["i"] + for i in c.find( { - # This clause gives us [0,1] - '$and': [ - {'i': {'$gt': -100}}, - {'i': {'$lt': 2}}, + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, ] - }, - ] - })] + } + ) + ], ) - def test_disabling_manipulators(self): - - class IncByTwo(SONManipulator): - def transform_outgoing(self, son, collection): - if 'foo' in son: - son['foo'] += 2 - return son - - db = self.client.pymongo_test - db.add_son_manipulator(IncByTwo()) - c = db.test + def test_find_regex(self): + c = self.db.test c.drop() - c.insert({'foo': 0}) - self.assertEqual(2, c.find_one()['foo']) - self.assertEqual(0, c.find_one(manipulate=False)['foo']) + c.insert_one({"r": re.compile(".*")}) - self.assertEqual(2, c.find_one(manipulate=True)['foo']) - c.remove({}) - - def test_compile_re(self): - c = self.client.pymongo_test.test - c.drop() - c.insert({'r': re.compile('.*')}) + self.assertIsInstance((c.find_one())["r"], Regex) # type: ignore + for doc in c.find(): + self.assertIsInstance(doc["r"], Regex) + + def test_find_command_generation(self): + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) - # Test find_one with compile_re. - self.assertTrue(isinstance(c.find_one()['r'], RE_TYPE)) - self.assertTrue(isinstance(c.find_one(compile_re=False)['r'], Regex)) + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Collection(self.db, "test")) - # Test find with compile_re. 
-        for doc in c.find():
-            self.assertTrue(isinstance(doc['r'], RE_TYPE))
+    @client_context.require_version_min(5, 0, 0)
+    def test_helpers_with_let(self):
+        c = self.db.test
-        for doc in c.find(compile_re=False):
-            self.assertTrue(isinstance(doc['r'], Regex))
+        def afind(*args, **kwargs):
+            return c.find(*args, **kwargs)
+
+        helpers = [
+            (c.delete_many, ({}, {})),
+            (c.delete_one, ({}, {})),
+            (afind, ({},)),
+            (c.update_many, ({}, {"$inc": {"x": 3}})),
+            (c.update_one, ({}, {"$inc": {"x": 3}})),
+            (c.find_one_and_delete, ({}, {})),
+            (c.find_one_and_replace, ({}, {})),
+            (c.aggregate, ([],)),
+        ]
+        for let in [10, "str", [], False]:
+            for helper, args in helpers:
+                with self.assertRaisesRegex(TypeError, "let must be an instance of dict"):
+                    helper(*args, let=let)  # type: ignore
+        for helper, args in helpers:
+            helper(*args, let={})  # type: ignore


 if __name__ == "__main__":
diff --git a/test/test_collection_management.py b/test/test_collection_management.py
new file mode 100644
index 0000000000..063c20df8f
--- /dev/null
+++ b/test/test_collection_management.py
@@ -0,0 +1,41 @@
+# Copyright 2021-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the collection management unified spec tests."""
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+
+sys.path[0:0] = [""]
+
+from test import unittest
+from test.unified_format import generate_test_classes
+
+_IS_SYNC = True
+
+# Location of JSON test specifications.
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management")
+else:
+    _TEST_PATH = os.path.join(
+        pathlib.Path(__file__).resolve().parent.parent, "collection_management"
+    )
+
+# Generate unified tests.
+globals().update(generate_test_classes(_TEST_PATH, module=__name__))
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_command_logging.py b/test/test_command_logging.py
new file mode 100644
index 0000000000..cf865920ca
--- /dev/null
+++ b/test/test_command_logging.py
@@ -0,0 +1,44 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the command logging unified format spec tests."""
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+
+sys.path[0:0] = [""]
+
+from test import unittest
+from test.unified_format import generate_test_classes
+
+_IS_SYNC = True
+
+# Location of JSON test specifications.
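+# (When _IS_SYNC is False this file runs from a nested package, hence the
+# extra .parent when locating the shared JSON specs.)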
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_monitoring.py b/test/test_command_monitoring.py new file mode 100644 index 0000000000..4f5ef06f28 --- /dev/null +++ b/test/test_command_monitoring.py @@ -0,0 +1,45 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_comment.py b/test/test_comment.py new file mode 100644 index 0000000000..bcab0061fa --- /dev/null +++ b/test/test_comment.py @@ -0,0 +1,159 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the keyword argument 'comment' in various helpers.""" + +from __future__ import annotations + +import inspect +import sys + +sys.path[0:0] = [""] +from inspect import iscoroutinefunction +from test import IntegrationTest, client_context, unittest +from test.utils_shared import OvertCommandListener + +from bson.dbref import DBRef +from pymongo.operations import IndexModel +from pymongo.synchronous.command_cursor import CommandCursor + +_IS_SYNC = True + + +class TestComment(IntegrationTest): + def _test_ops( + self, + helpers, + already_supported, + listener, + ): + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + listener.reset() + kwargs = {"comment": cc} + try: + maybe_cursor = h(*args, **kwargs) + except Exception: + maybe_cursor = None + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" + ) + if isinstance(maybe_cursor, CommandCursor): + maybe_cursor.close() + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": + self.assertIn( + ":param comment:", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + listener.reset() + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_database_helpers(self): + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener])).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), + (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + self._test_ops(helpers, already_supported, listener) + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_client_helpers(self): + listener = OvertCommandListener() + cli = self.rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + self._test_ops(helpers, already_supported, listener) + + @client_context.require_version_min(4, 7, -1) + def test_collection_helpers(self): + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + 
coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + self._test_ops(helpers, already_supported, listener) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_common.py b/test/test_common.py index f05e406ccc..e69b421c9f 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -1,4 +1,4 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,495 +13,170 @@ # limitations under the License. """Test the pymongo common module.""" +from __future__ import annotations import sys -import unittest -import warnings +import uuid sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest +from test import IntegrationTest, client_context, connected, unittest -from bson.binary import UUIDLegacy, OLD_UUID_SUBTYPE, UUID_SUBTYPE -from bson.code import Code +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation +from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from bson.son import SON -from pymongo.connection import Connection -from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.errors import ConfigurationError, OperationFailure -from test import host, port, pair, version -from test.utils import drop_collections +from pymongo.errors import OperationFailure +from pymongo.write_concern import WriteConcern -have_uuid = True -try: - import uuid -except ImportError: - have_uuid = False +_IS_SYNC = True -class TestCommon(unittest.TestCase): - - def test_baseobject(self): - - # In Python 2.6+ we could use the catch_warnings context - # manager to test this warning nicely. As we can't do that - # we must test raising errors before the ignore filter is applied. 
- warnings.simplefilter("error", UserWarning) - try: - self.assertRaises(UserWarning, lambda: - MongoClient(host, port, wtimeout=1000, w=0)) - try: - MongoClient(host, port, wtimeout=1000, w=1) - except UserWarning: - self.fail() - - try: - MongoClient(host, port, wtimeout=1000) - except UserWarning: - self.fail() - finally: - warnings.resetwarnings() - warnings.simplefilter("ignore") - - # Connection tests - c = Connection(pair) - self.assertFalse(c.slave_okay) - self.assertFalse(c.safe) - self.assertEqual({}, c.get_lasterror_options()) - db = c.pymongo_test - db.drop_collection("test") - self.assertFalse(db.slave_okay) - self.assertFalse(db.safe) - self.assertEqual({}, db.get_lasterror_options()) - coll = db.test - self.assertFalse(coll.slave_okay) - self.assertFalse(coll.safe) - self.assertEqual({}, coll.get_lasterror_options()) - - self.assertEqual((False, {}), coll._get_write_mode()) - coll.safe = False - coll.write_concern.update(w=1) - self.assertEqual((True, {}), coll._get_write_mode()) - coll.write_concern.update(w=3) - self.assertEqual((True, {'w': 3}), coll._get_write_mode()) - - coll.safe = True - coll.write_concern.update(w=0) - self.assertEqual((False, {}), coll._get_write_mode()) - - coll = db.test - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - cursor = coll.find(slave_okay=True) - self.assertTrue(cursor._Cursor__slave_okay) - - # MongoClient test - c = MongoClient(pair) - self.assertFalse(c.slave_okay) - self.assertTrue(c.safe) - self.assertEqual({}, c.get_lasterror_options()) - db = c.pymongo_test - db.drop_collection("test") - self.assertFalse(db.slave_okay) - self.assertTrue(db.safe) - self.assertEqual({}, db.get_lasterror_options()) - coll = db.test - self.assertFalse(coll.slave_okay) - self.assertTrue(coll.safe) - self.assertEqual({}, coll.get_lasterror_options()) - - self.assertEqual((True, {}), coll._get_write_mode()) - coll.safe = False - coll.write_concern.update(w=1) - self.assertEqual((True, {}), coll._get_write_mode()) - coll.write_concern.update(w=3) - self.assertEqual((True, {'w': 3}), coll._get_write_mode()) - - coll.safe = True - coll.write_concern.update(w=0) - self.assertEqual((False, {}), coll._get_write_mode()) - - coll = db.test - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - cursor = coll.find(slave_okay=True) - self.assertTrue(cursor._Cursor__slave_okay) - - # Setting any safe operations overrides explicit safe - self.assertTrue(MongoClient(host, port, wtimeout=1000, safe=False).safe) - - c = MongoClient(pair, slaveok=True, w='majority', - wtimeout=300, fsync=True, j=True) - self.assertTrue(c.slave_okay) - self.assertTrue(c.safe) - d = {'w': 'majority', 'wtimeout': 300, 'fsync': True, 'j': True} - self.assertEqual(d, c.get_lasterror_options()) - db = c.pymongo_test - self.assertTrue(db.slave_okay) - self.assertTrue(db.safe) - self.assertEqual(d, db.get_lasterror_options()) - coll = db.test - self.assertTrue(coll.slave_okay) - self.assertTrue(coll.safe) - self.assertEqual(d, coll.get_lasterror_options()) - cursor = coll.find() - self.assertTrue(cursor._Cursor__slave_okay) - cursor = coll.find(slave_okay=False) - self.assertFalse(cursor._Cursor__slave_okay) - - c = MongoClient('mongodb://%s/?' - 'w=2;wtimeoutMS=300;fsync=true;' - 'journal=true' % (pair,)) - self.assertTrue(c.safe) - d = {'w': 2, 'wtimeout': 300, 'fsync': True, 'j': True} - self.assertEqual(d, c.get_lasterror_options()) - - c = MongoClient('mongodb://%s/?' 
- 'slaveok=true;w=1;wtimeout=300;' - 'fsync=true;j=true' % (pair,)) - self.assertTrue(c.slave_okay) - self.assertTrue(c.safe) - d = {'w': 1, 'wtimeout': 300, 'fsync': True, 'j': True} - self.assertEqual(d, c.get_lasterror_options()) - self.assertEqual(d, c.write_concern) - db = c.pymongo_test - self.assertTrue(db.slave_okay) - self.assertTrue(db.safe) - self.assertEqual(d, db.get_lasterror_options()) - self.assertEqual(d, db.write_concern) - coll = db.test - self.assertTrue(coll.slave_okay) - self.assertTrue(coll.safe) - self.assertEqual(d, coll.get_lasterror_options()) - self.assertEqual(d, coll.write_concern) - cursor = coll.find() - self.assertTrue(cursor._Cursor__slave_okay) - cursor = coll.find(slave_okay=False) - self.assertFalse(cursor._Cursor__slave_okay) - - c.unset_lasterror_options() - self.assertTrue(c.slave_okay) - self.assertTrue(c.safe) - c.safe = False - self.assertFalse(c.safe) - c.slave_okay = False - self.assertFalse(c.slave_okay) - self.assertEqual({}, c.get_lasterror_options()) - self.assertEqual({}, c.write_concern) - db = c.pymongo_test - self.assertFalse(db.slave_okay) - self.assertFalse(db.safe) - self.assertEqual({}, db.get_lasterror_options()) - self.assertEqual({}, db.write_concern) - coll = db.test - self.assertFalse(coll.slave_okay) - self.assertFalse(coll.safe) - self.assertEqual({}, coll.get_lasterror_options()) - self.assertEqual({}, coll.write_concern) - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - cursor = coll.find(slave_okay=True) - self.assertTrue(cursor._Cursor__slave_okay) - - coll.set_lasterror_options(fsync=True) - self.assertEqual({'fsync': True}, coll.get_lasterror_options()) - self.assertEqual({'fsync': True}, coll.write_concern) - self.assertEqual({}, db.get_lasterror_options()) - self.assertEqual({}, db.write_concern) - self.assertFalse(db.safe) - self.assertEqual({}, c.get_lasterror_options()) - self.assertEqual({}, c.write_concern) - self.assertFalse(c.safe) - - db.set_lasterror_options(w='majority') - self.assertEqual({'fsync': True}, coll.get_lasterror_options()) - self.assertEqual({'fsync': True}, coll.write_concern) - self.assertEqual({'w': 'majority'}, db.get_lasterror_options()) - self.assertEqual({'w': 'majority'}, db.write_concern) - self.assertEqual({}, c.get_lasterror_options()) - self.assertEqual({}, c.write_concern) - self.assertFalse(c.safe) - db.slave_okay = True - self.assertTrue(db.slave_okay) - self.assertFalse(c.slave_okay) - self.assertFalse(coll.slave_okay) - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - cursor = db.coll2.find() - self.assertTrue(cursor._Cursor__slave_okay) - cursor = db.coll2.find(slave_okay=False) - self.assertFalse(cursor._Cursor__slave_okay) - - self.assertRaises(ConfigurationError, coll.set_lasterror_options, foo=20) - self.assertRaises(TypeError, coll._BaseObject__set_slave_okay, 20) - self.assertRaises(TypeError, coll._BaseObject__set_safe, 20) - - coll.remove() - self.assertEqual(None, coll.find_one(slave_okay=True)) - coll.unset_lasterror_options() - coll.set_lasterror_options(w=4, wtimeout=10) - # Fails if we don't have 4 active nodes or we don't have replication... - self.assertRaises(OperationFailure, coll.insert, {'foo': 'bar'}) - # Succeeds since we override the lasterror settings per query. 
- self.assertTrue(coll.insert({'foo': 'bar'}, fsync=True)) - drop_collections(db) - - def test_uuid_subtype(self): - if not have_uuid: - raise SkipTest("No uuid module") - - self.client = MongoClient(pair) - self.db = self.client.pymongo_test - coll = self.client.pymongo_test.uuid +class TestCommon(IntegrationTest): + def test_uuid_representation(self): + coll = self.db.uuid coll.drop() - def change_subtype(collection, subtype): - collection.uuid_subtype = subtype - # Test property - self.assertEqual(OLD_UUID_SUBTYPE, coll.uuid_subtype) - self.assertRaises(ConfigurationError, change_subtype, coll, 7) - self.assertRaises(ConfigurationError, change_subtype, coll, 2) + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) # Test basic query uu = uuid.uuid4() # Insert as binary subtype 3 - coll.insert({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) - coll.uuid_subtype = UUID_SUBTYPE - self.assertEqual(UUID_SUBTYPE, coll.uuid_subtype) - self.assertEqual(None, coll.find_one({'uu': uu})) - self.assertEqual(uu, coll.find_one({'uu': UUIDLegacy(uu)})['uu']) - - # Test Cursor.count - self.assertEqual(0, coll.find({'uu': uu}).count()) - coll.uuid_subtype = OLD_UUID_SUBTYPE - self.assertEqual(1, coll.find({'uu': uu}).count()) - - # Test remove - coll.uuid_subtype = UUID_SUBTYPE - coll.remove({'uu': uu}) - self.assertEqual(1, coll.count()) - coll.uuid_subtype = OLD_UUID_SUBTYPE - coll.remove({'uu': uu}) - self.assertEqual(0, coll.count()) - - # Test save - coll.insert({'_id': uu, 'i': 0}) - self.assertEqual(1, coll.count()) - self.assertEqual(1, coll.find({'_id': uu}).count()) - self.assertEqual(0, coll.find_one({'_id': uu})['i']) - doc = coll.find_one({'_id': uu}) - doc['i'] = 1 - coll.save(doc) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) - - # Test update - coll.uuid_subtype = UUID_SUBTYPE - coll.update({'_id': uu}, {'$set': {'i': 2}}) - coll.uuid_subtype = OLD_UUID_SUBTYPE - self.assertEqual(1, coll.find_one({'_id': uu})['i']) - coll.update({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options + coll.insert_one({"uu": uu}) + self.assertEqual(uu, (coll.find_one({"uu": uu}))["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual(STANDARD, coll.codec_options.uuid_representation) + self.assertEqual(None, coll.find_one({"uu": uu})) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uul, (coll.find_one({"uu": uul}))["uu"]) # type: ignore + + # Test count_documents + self.assertEqual(0, coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.count_documents({"uu": uu})) + + # Test delete + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.delete_one({"uu": uu}) + self.assertEqual(1, coll.count_documents({})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll.delete_one({"uu": uu}) + self.assertEqual(0, coll.count_documents({})) + + # Test update_one + coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, 
(coll.find_one({"_id": uu}))["i"]) # type: ignore + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test Cursor.distinct - self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) - coll.uuid_subtype = UUID_SUBTYPE - self.assertEqual([], coll.find({'_id': uu}).distinct('i')) + self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], coll.find({"_id": uu}).distinct("i")) - # Test find_and_modify - self.assertEqual(None, coll.find_and_modify({'_id': uu}, - {'$set': {'i': 5}})) - coll.uuid_subtype = OLD_UUID_SUBTYPE - self.assertEqual(2, coll.find_and_modify({'_id': uu}, - {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) + # Test findAndModify + self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, (coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test command - db = self.client.pymongo_test - no_obj_error = "No matching object found" - result = db.command('findAndModify', 'uuid', - allowable_errors=[no_obj_error], - uuid_subtype=UUID_SUBTYPE, - query={'_id': uu}, - update={'$set': {'i': 6}}) - self.assertEqual(None, result.get('value')) - self.assertEqual(5, db.command('findAndModify', 'uuid', - update={'$set': {'i': 6}}, - query={'_id': uu})['value']['i']) - self.assertEqual(6, db.command('findAndModify', 'uuid', - update={'$set': {'i': 7}}, - query={'_id': UUIDLegacy(uu)} - )['value']['i']) - - # Test (inline)_map_reduce - coll.drop() - coll.insert({"_id": uu, "x": 1, "tags": ["dog", "cat"]}) - coll.insert({"_id": uuid.uuid4(), "x": 3, - "tags": ["mouse", "cat", "dog"]}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - - coll.uuid_subtype = UUID_SUBTYPE - q = {"_id": uu} - if version.at_least(self.db.connection, (1, 7, 4)): - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual([], result) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(0, db.results.count()) - - coll.uuid_subtype = OLD_UUID_SUBTYPE - q = {"_id": uu} - if version.at_least(self.db.connection, (1, 7, 4)): - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual(2, len(result)) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(2, db.results.count()) - - db.drop_collection("result") - coll.drop() - - # Test group - coll.insert({"_id": uu, "a": 2}) - coll.insert({"_id": uuid.uuid4(), "a": 1}) - - reduce = "function (obj, prev) { prev.count++; }" - coll.uuid_subtype = UUID_SUBTYPE - self.assertEqual([], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) - coll.uuid_subtype = OLD_UUID_SUBTYPE - self.assertEqual([{"count": 1}], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) + self.assertEqual( + 5, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) + )["value"]["i"], + ) + self.assertEqual( + 6, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + 
query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) + )["value"]["i"], + ) def test_write_concern(self): - c = MongoClient(pair) + c = self.rs_or_single_client(connect=False) + self.assertEqual(WriteConcern(), c.write_concern) - self.assertEqual({}, c.write_concern) - wc = {'w': 2, 'wtimeout': 1000} - c.write_concern = wc - self.assertEqual(wc, c.write_concern) - wc = {'w': 3, 'wtimeout': 1000} - c.write_concern['w'] = 3 - self.assertEqual(wc, c.write_concern) - wc = {'w': 3} - del c.write_concern['wtimeout'] + c = self.rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + wc = WriteConcern(w=2, wtimeout=1000) self.assertEqual(wc, c.write_concern) - wc = {'w': 3, 'wtimeout': 1000} - c = MongoClient(pair, w=3, wtimeout=1000) - self.assertEqual(wc, c.write_concern) - wc = {'w': 2, 'wtimeout': 1000} - c.write_concern = wc - self.assertEqual(wc, c.write_concern) + # Can we override back to the server default? + db = c.get_database("pymongo_test", write_concern=WriteConcern()) + self.assertEqual(db.write_concern, WriteConcern()) db = c.pymongo_test self.assertEqual(wc, db.write_concern) coll = db.test self.assertEqual(wc, coll.write_concern) - coll.write_concern = {'j': True} - self.assertEqual({'j': True}, coll.write_concern) - self.assertEqual(wc, db.write_concern) - - wc = SON([('w', 2)]) - coll.write_concern = wc - self.assertEqual(wc.to_dict(), coll.write_concern) - def f(): - c.write_concern = {'foo': 'bar'} - self.assertRaises(ConfigurationError, f) - - def f(): - c.write_concern['foo'] = 'bar' - self.assertRaises(ConfigurationError, f) - - def f(): - c.write_concern = [('foo', 'bar')] - self.assertRaises(ConfigurationError, f) + cwc = WriteConcern(j=True) + coll = db.get_collection("test", write_concern=cwc) + self.assertEqual(cwc, coll.write_concern) + self.assertEqual(wc, db.write_concern) def test_mongo_client(self): - m = MongoClient(pair, w=0) + pair = client_context.pair + m = self.rs_or_single_client(w=0) coll = m.pymongo_test.write_concern_test coll.drop() doc = {"_id": ObjectId()} - coll.insert(doc) - self.assertTrue(coll.insert(doc, safe=False)) - self.assertTrue(coll.insert(doc, w=0)) - self.assertTrue(coll.insert(doc)) - self.assertRaises(OperationFailure, coll.insert, doc, safe=True) - self.assertRaises(OperationFailure, coll.insert, doc, w=1) + coll.insert_one(doc) + self.assertTrue(coll.insert_one(doc)) + coll = coll.with_options(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) - m = MongoClient(pair) + m = self.rs_or_single_client() coll = m.pymongo_test.write_concern_test - self.assertTrue(coll.insert(doc, safe=False)) - self.assertTrue(coll.insert(doc, w=0)) - self.assertRaises(OperationFailure, coll.insert, doc) - self.assertRaises(OperationFailure, coll.insert, doc, safe=True) - self.assertRaises(OperationFailure, coll.insert, doc, w=1) + new_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertTrue(new_coll.insert_one(doc)) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) - m = MongoClient("mongodb://%s/" % (pair,)) - self.assertTrue(m.safe) - coll = m.pymongo_test.write_concern_test - self.assertRaises(OperationFailure, coll.insert, doc) - m = MongoClient("mongodb://%s/?w=0" % (pair,)) - self.assertFalse(m.safe) - coll = m.pymongo_test.write_concern_test - self.assertTrue(coll.insert(doc)) - - # Equality tests - self.assertEqual(m, MongoClient("mongodb://%s/?w=0" % (pair,))) - self.assertFalse(m != MongoClient("mongodb://%s/?w=0" % (pair,))) + m = 
self.rs_or_single_client( + f"mongodb://{pair}/", replicaSet=client_context.replica_set_name + ) - def test_mongo_replica_set_client(self): - c = MongoClient(pair) - ismaster = c.admin.command('ismaster') - if 'setName' in ismaster: - setname = str(ismaster.get('setName')) - else: - raise SkipTest("Not connected to a replica set.") - m = MongoReplicaSetClient(pair, replicaSet=setname, w=0) coll = m.pymongo_test.write_concern_test - coll.drop() - doc = {"_id": ObjectId()} - coll.insert(doc) - self.assertTrue(coll.insert(doc, safe=False)) - self.assertTrue(coll.insert(doc, w=0)) - self.assertTrue(coll.insert(doc)) - self.assertRaises(OperationFailure, coll.insert, doc, safe=True) - self.assertRaises(OperationFailure, coll.insert, doc, w=1) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) + m = self.rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name + ) - m = MongoReplicaSetClient(pair, replicaSet=setname) coll = m.pymongo_test.write_concern_test - self.assertTrue(coll.insert(doc, safe=False)) - self.assertTrue(coll.insert(doc, w=0)) - self.assertRaises(OperationFailure, coll.insert, doc) - self.assertRaises(OperationFailure, coll.insert, doc, safe=True) - self.assertRaises(OperationFailure, coll.insert, doc, w=1) + coll.insert_one(doc) - m = MongoReplicaSetClient("mongodb://%s/?replicaSet=%s" % (pair, setname)) - self.assertTrue(m.safe) - coll = m.pymongo_test.write_concern_test - self.assertRaises(OperationFailure, coll.insert, doc) - m = MongoReplicaSetClient("mongodb://%s/?replicaSet=%s;w=0" % (pair, setname)) - self.assertFalse(m.safe) - coll = m.pymongo_test.write_concern_test - self.assertTrue(coll.insert(doc)) + # Equality tests + direct = connected(self.single_client(w=0)) + direct2 = connected(self.single_client(f"mongodb://{pair}/?w=0", **self.credentials)) + self.assertEqual(direct, direct2) + self.assertFalse(direct != direct2) + + def test_validate_boolean(self): + self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore if __name__ == "__main__": diff --git a/test/test_connection_logging.py b/test/test_connection_logging.py new file mode 100644 index 0000000000..253193cc43 --- /dev/null +++ b/test/test_connection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. 
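+# (generate_test_classes below builds one unittest class per JSON scenario
+# file in this directory and injects them into this module's globals().)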
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging")
+else:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging")
+
+
+globals().update(
+    generate_test_classes(
+        _TEST_PATH,
+        module=__name__,
+    )
+)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py
new file mode 100644
index 0000000000..1405824453
--- /dev/null
+++ b/test/test_connection_monitoring.py
@@ -0,0 +1,470 @@
+# Copyright 2019-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Execute the Connection Monitoring and Pooling (CMAP) spec tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import sys
+import time
+from pathlib import Path
+from test.utils import get_pool, get_pools
+
+sys.path[0:0] = [""]
+
+from test import IntegrationTest, client_context, client_knobs, unittest
+from test.pymongo_mocks import DummyMonitor
+from test.utils_shared import (
+    CMAPListener,
+    camel_to_snake,
+    wait_until,
+)
+from test.utils_spec_runner import SpecRunnerThread, SpecTestCreator
+
+from bson.objectid import ObjectId
+from bson.son import SON
+from pymongo.errors import (
+    ConnectionFailure,
+    OperationFailure,
+    PyMongoError,
+    WaitQueueTimeoutError,
+)
+from pymongo.monitoring import (
+    ConnectionCheckedInEvent,
+    ConnectionCheckedOutEvent,
+    ConnectionCheckOutFailedEvent,
+    ConnectionCheckOutFailedReason,
+    ConnectionCheckOutStartedEvent,
+    ConnectionClosedEvent,
+    ConnectionClosedReason,
+    ConnectionCreatedEvent,
+    ConnectionReadyEvent,
+    PoolClearedEvent,
+    PoolClosedEvent,
+    PoolCreatedEvent,
+    PoolReadyEvent,
+)
+from pymongo.read_preferences import ReadPreference
+from pymongo.synchronous.pool import PoolState, _PoolClosedError
+from pymongo.topology_description import updated_topology_description
+
+_IS_SYNC = True
+
+OBJECT_TYPES = {
+    # Event types.
+    "ConnectionCheckedIn": ConnectionCheckedInEvent,
+    "ConnectionCheckedOut": ConnectionCheckedOutEvent,
+    "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent,
+    "ConnectionClosed": ConnectionClosedEvent,
+    "ConnectionCreated": ConnectionCreatedEvent,
+    "ConnectionReady": ConnectionReadyEvent,
+    "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent,
+    "ConnectionPoolCreated": PoolCreatedEvent,
+    "ConnectionPoolReady": PoolReadyEvent,
+    "ConnectionPoolCleared": PoolClearedEvent,
+    "ConnectionPoolClosed": PoolClosedEvent,
+    # Error types.
+    "PoolClosedError": _PoolClosedError,
+    "WaitQueueTimeoutError": WaitQueueTimeoutError,
+}
+
+
+class TestCMAP(IntegrationTest):
+    # Location of JSON test specifications.
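+    # (Resolved at class-definition time so the module-level
+    # CMAPSpecTestCreator at the bottom of this file can find the scenarios.)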
+ if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") + + # Test operations: + + def start(self, op): + """Run the 'start' thread operation.""" + target = op["target"] + thread = SpecRunnerThread(target) + thread.start() + self.targets[target] = thread + + def wait(self, op): + """Run the 'wait' operation.""" + time.sleep(op["ms"] / 1000.0) + + def wait_for_thread(self, op): + """Run the 'waitForThread' operation.""" + target = op["target"] + thread = self.targets[target] + thread.stop() + thread.join() + if thread.exc: + raise thread.exc + self.assertFalse(thread.ops) + + def wait_for_event(self, op): + """Run the 'waitForEvent' operation.""" + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) + + def check_out(self, op): + """Run the 'checkOut' operation.""" + label = op["label"] + with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. + conn.pin_cursor() + if label: + self.labels[label] = conn + else: + self.addCleanup(conn.close_conn, None) + + def check_in(self, op): + """Run the 'checkIn' operation.""" + label = op["connection"] + conn = self.labels[label] + self.pool.checkin(conn) + + def ready(self, op): + """Run the 'ready' operation.""" + self.pool.ready() + + def clear(self, op): + """Run the 'clear' operation.""" + if "interruptInUseConnections" in op: + self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + self.pool.reset() + + def close(self, op): + """Run the 'close' operation.""" + self.pool.close() + + def run_operation(self, op): + """Run a single operation in a test.""" + op_name = camel_to_snake(op["name"]) + thread = op["thread"] + meth = getattr(self, op_name) + if thread: + self.targets[thread].schedule(lambda: meth(op)) + else: + meth(op) + + def run_operations(self, ops): + """Run a test's operations.""" + for op in ops: + self._ops.append(op) + self.run_operation(op) + + def check_object(self, actual, expected): + """Assert that the actual object matches the expected object.""" + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) + for attr, expected_val in expected.items(): + if attr == "type": + continue + c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" + actual_val = getattr(actual, c2s) + if expected_val == 42: + self.assertIsNotNone(actual_val) + else: + self.assertEqual(actual_val, expected_val) + + def check_event(self, actual, expected): + """Assert that the actual event matches the expected event.""" + self.check_object(actual, expected) + + def actual_events(self, ignore): + """Return all the non-ignored events.""" + ignore = tuple(OBJECT_TYPES[name] for name in ignore) + return [event for event in self.listener.events if not isinstance(event, ignore)] + + def check_events(self, events, ignore): + """Check the events of a test.""" + actual_events = self.actual_events(ignore) + for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") + self.check_event(actual, expected) + + if len(events) > len(actual_events): + self.fail(f"missing events: {events[len(actual_events) :]!r}") + + def check_error(self, actual, expected): + message = 
expected.pop("message") + self.check_object(actual, expected) + self.assertIn(message, str(actual)) + + def set_fail_point(self, command_args): + if not client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + self.configure_fail_point(self.client, command_args) + + def run_scenario(self, scenario_def, test): + """Run a CMAP spec test.""" + self.logs: list = [] + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) + self.listener = CMAPListener() + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point(fp) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. + interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = self.single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = client_context.client._topology.description + sd = td.server_descriptions()[(client_context.host, client_context.port)] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + client._topology.open() + else: + client._get_topology() + self.pool = list(client._topology._servers.values())[0].pool + + # Map of target names to Thread objects. + self.targets: dict = {} + # Map of label names to Connection objects + self.labels: dict = {} + + def cleanup(): + for t in self.targets.values(): + t.stop() + for t in self.targets.values(): + t.join(5) + for conn in self.labels.values(): + conn.close_conn(None) + + self.addCleanup(cleanup) + + try: + if test["error"]: + with self.assertRaises(PyMongoError) as ctx: + self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) + else: + self.run_operations(test["operations"]) + + self.check_events(test["events"], test["ignore"]) + except Exception: + # Print the events after a test failure. + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") + for op in self._ops: + print(op) + print("Threads:") + print(self.targets) + print("Connections:") + print(self.labels) + print("Events:") + for event in self.listener.events: + print(event) + print("Log:") + for log in self.logs: + print(log) + raise + + POOL_OPTIONS = { + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, + } + + # + # Prose tests. Numbers correspond to the prose test number in the spec. 
+ # + def test_1_client_connection_pool_options(self): + client = self.rs_or_single_client(**self.POOL_OPTIONS) + pool_opts = (get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + def test_2_all_client_pools_have_same_options(self): + client = self.rs_or_single_client(**self.POOL_OPTIONS) + client.admin.command("ping") + # Discover at least one secondary. + if client_context.has_secondaries: + client.admin.command("ping", read_preference=ReadPreference.SECONDARY) + pools = get_pools(client) + pool_opts = pools[0].opts + + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + for pool in pools[1:]: + self.assertEqual(pool.opts, pool_opts) + + def test_3_uri_connection_pool_options(self): + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{client_context.pair}/?{opts}" + client = self.rs_or_single_client(uri) + pool_opts = (get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + def test_4_subscribe_to_events(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + self.assertEqual(listener.event_count(PoolCreatedEvent), 1) + + # Creates a new connection. + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) + self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) + self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) + + # Uses the existing connection. + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) + + client.close() + self.assertEqual(listener.event_count(PoolClosedEvent), 1) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) + + def test_5_check_out_fails_connection_error(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + pool = get_pool(client) + + def mock_connect(*args, **kwargs): + raise ConnectionFailure("connect failed") + + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Attempt to create a new connection. + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) + + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + @client_context.require_no_fips + def test_5_check_out_fails_auth_error(self): + listener = CMAPListener() + client = self.single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) + + # Attempt to create a new connection. 
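+        # (Unlike the connect-failure case above, a connection is created
+        # before auth fails, so a ConnectionCreatedEvent precedes the
+        # ConnectionClosedEvent and check-out failure asserted below.)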
+ with self.assertRaisesRegex(OperationFailure, "failed"): + client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) + # Error happens here. + self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + # + # Extra non-spec tests + # + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_events_repr(self): + host = ("localhost", 27017) + self.assertRepr(ConnectionCheckedInEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) + self.assertRepr( + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr(ConnectionCreatedEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) + self.assertRepr(ConnectionCheckOutStartedEvent(host)) + self.assertRepr(PoolCreatedEvent(host, {})) + self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) + self.assertRepr(PoolClosedEvent(host)) + + def test_close_leaves_pool_unpaused(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + client.admin.command("ping") + pool = get_pool(client) + client.close() + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + with pool.checkout(): + pass + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +class CMAPSpecTestCreator(SpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + CMAP tests do not have a 'tests' field. The whole file represents + a single test case. + """ + return [scenario_def] + + +test_creator = CMAPSpecTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py new file mode 100644 index 0000000000..8e9a3b8e62 --- /dev/null +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -0,0 +1,137 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test compliance with the connections survive primary step down spec.""" +from __future__ import annotations + +import sys +from test.utils import ensure_all_connected + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + client_context, + unittest, +) +from test.helpers import repl_set_step_down +from test.utils_shared import ( + CMAPListener, +) + +from bson import SON +from pymongo import monitoring +from pymongo.errors import NotPrimaryError +from pymongo.synchronous.collection import Collection +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): + listener: CMAPListener + coll: Collection + + @client_context.require_replica_set + def setUp(self): + super().setUp() + self.listener = CMAPListener() + self.client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) + + # Ensure connections to all servers in replica set. This is to test + # that the is_writable flag is properly updated for connections that + # survive a replica set election. + ensure_all_connected(self.client) + self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority")) + self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority")) + # Note that all ops use same write-concern as self.db (majority). + self.db.drop_collection("step-down") + self.db.create_collection("step-down") + self.listener.reset() + + def set_fail_point(self, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + self.client.admin.command(cmd) + + def verify_pool_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) + + def verify_pool_not_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) + + @client_context.require_version_min(4, 2, -1) + def test_get_more_iteration(self): + # Insert 5 documents with WC majority. + self.coll.insert_many([{"data": k} for k in range(5)]) + # Start a find operation and retrieve first batch of results. + batch_size = 2 + cursor = self.coll.find(batch_size=batch_size) + for _ in range(batch_size): + cursor.next() + # Force step-down the primary. + repl_set_step_down(self.client, replSetStepDown=5, force=True) + # Get next batch of results. + for _ in range(batch_size): + cursor.next() + # Verify pool not cleared. + self.verify_pool_not_cleared() + # Attempt insertion to mark server description as stale and prevent a + # NotPrimaryError on the subsequent operation. + try: + self.coll.insert_one({}) + except NotPrimaryError: + pass + # Next insert should succeed on the new primary without clearing pool. + self.coll.insert_one({}) + self.verify_pool_not_cleared() + + def run_scenario(self, error_code, retry, pool_status_checker): + # Set fail point. + self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) + self.addCleanup(self.set_fail_point, {"mode": "off"}) + # Insert record and verify failure. + with self.assertRaises(NotPrimaryError) as exc: + self.coll.insert_one({"test": 1}) + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore[call-overload] + # Retry before CMAPListener assertion if retry_before=True. + if retry: + self.coll.insert_one({"test": 1}) + # Verify pool cleared/not cleared. + pool_status_checker() + # Always retry here to ensure discovery of new primary. 
+ self.coll.insert_one({"test": 1}) + + @client_context.require_version_min(4, 2, -1) + @client_context.require_test_commands + def test_not_primary_keep_connection_pool(self): + self.run_scenario(10107, True, self.verify_pool_not_cleared) + + @client_context.require_version_min(4, 2, 0) + @client_context.require_test_commands + def test_shutdown_in_progress(self): + self.run_scenario(91, False, self.verify_pool_cleared) + + @client_context.require_version_min(4, 2, 0) + @client_context.require_test_commands + def test_interrupted_at_shutdown(self): + self.run_scenario(11600, False, self.verify_pool_cleared) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_create_entities.py b/test/test_create_entities.py new file mode 100644 index 0000000000..9d77a08eee --- /dev/null +++ b/test/test_create_entities.py @@ -0,0 +1,134 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest +from test.unified_format import UnifiedSpecTestMixinV1 + +_IS_SYNC = True + + +class TestCreateEntities(IntegrationTest): + def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], + } + }, + ], + "tests": [{"description": "foo", "operations": []}], + } + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() + + def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": {"retryReads": True}, + } + }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat", + } + }, + ], + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + { + "name": 
"insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 2, "x": 44}}, + }, + ], + }, + } + ], + } + ], + } + + self.client.dat.dat.delete_many({}) + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py new file mode 100644 index 0000000000..1b1abf3600 --- /dev/null +++ b/test/test_crud_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py new file mode 100644 index 0000000000..af5228b2c7 --- /dev/null +++ b/test/test_crud_v1.py @@ -0,0 +1,130 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the collection module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) + + +class TestWriteOpsComparison(unittest.TestCase): + def test_InsertOneEquals(self): + self.assertEqual(InsertOne({"foo": 42}), InsertOne({"foo": 42})) + + def test_InsertOneNotEquals(self): + self.assertNotEqual(InsertOne({"foo": 42}), InsertOne({"foo": 23})) + + def test_DeleteOneEquals(self): + self.assertEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 42})) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_US"}) + ) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) + + def test_DeleteOneNotEquals(self): + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 23})) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_GB"}) + ) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) + + def test_DeleteManyEquals(self): + self.assertEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 42})) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_US"}), + ) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) + + def test_DeleteManyNotEquals(self): + self.assertNotEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 23})) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_GB"}), + ) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) + + def test_DeleteOneNotEqualsDeleteMany(self): + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteMany({"foo": 42})) + + def test_ReplaceOneEquals(self): + self.assertEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ) + + def test_ReplaceOneNotEquals(self): + self.assertNotEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=True), + ) + + def test_UpdateOneEquals(self): + self.assertEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + ) + + def test_UpdateOneNotEquals(self): + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 23}}), + ) + + def test_UpdateManyEquals(self): + self.assertEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) + + def test_UpdateManyNotEquals(self): + self.assertNotEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 23}}), + ) + + def test_UpdateOneNotEqualsUpdateMany(self): + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_csot.py b/test/test_csot.py new file mode 100644 index 0000000000..981af1ed03 --- /dev/null +++ b/test/test_csot.py @@ -0,0 +1,116 @@ +# Copyright 2022-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import flaky + +import pymongo +from pymongo import _csot +from pymongo.errors import PyMongoError + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestCSOT(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + + @flaky(reason="PYTHON-3522") + def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + # Capped at the original 10 deadline. + with pymongo.timeout(15): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertEqual(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + with pymongo.timeout(5): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + @client_context.require_change_streams + @flaky(reason="PYTHON-3522") + def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + coll.insert_one({}) + with coll.watch() as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if client_context.version < (4, 0): + stream.try_next() + coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. 
+ with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_cursor.py b/test/test_cursor.py index efad94d797..219ca396c9 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,159 +13,407 @@ # limitations under the License. """Test the cursor module.""" +from __future__ import annotations + import copy +import gc import itertools +import os import random import re import sys -import unittest -sys.path[0:0] = [""] +import threading +import time +from typing import Any -from nose.plugins.skip import SkipTest +import pymongo + +sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import flaky +from test.utils_shared import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + delay, + ignore_deprecations, + wait_until, +) + +from bson import decode_all from bson.code import Code -from bson.son import SON -from pymongo import (ASCENDING, - DESCENDING, - ALL, - OFF) -from pymongo.command_cursor import CommandCursor -from pymongo.cursor_manager import CursorManager -from pymongo.database import Database -from pymongo.errors import (InvalidOperation, - OperationFailure, - ExecutionTimeout) -from test import version -from test.test_client import get_client -from test.utils import is_mongos, get_command_line, server_started_with_auth - - -class TestCursor(unittest.TestCase): - - def setUp(self): - self.client = get_client() - self.db = Database(self.client, "pymongo_test") - - def tearDown(self): - self.db = None +from bson.raw_bson import RawBSONDocument +from pymongo import ASCENDING, DESCENDING +from pymongo.collation import Collation +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError +from pymongo.operations import _IndexList +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.cursor import Cursor, CursorType +from pymongo.write_concern import WriteConcern - def test_max_time_ms(self): - if not version.at_least(self.db.connection, (2, 5, 3, -1)): - raise SkipTest("MaxTimeMS requires MongoDB >= 2.5.3") +_IS_SYNC = True + + +class TestCursor(IntegrationTest): + def test_deepcopy_cursor_littered_with_regexes(self): + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": [re.compile("^hm.*")]}, + } + ) + + cursor2 = copy.deepcopy(cursor) + self.assertEqual(cursor._spec, cursor2._spec) + + def test_add_remove_option(self): + cursor = self.db.test.find() + self.assertEqual(0, cursor._query_flags) + cursor.add_option(2) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.add_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.add_option(128) + cursor2 = 
self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + self.assertEqual(162, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + self.assertEqual(162, cursor._query_flags) + cursor.add_option(128) + self.assertEqual(162, cursor._query_flags) + + cursor.remove_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + + self.assertEqual(2, cursor._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Timeout + cursor = self.db.test.find(no_cursor_timeout=True) + self.assertEqual(16, cursor._query_flags) + cursor2 = self.db.test.find().add_option(16) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(16) + self.assertEqual(0, cursor._query_flags) + + # Tailable / Await data + cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor._query_flags) + cursor2 = self.db.test.find().add_option(34) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Partial + cursor = self.db.test.find(allow_partial_results=True) + self.assertEqual(128, cursor._query_flags) + cursor2 = self.db.test.find().add_option(128) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(128) + self.assertEqual(0, cursor._query_flags) + + def test_add_remove_option_exhaust(self): + # Exhaust - which mongos doesn't support + if client_context.is_mongos: + with self.assertRaises(InvalidOperation): + next(self.db.test.find(cursor_type=CursorType.EXHAUST)) + else: + cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) + self.assertEqual(64, cursor._query_flags) + cursor2 = self.db.test.find().add_option(64) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + self.assertTrue(cursor._exhaust) + cursor.remove_option(64) + self.assertEqual(0, cursor._query_flags) + self.assertFalse(cursor._exhaust) + + def test_allow_disk_use(self): + db = self.db + db.pymongo_test.drop() + coll = db.pymongo_test + + with self.assertRaises(TypeError): + coll.find().allow_disk_use("baz") # type: ignore[arg-type] + + cursor = coll.find().allow_disk_use(True) + self.assertEqual(True, cursor._allow_disk_use) + cursor = coll.find().allow_disk_use(False) + self.assertEqual(False, cursor._allow_disk_use) + + def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().max_time_ms, 'foo') - coll.insert({"amalia": 1}) - coll.insert({"amalia": 2}) + with self.assertRaises(TypeError): + coll.find().max_time_ms("foo") # type: ignore[arg-type] + coll.insert_one({"amalia": 1}) + coll.insert_one({"amalia": 2}) coll.find().max_time_ms(None) - coll.find().max_time_ms(1L) + coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) + self.assertEqual(999, cursor._max_time_ms) cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) + self.assertEqual(1000, cursor._max_time_ms) cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) 
- self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) + self.assertEqual(999, c2._max_time_ms) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) self.assertTrue(coll.find_one(max_time_ms=1000)) - if "enableTestCommands=1" in get_command_line(self.client)["argv"]: + client = self.client + if not client_context.is_mongos and client_context.test_commands_enabled: # Cursor parses server timeout error in response to initial query. - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: cursor = coll.find().max_time_ms(1) try: - cursor.next() + next(cursor) except ExecutionTimeout: pass else: self.fail("ExecutionTimeout not raised") - self.assertRaises(ExecutionTimeout, - coll.find_one, max_time_ms=1) + with self.assertRaises(ExecutionTimeout): + coll.find_one(max_time_ms=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") - def test_max_time_ms_getmore(self): - # Test that Cursor handles server timeout error in response to getmore. - if "enableTestCommands=1" not in get_command_line(self.client)["argv"]: - raise SkipTest("Need test commands enabled") + def test_maxtime_ms_message(self): + db = self.db + db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) - if not version.at_least(self.db.connection, (2, 5, 3, -1)): - raise SkipTest("MaxTimeMS requires MongoDB >= 2.5.3") + client = self.rs_client(document_class=RawBSONDocument) + client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + def test_max_await_time_ms(self): + db = self.db + db.pymongo_test.drop() + coll = db.create_collection("pymongo_test", capped=True, size=4096) + + with self.assertRaises(TypeError): + coll.find().max_await_time_ms("foo") # type: ignore[arg-type] + coll.insert_one({"amalia": 1}) + coll.insert_one({"amalia": 2}) + + coll.find().max_await_time_ms(None) + coll.find().max_await_time_ms(1) + + # When cursor is not tailable_await + cursor = coll.find() + self.assertEqual(None, cursor._max_await_time_ms) + cursor = coll.find().max_await_time_ms(99) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is unset + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is set + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + self.assertEqual(99, cursor._max_await_time_ms) + + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) + self.assertEqual(90, cursor._max_await_time_ms) + + listener = AllowListEventListener("find", "getMore") + coll = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name].pymongo_test + + # Tailable_defaults. 
+        # Tailable_await defaults.
+        coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list()
+        # find
+        self.assertNotIn("maxTimeMS", listener.started_events[0].command)
+        # getMore
+        self.assertNotIn("maxTimeMS", listener.started_events[1].command)
+        listener.reset()
+
+        # Tailable_await with max_await_time_ms set.
+        coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list()
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[0].command)
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[1].command)
+        self.assertEqual(99, listener.started_events[1].command["maxTimeMS"])
+        listener.reset()
+
+        # Tailable_await with max_time_ms, making sure list() works on synchronous cursors.
+        if _IS_SYNC:
+            list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99))  # type: ignore[call-overload]
+        else:
+            coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list()
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[0].command)
+        self.assertEqual(99, listener.started_events[0].command["maxTimeMS"])
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[1].command)
+        listener.reset()
+
+        # Tailable_await with both max_time_ms and max_await_time_ms.
+        (
+            coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
+            .max_time_ms(99)
+            .max_await_time_ms(99)
+            .to_list()
+        )
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[0].command)
+        self.assertEqual(99, listener.started_events[0].command["maxTimeMS"])
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[1].command)
+        self.assertEqual(99, listener.started_events[1].command["maxTimeMS"])
+        listener.reset()
+
+        # Non tailable_await with max_await_time_ms.
+        coll.find(batch_size=1).max_await_time_ms(99).to_list()
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[0].command)
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[1].command)
+        listener.reset()
+
+        # Non tailable_await with max_time_ms.
+        coll.find(batch_size=1).max_time_ms(99).to_list()
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[0].command)
+        self.assertEqual(99, listener.started_events[0].command["maxTimeMS"])
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[1].command)
+
+        # Non tailable_await with both max_time_ms and max_await_time_ms.
+        coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list()
+        # find
+        self.assertEqual("find", listener.started_events[0].command_name)
+        self.assertIn("maxTimeMS", listener.started_events[0].command)
+        self.assertEqual(99, listener.started_events[0].command["maxTimeMS"])
+        # getMore
+        self.assertEqual("getMore", listener.started_events[1].command_name)
+        self.assertNotIn("maxTimeMS", listener.started_events[1].command)
+
+    @client_context.require_test_commands
+    @client_context.require_no_mongos
+    def test_max_time_ms_getmore(self):
+        # Test that Cursor handles server timeout error in response to getmore.
         coll = self.db.pymongo_test
-        coll.insert({} for _ in range(200))
+        coll.insert_many([{} for _ in range(200)])
         cursor = coll.find().max_time_ms(100)
 
         # Send initial query before turning on failpoint.
-        cursor.next()
-        self.client.admin.command("configureFailPoint",
-                                  "maxTimeAlwaysTimeOut",
-                                  mode="alwaysOn")
+        next(cursor)
+        self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn")
        try:
             try:
                 # Iterate up to first getmore.
-                list(cursor)
+                cursor.to_list()
             except ExecutionTimeout:
                 pass
             else:
                 self.fail("ExecutionTimeout not raised")
         finally:
-            self.client.admin.command("configureFailPoint",
-                                      "maxTimeAlwaysTimeOut",
-                                      mode="off")
+            self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off")
 
     def test_explain(self):
         a = self.db.test.find()
-        b = a.explain()
+        a.explain()
         for _ in a:
             break
-        c = a.explain()
-        del b["millis"]
-        b.pop("oldPlan", None)
-        del c["millis"]
-        c.pop("oldPlan", None)
-        self.assertEqual(b, c)
-        self.assertTrue("cursor" in b)
+        b = a.explain()
+        self.assertIn("executionStats", b)
+
+    def test_explain_with_read_concern(self):
+        # Do not add readConcern level to explain.
+        listener = AllowListEventListener("explain")
+        client = self.rs_or_single_client(event_listeners=[listener])
+        coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local"))
+        self.assertTrue(coll.find().explain())
+        started = listener.started_events
+        self.assertEqual(len(started), 1)
+        self.assertNotIn("readConcern", started[0].command)
+
+    # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems
+    def test_explain_csot(self):
+        # Create a MongoClient with command monitoring enabled (referred to as client).
+        listener = AllowListEventListener("explain")
+        client = self.rs_or_single_client(event_listeners=[listener])
+
+        # Create a collection, referred to as collection, with the namespace explain-test.collection.
+        # Workaround for SERVER-108463
+        names = client["explain-test"].list_collection_names()
+        if "collection" not in names:
+            collection = client["explain-test"].create_collection("collection")
+        else:
+            collection = client["explain-test"]["collection"]
+
+        # Run an explained find on collection. The find will have the query predicate
+        # { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain.
+        with pymongo.timeout(2.0):
+            self.assertTrue(collection.find({"name": "john doe"}).explain())
+
+        # Obtain the command started event for the explain. Confirm that the
+        # top-level explain command has a maxTimeMS value of 2000.
+ started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 def test_hint(self): db = self.db - self.assertRaises(TypeError, db.test.find().hint, 5.5) + with self.assertRaises(TypeError): + db.test.find().hint(5.5) # type: ignore[arg-type] db.test.drop() - for i in range(100): - db.test.insert({"num": i, "foo": i}) - - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("num", ASCENDING)]).explain) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) - - index = db.test.create_index("num") - - spec = [("num", ASCENDING)] - self.assertEqual(db.test.find({}).explain()["cursor"], "BasicCursor") - self.assertEqual(db.test.find({}).hint(spec).explain()["cursor"], - "BtreeCursor %s" % index) - self.assertEqual(db.test.find({}).hint(spec).hint(None) - .explain()["cursor"], - "BasicCursor") - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain() + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() + + spec: list[Any] = [("num", DESCENDING)] + _ = db.test.create_index(spec) + + first = next(db.test.find()) + self.assertEqual(0, first.get("num")) + first = next(db.test.find().hint(spec)) + self.assertEqual(99, first.get("num")) + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() a = db.test.find({"num": 17}) a.hint(spec) @@ -173,19 +421,46 @@ def test_hint(self): break self.assertRaises(InvalidOperation, a.hint, spec) - self.assertRaises(TypeError, db.test.find().hint, index) + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + + def test_hint_by_name(self): + db = self.db + db.test.drop() + + db.test.insert_many([{"i": i} for i in range(100)]) + + db.test.create_index([("i", DESCENDING)], name="fooindex") + first = next(db.test.find()) + self.assertEqual(0, first.get("i")) + first = next(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) def test_limit(self): db = self.db - self.assertRaises(TypeError, db.test.find().limit, None) - self.assertRaises(TypeError, db.test.find().limit, "hello") - self.assertRaises(TypeError, db.test.find().limit, 5.5) - self.assertTrue(db.test.find().limit(5L)) + with self.assertRaises(TypeError): + db.test.find().limit(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit(5.5) # type: ignore[arg-type] + self.assertTrue((db.test.find()).limit(5)) db.test.drop() - for i in range(100): - db.test.save({"x": i}) + db.test.insert_many([{"x": i} for i in range(100)]) count = 0 for _ in db.test.find(): @@ -221,81 +496,110 @@ def test_limit(self): 
a.limit(10) for _ in a: break - self.assertRaises(InvalidOperation, a.limit, 5) + with self.assertRaises(InvalidOperation): + a.limit(5) def test_max(self): db = self.db db.test.drop() - db.test.ensure_index([("j", ASCENDING)]) + j_index = [("j", ASCENDING)] + db.test.create_index(j_index) - for j in range(10): - db.test.insert({"j": j, "k": j}) + db.test.insert_many([{"j": j, "k": j} for j in range(10)]) - cursor = db.test.find().max([("j", 3)]) - self.assertEqual(len(list(cursor)), 3) + def find(max_spec, expected_index): + return db.test.find().max(max_spec).hint(expected_index) + + cursor = find([("j", 3)], j_index) + self.assertEqual(len(cursor.to_list()), 3) # Tuple. - cursor = db.test.find().max((("j", 3), )) - self.assertEqual(len(list(cursor)), 3) + cursor = find((("j", 3),), j_index) + self.assertEqual(len(cursor.to_list()), 3) # Compound index. - db.test.ensure_index([("j", ASCENDING), ("k", ASCENDING)]) - cursor = db.test.find().max([("j", 3), ("k", 3)]) - self.assertEqual(len(list(cursor)), 3) + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(cursor.to_list()), 3) # Wrong order. - cursor = db.test.find().max([("k", 3), ("j", 3)]) - self.assertRaises(OperationFailure, list, cursor) + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + cursor.to_list() # No such index. - cursor = db.test.find().max([("k", 3)]) - self.assertRaises(OperationFailure, list, cursor) - - self.assertRaises(TypeError, db.test.find().max, 10) - self.assertRaises(TypeError, db.test.find().max, {"j": 10}) + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + cursor.to_list() + with self.assertRaises(TypeError): + db.test.find().max(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().max({"j": 10}) # type: ignore[arg-type] def test_min(self): db = self.db db.test.drop() - db.test.ensure_index([("j", ASCENDING)]) + j_index = [("j", ASCENDING)] + db.test.create_index(j_index) + + db.test.insert_many([{"j": j, "k": j} for j in range(10)]) - for j in range(10): - db.test.insert({"j": j, "k": j}) + def find(min_spec, expected_index): + return db.test.find().min(min_spec).hint(expected_index) - cursor = db.test.find().min([("j", 3)]) - self.assertEqual(len(list(cursor)), 7) + cursor = find([("j", 3)], j_index) + self.assertEqual(len(cursor.to_list()), 7) # Tuple. - cursor = db.test.find().min((("j", 3), )) - self.assertEqual(len(list(cursor)), 7) + cursor = find((("j", 3),), j_index) + self.assertEqual(len(cursor.to_list()), 7) # Compound index. - db.test.ensure_index([("j", ASCENDING), ("k", ASCENDING)]) - cursor = db.test.find().min([("j", 3), ("k", 3)]) - self.assertEqual(len(list(cursor)), 7) + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(cursor.to_list()), 7) # Wrong order. - cursor = db.test.find().min([("k", 3), ("j", 3)]) - self.assertRaises(OperationFailure, list, cursor) + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + cursor.to_list() # No such index. 
- cursor = db.test.find().min([("k", 3)]) - self.assertRaises(OperationFailure, list, cursor) + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + cursor.to_list() - self.assertRaises(TypeError, db.test.find().min, 10) - self.assertRaises(TypeError, db.test.find().min, {"j": 10}) + with self.assertRaises(TypeError): + db.test.find().min(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().min({"j": 10}) # type: ignore[arg-type] + + def test_min_max_without_hint(self): + coll = self.db.test + j_index = [("j", ASCENDING)] + coll.create_index(j_index) + + with self.assertRaises(InvalidOperation): + coll.find().min([("j", 3)]).to_list() + with self.assertRaises(InvalidOperation): + coll.find().max([("j", 3)]).to_list() def test_batch_size(self): db = self.db db.test.drop() - for x in range(200): - db.test.save({"x": x}) - - self.assertRaises(TypeError, db.test.find().batch_size, None) - self.assertRaises(TypeError, db.test.find().batch_size, "hello") - self.assertRaises(TypeError, db.test.find().batch_size, 5.5) - self.assertRaises(ValueError, db.test.find().batch_size, -1) - self.assertTrue(db.test.find().batch_size(5L)) + db.test.insert_many([{"x": x} for x in range(200)]) + + with self.assertRaises(TypeError): + db.test.find().batch_size(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().batch_size(-1) + self.assertTrue((db.test.find()).batch_size(5)) a = db.test.find() for _ in a: break @@ -307,82 +611,123 @@ def cursor_count(cursor, expected_count): count += 1 self.assertEqual(expected_count, count) - cursor_count(db.test.find().batch_size(0), 200) - cursor_count(db.test.find().batch_size(1), 200) - cursor_count(db.test.find().batch_size(2), 200) - cursor_count(db.test.find().batch_size(5), 200) - cursor_count(db.test.find().batch_size(100), 200) - cursor_count(db.test.find().batch_size(500), 200) - - cursor_count(db.test.find().batch_size(0).limit(1), 1) - cursor_count(db.test.find().batch_size(1).limit(1), 1) - cursor_count(db.test.find().batch_size(2).limit(1), 1) - cursor_count(db.test.find().batch_size(5).limit(1), 1) - cursor_count(db.test.find().batch_size(100).limit(1), 1) - cursor_count(db.test.find().batch_size(500).limit(1), 1) - - cursor_count(db.test.find().batch_size(0).limit(10), 10) - cursor_count(db.test.find().batch_size(1).limit(10), 10) - cursor_count(db.test.find().batch_size(2).limit(10), 10) - cursor_count(db.test.find().batch_size(5).limit(10), 10) - cursor_count(db.test.find().batch_size(100).limit(10), 10) - cursor_count(db.test.find().batch_size(500).limit(10), 10) + cursor_count((db.test.find()).batch_size(0), 200) + cursor_count((db.test.find()).batch_size(1), 200) + cursor_count((db.test.find()).batch_size(2), 200) + cursor_count((db.test.find()).batch_size(5), 200) + cursor_count((db.test.find()).batch_size(100), 200) + cursor_count((db.test.find()).batch_size(500), 200) + + cursor_count((db.test.find()).batch_size(0).limit(1), 1) + cursor_count((db.test.find()).batch_size(1).limit(1), 1) + cursor_count((db.test.find()).batch_size(2).limit(1), 1) + cursor_count((db.test.find()).batch_size(5).limit(1), 1) + cursor_count((db.test.find()).batch_size(100).limit(1), 1) + cursor_count((db.test.find()).batch_size(500).limit(1), 1) + + 
cursor_count((db.test.find()).batch_size(0).limit(10), 10) + cursor_count((db.test.find()).batch_size(1).limit(10), 10) + cursor_count((db.test.find()).batch_size(2).limit(10), 10) + cursor_count((db.test.find()).batch_size(5).limit(10), 10) + cursor_count((db.test.find()).batch_size(100).limit(10), 10) + cursor_count((db.test.find()).batch_size(500).limit(10), 10) + + cur = db.test.find().batch_size(1) + next(cur) + # find command batchSize should be 1 + self.assertEqual(0, len(cur._data)) + next(cur) + self.assertEqual(0, len(cur._data)) + next(cur) + self.assertEqual(0, len(cur._data)) + next(cur) + self.assertEqual(0, len(cur._data)) def test_limit_and_batch_size(self): db = self.db db.test.drop() - for x in range(500): - db.test.save({"x": x}) + db.test.insert_many([{"x": x} for x in range(500)]) curs = db.test.find().limit(0).batch_size(10) - curs.next() - self.assertEqual(10, curs._Cursor__retrieved) + next(curs) + self.assertEqual(10, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=10) + next(curs) + self.assertEqual(10, curs._retrieved) curs = db.test.find().limit(-2).batch_size(0) - curs.next() - self.assertEqual(2, curs._Cursor__retrieved) + next(curs) + self.assertEqual(2, curs._retrieved) + + curs = db.test.find(limit=-2, batch_size=0) + next(curs) + self.assertEqual(2, curs._retrieved) curs = db.test.find().limit(-4).batch_size(5) - curs.next() - self.assertEqual(4, curs._Cursor__retrieved) + next(curs) + self.assertEqual(4, curs._retrieved) + + curs = db.test.find(limit=-4, batch_size=5) + next(curs) + self.assertEqual(4, curs._retrieved) curs = db.test.find().limit(50).batch_size(500) - curs.next() - self.assertEqual(50, curs._Cursor__retrieved) + next(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50, batch_size=500) + next(curs) + self.assertEqual(50, curs._retrieved) curs = db.test.find().batch_size(500) - curs.next() - self.assertEqual(500, curs._Cursor__retrieved) + next(curs) + self.assertEqual(500, curs._retrieved) + + curs = db.test.find(batch_size=500) + next(curs) + self.assertEqual(500, curs._retrieved) curs = db.test.find().limit(50) - curs.next() - self.assertEqual(50, curs._Cursor__retrieved) + next(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50) + next(curs) + self.assertEqual(50, curs._retrieved) # these two might be shaky, as the default # is set by the server. 
as of 2.0.0-rc0, 101 # or 1MB (whichever is smaller) is default # for queries without ntoreturn curs = db.test.find() - curs.next() - self.assertEqual(101, curs._Cursor__retrieved) + next(curs) + self.assertEqual(101, curs._retrieved) curs = db.test.find().limit(0).batch_size(0) - curs.next() - self.assertEqual(101, curs._Cursor__retrieved) + next(curs) + self.assertEqual(101, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=0) + next(curs) + self.assertEqual(101, curs._retrieved) def test_skip(self): db = self.db - self.assertRaises(TypeError, db.test.find().skip, None) - self.assertRaises(TypeError, db.test.find().skip, "hello") - self.assertRaises(TypeError, db.test.find().skip, 5.5) - self.assertRaises(ValueError, db.test.find().skip, -5) - self.assertTrue(db.test.find().skip(5L)) + with self.assertRaises(TypeError): + db.test.find().skip(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().skip(-5) + self.assertTrue((db.test.find()).skip(5)) db.drop_collection("test") - for i in range(100): - db.test.save({"x": i}) + db.test.insert_many([{"x": i} for i in range(100)]) for i in db.test.find(): self.assertEqual(i["x"], 0) @@ -408,7 +753,7 @@ def test_skip(self): self.assertEqual(i["x"], 10) break - for i in db.test.find().skip(1000): + for _ in db.test.find().skip(1000): self.fail() a = db.test.find() @@ -420,35 +765,35 @@ def test_skip(self): def test_sort(self): db = self.db - self.assertRaises(TypeError, db.test.find().sort, 5) - self.assertRaises(ValueError, db.test.find().sort, []) - self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) - self.assertRaises(TypeError, db.test.find().sort, - [("hello", DESCENDING)], DESCENDING) + with self.assertRaises(TypeError): + db.test.find().sort(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().sort([]) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([], ASCENDING) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([("hello", DESCENDING)], DESCENDING) # type: ignore[arg-type] db.test.drop() - unsort = range(10) + unsort = list(range(10)) random.shuffle(unsort) - for i in unsort: - db.test.save({"x": i}) + db.test.insert_many([{"x": i} for i in unsort]) asc = [i["x"] for i in db.test.find().sort("x", ASCENDING)] - self.assertEqual(asc, range(10)) + self.assertEqual(asc, list(range(10))) asc = [i["x"] for i in db.test.find().sort("x")] - self.assertEqual(asc, range(10)) + self.assertEqual(asc, list(range(10))) asc = [i["x"] for i in db.test.find().sort([("x", ASCENDING)])] - self.assertEqual(asc, range(10)) + self.assertEqual(asc, list(range(10))) - expect = range(10) - expect.reverse() + expect = list(reversed(range(10))) desc = [i["x"] for i in db.test.find().sort("x", DESCENDING)] self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])] self.assertEqual(desc, expect) - desc = [i["x"] for i in - db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + desc = [i["x"] for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] self.assertEqual(desc, expect) expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] @@ -456,12 +801,14 @@ def test_sort(self): random.shuffle(shuffled) db.test.drop() - for (a, b) in shuffled: - db.test.save({"a": a, "b": 
b}) + for a, b in shuffled: + db.test.insert_one({"a": a, "b": b}) - result = [(i["a"], i["b"]) for i in - db.test.find().sort([("b", DESCENDING), - ("a", ASCENDING)])] + result = [ + (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] + self.assertEqual(result, expected) + result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), "a"])] self.assertEqual(result, expected) a = db.test.find() @@ -470,76 +817,51 @@ def test_sort(self): break self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) - def test_count(self): + def test_where(self): db = self.db db.test.drop() - self.assertEqual(0, db.test.find().count()) - - for i in range(10): - db.test.save({"x": i}) - - self.assertEqual(10, db.test.find().count()) - self.assertTrue(isinstance(db.test.find().count(), int)) - self.assertEqual(10, db.test.find().limit(5).count()) - self.assertEqual(10, db.test.find().skip(5).count()) - - self.assertEqual(1, db.test.find({"x": 1}).count()) - self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count()) - a = db.test.find() - b = a.count() - for _ in a: - break - self.assertEqual(b, a.count()) - - self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count()) + with self.assertRaises(TypeError): + a.where(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + a.where(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + a.where({}) # type: ignore[arg-type] + + db.test.insert_many([{"x": i} for i in range(10)]) + + self.assertEqual(3, len(db.test.find().where("this.x < 3").to_list())) + self.assertEqual(3, len(db.test.find().where(Code("this.x < 3")).to_list())) + + code_with_scope = Code("this.x < i", {"i": 3}) + if client_context.version.at_least(4, 3, 3): + # MongoDB 4.4 removed support for Code with scope. 
+            with self.assertRaises(OperationFailure):
+                db.test.find().where(code_with_scope).to_list()
+
+            code_with_empty_scope = Code("this.x < 3", {})
+            with self.assertRaises(OperationFailure):
+                db.test.find().where(code_with_empty_scope).to_list()
+        else:
+            self.assertEqual(3, len(db.test.find().where(code_with_scope).to_list()))
 
-    def test_where(self):
-        db = self.db
-        db.test.drop()
+        self.assertEqual(10, len(db.test.find().to_list()))
+        self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where("this.x < 3")])
+        self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where("this.x < 3")])
+        self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where("this.x > 3")])
 
-        a = db.test.find()
-        self.assertRaises(TypeError, a.where, 5)
-        self.assertRaises(TypeError, a.where, None)
-        self.assertRaises(TypeError, a.where, {})
-
-        for i in range(10):
-            db.test.save({"x": i})
-
-        self.assertEqual(3, len(list(db.test.find().where('this.x < 3'))))
-        self.assertEqual(3,
-                         len(list(db.test.find().where(Code('this.x < 3')))))
-        self.assertEqual(3, len(list(db.test.find().where(Code('this.x < i',
-                                                               {"i": 3})))))
-        self.assertEqual(10, len(list(db.test.find())))
-
-        self.assertEqual(3, db.test.find().where('this.x < 3').count())
-        self.assertEqual(10, db.test.find().count())
-        self.assertEqual(3, db.test.find().where(u'this.x < 3').count())
-        self.assertEqual([0, 1, 2],
-                         [a["x"] for a in
-                          db.test.find().where('this.x < 3')])
-        self.assertEqual([],
-                         [a["x"] for a in
-                          db.test.find({"x": 5}).where('this.x < 3')])
-        self.assertEqual([5],
-                         [a["x"] for a in
-                          db.test.find({"x": 5}).where('this.x > 3')])
-
-        cursor = db.test.find().where('this.x < 3').where('this.x > 7')
+        cursor = db.test.find().where("this.x < 3").where("this.x > 7")
         self.assertEqual([8, 9], [a["x"] for a in cursor])
 
         a = db.test.find()
-        b = a.where('this.x > 3')
+        _ = a.where("this.x > 3")
         for _ in a:
             break
-        self.assertRaises(InvalidOperation, a.where, 'this.x < 3')
+        self.assertRaises(InvalidOperation, a.where, "this.x < 3")
 
     def test_rewind(self):
-        self.db.test.save({"x": 1})
-        self.db.test.save({"x": 2})
-        self.db.test.save({"x": 3})
+        self.db.test.insert_many([{"x": i} for i in range(1, 4)])
 
         cursor = self.db.test.find().limit(2)
 
@@ -570,10 +892,10 @@ def test_rewind(self):
 
         self.assertEqual(cursor, cursor.rewind())
 
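+    # Editor's note: a minimal sketch of the rewind()/clone() contract the
+    # surrounding tests rely on (it assumes no concurrent writes to the
+    # collection between the three passes):
+    #
+    #     cursor = collection.find().limit(2)
+    #     first = cursor.to_list()             # exhausts the cursor
+    #     second = cursor.rewind().to_list()   # same object, query re-executed
+    #     third = cursor.clone().to_list()     # fresh, unevaluated copy
+    #     assert first == second == third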
+    # max_scan, oplog_replay, and snapshot are all deprecated.
+    @ignore_deprecations
     def test_clone(self):
-        self.db.test.save({"x": 1})
-        self.db.test.save({"x": 2})
-        self.db.test.save({"x": 3})
+        self.db.test.insert_many([{"x": i} for i in range(1, 4)])
 
         cursor = self.db.test.find().limit(2)
 
@@ -608,66 +930,52 @@ def test_clone(self):
 
         self.assertNotEqual(cursor, cursor.clone())
 
-        class MyClass(dict):
-            pass
-
-        cursor = self.db.test.find(as_class=MyClass)
-        for e in cursor:
-            self.assertEqual(type(MyClass()), type(e))
-        cursor = self.db.test.find(as_class=MyClass)
-        self.assertEqual(type(MyClass()), type(cursor[0]))
-
         # Just test attributes
-        cursor = self.db.test.find({"x": re.compile("^hello.*")},
-                                   skip=1,
-                                   timeout=False,
-                                   snapshot=True,
-                                   tailable=True,
-                                   as_class=MyClass,
-                                   slave_okay=True,
-                                   await_data=True,
-                                   partial=True,
-                                   manipulate=False,
-                                   compile_re=False,
-                                   fields={'_id': False}).limit(2)
-        cursor.min([('a', 1)]).max([('b', 3)])
+        cursor = (
+            self.db.test.find(
+                {"x": re.compile("^hello.*")},
+                projection={"_id": False},
+                skip=1,
+                no_cursor_timeout=True,
+                cursor_type=CursorType.TAILABLE_AWAIT,
+                sort=[("x", 1)],
+                allow_partial_results=True,
+                oplog_replay=True,
+                batch_size=123,
+                collation={"locale": "en_US"},
+                hint=[("_id", 1)],
+                max_scan=100,
+                max_time_ms=1000,
+                return_key=True,
+                show_record_id=True,
+                snapshot=True,
+                allow_disk_use=True,
+            )
+        ).limit(2)
+        cursor.min([("a", 1)]).max([("b", 3)])
         cursor.add_option(128)
-        cursor.comment('hi!')
+        cursor.comment("hi!")
 
+        # Every attribute should be the same.
         cursor2 = cursor.clone()
-        self.assertEqual(cursor._Cursor__skip, cursor2._Cursor__skip)
-        self.assertEqual(cursor._Cursor__limit, cursor2._Cursor__limit)
-        self.assertEqual(cursor._Cursor__snapshot, cursor2._Cursor__snapshot)
-        self.assertEqual(type(cursor._Cursor__as_class),
-                         type(cursor2._Cursor__as_class))
-        self.assertEqual(cursor._Cursor__slave_okay,
-                         cursor2._Cursor__slave_okay)
-        self.assertEqual(cursor._Cursor__manipulate,
-                         cursor2._Cursor__manipulate)
-        self.assertEqual(cursor._Cursor__compile_re,
-                         cursor2._Cursor__compile_re)
-        self.assertEqual(cursor._Cursor__query_flags,
-                         cursor2._Cursor__query_flags)
-        self.assertEqual(cursor._Cursor__comment,
-                         cursor2._Cursor__comment)
-        self.assertEqual(cursor._Cursor__min,
-                         cursor2._Cursor__min)
-        self.assertEqual(cursor._Cursor__max,
-                         cursor2._Cursor__max)
+        self.assertEqual(cursor.__dict__, cursor2.__dict__)
 
         # Shallow copies share state and so can mutate the original.
         cursor2 = copy.copy(cursor)
-        cursor2._Cursor__fields['cursor2'] = False
-        self.assertTrue('cursor2' in cursor._Cursor__fields)
+        cursor2._projection["cursor2"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertIn("cursor2", cursor._projection.keys())
 
         # Deep copies don't share state and shouldn't mutate the original.
         cursor3 = copy.deepcopy(cursor)
-        cursor3._Cursor__fields['cursor3'] = False
-        self.assertFalse('cursor3' in cursor._Cursor__fields)
+        cursor3._projection["cursor3"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertNotIn("cursor3", cursor._projection.keys())
 
         cursor4 = cursor.clone()
-        cursor4._Cursor__fields['cursor4'] = False
-        self.assertFalse('cursor4' in cursor._Cursor__fields)
+        cursor4._projection["cursor4"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertNotIn("cursor4", cursor._projection.keys())
 
         # Test memo when deepcopying queries
         query = {"hello": "world"}
@@ -676,313 +984,224 @@ class MyClass(dict):
 
         cursor2 = copy.deepcopy(cursor)
 
-        self.assertNotEqual(id(cursor._Cursor__spec),
-                            id(cursor2._Cursor__spec))
-        self.assertEqual(id(cursor2._Cursor__spec['reflexive']),
id(cursor2._Cursor__spec)) - self.assertEqual(len(cursor2._Cursor__spec), 2) + self.assertNotEqual(id(cursor._spec), id(cursor2._spec)) + self.assertEqual(id(cursor2._spec["reflexive"]), id(cursor2._spec)) + self.assertEqual(len(cursor2._spec), 2) # Ensure hints are cloned as the correct type - cursor = self.db.test.find().hint([('z', 1), ("a", 1)]) + cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) - self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) - self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) - - def test_deepcopy_cursor_littered_with_regexes(self): - - cursor = self.db.test.find({"x": re.compile("^hmmm.*"), - "y": [re.compile("^hmm.*")], - "z": {"a": [re.compile("^hm.*")]}, - re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) - - cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) - - def test_add_remove_option(self): - cursor = self.db.test.find() - self.assertEqual(0, cursor._Cursor__query_options()) - cursor.add_option(2) - cursor2 = self.db.test.find(tailable=True) - self.assertEqual(2, cursor2._Cursor__query_options()) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.add_option(32) - cursor2 = self.db.test.find(tailable=True, await_data=True) - self.assertEqual(34, cursor2._Cursor__query_options()) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.add_option(128) - cursor2 = self.db.test.find(tailable=True, - await_data=True).add_option(128) - self.assertEqual(162, cursor2._Cursor__query_options()) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - - self.assertEqual(162, cursor._Cursor__query_options()) - cursor.add_option(128) - self.assertEqual(162, cursor._Cursor__query_options()) - - cursor.remove_option(128) - cursor2 = self.db.test.find(tailable=True, await_data=True) - self.assertEqual(34, cursor2._Cursor__query_options()) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.remove_option(32) - cursor2 = self.db.test.find(tailable=True) - self.assertEqual(2, cursor2._Cursor__query_options()) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - - self.assertEqual(2, cursor._Cursor__query_options()) - cursor.remove_option(32) - self.assertEqual(2, cursor._Cursor__query_options()) - - # Slave OK - cursor = self.db.test.find(slave_okay=True) - self.assertEqual(4, cursor._Cursor__query_options()) - cursor2 = self.db.test.find().add_option(4) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - self.assertTrue(cursor._Cursor__slave_okay) - cursor.remove_option(4) - self.assertEqual(0, cursor._Cursor__query_options()) - self.assertFalse(cursor._Cursor__slave_okay) - - # Timeout - cursor = self.db.test.find(timeout=False) - self.assertEqual(16, cursor._Cursor__query_options()) - cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.remove_option(16) - self.assertEqual(0, cursor._Cursor__query_options()) - - # Tailable / Await data - cursor = self.db.test.find(tailable=True, await_data=True) - self.assertEqual(34, cursor._Cursor__query_options()) - cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.remove_option(32) - self.assertEqual(2, 
cursor._Cursor__query_options()) - - # Exhaust - which mongos doesn't support - if not is_mongos(self.db.connection): - cursor = self.db.test.find(exhaust=True) - self.assertEqual(64, cursor._Cursor__query_options()) - cursor2 = self.db.test.find().add_option(64) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - self.assertTrue(cursor._Cursor__exhaust) - cursor.remove_option(64) - self.assertEqual(0, cursor._Cursor__query_options()) - self.assertFalse(cursor._Cursor__exhaust) - - # Partial - cursor = self.db.test.find(partial=True) - self.assertEqual(128, cursor._Cursor__query_options()) - cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_options(), - cursor2._Cursor__query_options()) - cursor.remove_option(128) - self.assertEqual(0, cursor._Cursor__query_options()) - - def test_count_with_fields(self): - self.db.test.drop() - self.db.test.save({"x": 1}) - - if not version.at_least(self.db.connection, (1, 1, 3, -1)): - for _ in self.db.test.find({}, ["a"]): - self.fail() - - self.assertEqual(0, self.db.test.find({}, ["a"]).count()) - else: - self.assertEqual(1, self.db.test.find({}, ["a"]).count()) + # Internal types are now dict rather than SON by default + self.assertIsInstance(cursor2._hint, dict) + self.assertEqual(cursor._hint, cursor2._hint) + + @client_context.require_sync + def test_clone_empty(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{"x": i} for i in range(1, 4)]) + cursor = self.db.test.find()[2:2] + cursor2 = cursor.clone() + self.assertRaises(StopIteration, cursor.next) + self.assertRaises(StopIteration, cursor2.next) + # Cursors don't support slicing + @client_context.require_sync def test_bad_getitem(self): self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello") self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5) self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None) + # Cursors don't support slicing + @client_context.require_sync def test_getitem_slice_index(self): self.db.drop_collection("test") - for i in range(100): - self.db.test.save({"i": i}) + self.db.test.insert_many([{"i": i} for i in range(100)]) count = itertools.count self.assertRaises(IndexError, lambda: self.db.test.find()[-1:]) self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) - for a, b in zip(count(0), self.db.test.find()): - self.assertEqual(a, b['i']) + for a, b in zip(count(0), self.db.test.find()): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) - self.assertEqual(100, len(list(self.db.test.find()[0:]))) - for a, b in zip(count(0), self.db.test.find()[0:]): - self.assertEqual(a, b['i']) + self.assertEqual(100, len(list(self.db.test.find()[0:]))) # type: ignore[call-overload] + for a, b in zip(count(0), self.db.test.find()[0:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) - self.assertEqual(80, len(list(self.db.test.find()[20:]))) - for a, b in zip(count(20), self.db.test.find()[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(80, len(list(self.db.test.find()[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) - for a, b in zip(count(99), self.db.test.find()[99:]): - self.assertEqual(a, b['i']) + for a, b in zip(count(99), self.db.test.find()[99:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) - for i in self.db.test.find()[1000:]: + for _i in self.db.test.find()[1000:]: self.fail() - 
self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - self.assertEqual(5, len(list(self.db.test.find()[20L:25L]))) - for a, b in zip(count(20), self.db.test.find()[20:25]): - self.assertEqual(a, b['i']) - - self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) - for a, b in zip(count(20), self.db.test.find()[40:45][20:]): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find()[40:45].limit(0).skip(20)) - ) - ) - for a, b in zip(count(20), - self.db.test.find()[40:45].limit(0).skip(20)): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find().limit(10).skip(40)[20:])) - ) - for a, b in zip(count(20), - self.db.test.find().limit(10).skip(40)[20:]): - self.assertEqual(a, b['i']) - - self.assertEqual(1, len(list(self.db.test.find()[:1]))) - self.assertEqual(5, len(list(self.db.test.find()[:5]))) - - self.assertEqual(1, len(list(self.db.test.find()[99:100]))) - self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) - self.assertEqual(0, len(list(self.db.test.find()[10:10]))) - self.assertEqual(0, len(list(self.db.test.find()[:0]))) - self.assertEqual(80, - len(list(self.db.test.find()[10:10].limit(0).skip(20)) - ) - ) + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:25]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45][20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(1, len(list(self.db.test.find()[:1]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[:5]))) # type: ignore[call-overload] + + self.assertEqual(1, len(list(self.db.test.find()[99:100]))) # type: ignore[call-overload] + self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[10:10]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[:0]))) # type: ignore[call-overload] + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) # type: ignore[call-overload] self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) + # Cursors don't support slicing + @client_context.require_sync def test_getitem_numeric_index(self): self.db.drop_collection("test") - for i in range(100): - self.db.test.save({"i": i}) + self.db.test.insert_many([{"i": i} for i in range(100)]) - self.assertEqual(0, self.db.test.find()[0]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) - self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) - self.assertEqual(50, self.db.test.find()[50L]['i']) - self.assertEqual(99, self.db.test.find()[99]['i']) + self.assertEqual(0, 
self.db.test.find()[0]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) - self.assertRaises(IndexError, - lambda x: self.db.test.find().skip(50)[x], 50) - - def test_count_with_limit_and_skip(self): - if not version.at_least(self.db.connection, (1, 1, 4, -1)): - raise SkipTest("count with limit / skip requires MongoDB >= 1.1.4") - - self.assertRaises(TypeError, self.db.test.find().count, "foo") - - def check_len(cursor, length): - self.assertEqual(len(list(cursor)), cursor.count(True)) - self.assertEqual(length, cursor.count(True)) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) + @client_context.require_sync + def test_iteration_with_list(self): self.db.drop_collection("test") - for i in range(100): - self.db.test.save({"i": i}) + self.db.test.insert_many([{"i": i} for i in range(100)]) - check_len(self.db.test.find(), 100) + cur = self.db.test.find().batch_size(10) - check_len(self.db.test.find().limit(10), 10) - check_len(self.db.test.find().limit(110), 100) - - check_len(self.db.test.find().skip(10), 90) - check_len(self.db.test.find().skip(110), 0) - - check_len(self.db.test.find().limit(10).skip(10), 10) - check_len(self.db.test.find()[10:20], 10) - check_len(self.db.test.find().limit(10).skip(95), 5) - check_len(self.db.test.find()[95:105], 5) + self.assertEqual(100, len(list(cur))) # type: ignore[call-overload] def test_len(self): - self.assertRaises(TypeError, len, self.db.test.find()) + with self.assertRaises(TypeError): + len(self.db.test.find()) # type: ignore[arg-type] def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) - def set_coll(): - self.db.test.find().collection = "hello" - - self.assertRaises(AttributeError, set_coll) + with self.assertRaises(AttributeError): + self.db.test.find().collection = "hello" # type: ignore def test_get_more(self): db = self.db db.drop_collection("test") - db.test.insert([{'i': i} for i in range(10)]) - self.assertEqual(10, len(list(db.test.find().batch_size(5)))) + db.test.insert_many([{"i": i} for i in range(10)]) + self.assertEqual(10, len(db.test.find().batch_size(5).to_list())) def test_tailable(self): db = self.db db.drop_collection("test") db.create_collection("test", capped=True, size=1000, max=3) + self.addCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) - try: - cursor = db.test.find(tailable=True) - - db.test.insert({"x": 1}) - count = 0 - for doc in cursor: - count += 1 - self.assertEqual(1, doc["x"]) - self.assertEqual(1, count) - - db.test.insert({"x": 2}) - count = 0 - for doc in cursor: - count += 1 - self.assertEqual(2, doc["x"]) - self.assertEqual(1, count) - - db.test.insert({"x": 3}) - count = 0 - for doc in cursor: - count += 1 - self.assertEqual(3, doc["x"]) - self.assertEqual(1, count) + db.test.insert_one({"x": 1}) + count = 0 + for doc in cursor: + count += 1 + self.assertEqual(1, doc["x"]) + self.assertEqual(1, count) - # Capped rollover - the collection can never - # have more than 3 documents. Just make sure - # this doesn't raise... 
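The tailable-cursor behavior asserted in this hunk is easier to see in isolation: on a capped collection, a tailable cursor survives exhausting its current results and picks up later inserts on re-iteration. A minimal sketch, not part of the patch (connection string and collection name are placeholder assumptions):

```python
from pymongo import CursorType, MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed local server
db = client.test
db.drop_collection("capped_demo")
db.create_collection("capped_demo", capped=True, size=1000, max=3)

db.capped_demo.insert_many([{"x": i} for i in range(3)])
cursor = db.capped_demo.find(cursor_type=CursorType.TAILABLE)

# Each pass drains whatever is currently available; the cursor stays open.
print([doc["x"] for doc in cursor])  # [0, 1, 2]
db.capped_demo.insert_one({"x": 3})  # capped at max=3, so {"x": 0} rolls out
print([doc["x"] for doc in cursor])  # [3]
print(cursor.alive)                  # True until the collection rolls past it
```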
- db.test.insert(({"x": i} for i in xrange(4, 7))) - self.assertEqual(0, len(list(cursor))) + db.test.insert_one({"x": 2}) + count = 0 + for doc in cursor: + count += 1 + self.assertEqual(2, doc["x"]) + self.assertEqual(1, count) - # and that the cursor doesn't think it's still alive. - self.assertFalse(cursor.alive) + db.test.insert_one({"x": 3}) + count = 0 + for doc in cursor: + count += 1 + self.assertEqual(3, doc["x"]) + self.assertEqual(1, count) + + # Capped rollover - the collection can never + # have more than 3 documents. Just make sure + # this doesn't raise... + db.test.insert_many([{"x": i} for i in range(4, 7)]) + self.assertEqual(0, len(cursor.to_list())) + + # and that the cursor doesn't think it's still alive. + self.assertFalse(cursor.alive) + + self.assertEqual(3, db.test.count_documents({})) + + # __getitem__(index) + if _IS_SYNC: + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): + self.assertEqual(4, cursor[0]["x"]) + self.assertEqual(5, cursor[1]["x"]) + self.assertEqual(6, cursor[2]["x"]) + + cursor.rewind() + self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) + cursor.rewind() + self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) + cursor.rewind() + self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) + cursor.rewind() + self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) + cursor.rewind() + self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) + cursor.rewind() + self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) + + # The Async API does not support threading + @client_context.require_sync + def test_concurrent_close(self): + """Ensure a tailable can be closed from another thread.""" + db = self.db + db.drop_collection("test") + db.create_collection("test", capped=True, size=1000, max=3) + self.addCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) - self.assertEqual(3, db.test.count()) - finally: - db.drop_collection("test") + def iterate_cursor(): + while cursor.alive: + try: + for _doc in cursor: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise + + t = threading.Thread(target=iterate_cursor) + t.start() + time.sleep(1) + cursor.close() + self.assertFalse(cursor.alive) + t.join(3) + self.assertFalse(t.is_alive()) def test_distinct(self): - if not version.at_least(self.db.connection, (1, 1, 3, 1)): - raise SkipTest("distinct with query requires MongoDB >= 1.1.3") - self.db.drop_collection("test") - self.db.test.save({"a": 1}) - self.db.test.save({"a": 2}) - self.db.test.save({"a": 2}) - self.db.test.save({"a": 2}) - self.db.test.save({"a": 3}) + self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a") distinct.sort() @@ -991,130 +1210,647 @@ def test_distinct(self): self.db.drop_collection("test") - self.db.test.save({"a": {"b": "a"}, "c": 12}) - self.db.test.save({"a": {"b": "b"}, "c": 8}) - self.db.test.save({"a": {"b": "c"}, "c": 12}) - self.db.test.save({"a": {"b": "c"}, "c": 8}) + self.db.test.insert_one({"a": {"b": "a"}, "c": 12}) + self.db.test.insert_one({"a": {"b": "b"}, "c": 8}) + self.db.test.insert_one({"a": {"b": "c"}, "c": 12}) + self.db.test.insert_one({"a": {"b": "c"}, "c": 8}) distinct = self.db.test.find({"c": 8}).distinct("a.b") distinct.sort() self.assertEqual(["b", "c"], distinct) - def test_max_scan(self): - if not version.at_least(self.db.connection, (1, 5, 1)): 
- raise SkipTest("maxScan requires MongoDB >= 1.5.1") - - self.db.drop_collection("test") - for _ in range(100): - self.db.test.insert({}) - - self.assertEqual(100, len(list(self.db.test.find()))) - self.assertEqual(50, len(list(self.db.test.find(max_scan=50)))) - self.assertEqual(50, len(list(self.db.test.find() - .max_scan(90).max_scan(50)))) - def test_with_statement(self): - if sys.version_info < (2, 6): - raise SkipTest("With statement requires Python >= 2.6") - self.db.drop_collection("test") - for _ in range(100): - self.db.test.insert({}) + self.db.test.insert_many([{} for _ in range(100)]) c1 = self.db.test.find() - exec """ -with self.db.test.find() as c2: - self.assertTrue(c2.alive) -self.assertFalse(c2.alive) - -with self.db.test.find() as c2: - self.assertEqual(100, len(list(c2))) -self.assertFalse(c2.alive) -""" + with self.db.test.find() as c2: + self.assertTrue(c2.alive) + self.assertFalse(c2.alive) + + with self.db.test.find() as c2: + self.assertEqual(100, len(c2.to_list())) + self.assertFalse(c2.alive) self.assertTrue(c1.alive) + @client_context.require_no_mongos def test_comment(self): - if is_mongos(self.client): - raise SkipTest("profile is not supported by mongos") - if not version.at_least(self.db.connection, (2, 0)): - raise SkipTest("Requires server >= 2.0") - if server_started_with_auth(self.db.connection): - raise SkipTest("SERVER-4754 - This test uses profiling.") - - def run_with_profiling(func): - self.db.set_profiling_level(OFF) + self.client.drop_database(self.db) + self.db.command("profile", 2) # Profile ALL commands. + try: + self.db.test.find().comment("foo").to_list() + count = self.db.system.profile.count_documents( + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) + self.assertEqual(count, 1) + + self.db.test.find().comment("foo").distinct("type") + count = self.db.system.profile.count_documents( + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) + self.assertEqual(count, 1) + finally: + self.db.command("profile", 0) # Turn off profiling. 
self.db.system.profile.drop() - self.db.set_profiling_level(ALL) - func() - self.db.set_profiling_level(OFF) - - def find(): - list(self.db.test.find().comment('foo')) - op = self.db.system.profile.find({'ns': 'pymongo_test.test', - 'op': 'query', - 'query.$comment': 'foo'}) - self.assertEqual(op.count(), 1) - - run_with_profiling(find) - - def count(): - self.db.test.find().comment('foo').count() - op = self.db.system.profile.find({'ns': 'pymongo_test.$cmd', - 'op': 'command', - 'command.count': 'test', - 'command.$comment': 'foo'}) - self.assertEqual(op.count(), 1) - - run_with_profiling(count) - - def distinct(): - self.db.test.find().comment('foo').distinct('type') - op = self.db.system.profile.find({'ns': 'pymongo_test.$cmd', - 'op': 'command', - 'command.distinct': 'test', - 'command.$comment': 'foo'}) - self.assertEqual(op.count(), 1) - - run_with_profiling(distinct) - - self.db.test.insert([{}, {}]) + + self.db.test.insert_many([{}, {}]) cursor = self.db.test.find() - cursor.next() - self.assertRaises(InvalidOperation, cursor.comment, 'hello') + next(cursor) + self.assertRaises(InvalidOperation, cursor.comment, "hello") + + def test_alive(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{} for _ in range(3)]) + self.addCleanup(self.db.test.delete_many, {}) + cursor = self.db.test.find().batch_size(2) + n = 0 + while True: + cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + def test_close_kills_cursor_synchronously(self): + # Kill any cursors possibly queued up by previous tests. + gc.collect() + self.client._process_periodic_tasks() + + listener = AllowListEventListener("killCursors") + client = self.rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_close_kills_cursors + + # Add some test data. + docs_inserted = 1000 + coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + # Close a cursor while it's still open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(next(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + cursor.close() + + def assertCursorKilled(): + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + # Close a command cursor while it's still open on the server. + cursor = coll.aggregate([], batchSize=10) + self.assertTrue(bool(next(cursor))) + cursor.close() + + # The cursor should be killed if it had a non-zero id. + if cursor.cursor_id: + assertCursorKilled() + else: + self.assertEqual(0, len(listener.started_events)) - self.db.system.profile.drop() + @client_context.require_failCommand_appName + def test_timeout_kills_cursor_synchronously(self): + listener = AllowListEventListener("killCursors") + client = self.rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_timeout_kills_cursor - def test_cursor_transfer(self): + # Add some test data. + docs_inserted = 10 + coll.insert_many([{"i": i} for i in range(docs_inserted)]) - # This is just a test, don't try this at home... 
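To make the `killCursors` assertions above concrete: closing a cursor whose server-side cursor is still open causes the driver to send a `killCursors` command, which command monitoring can observe. A minimal sketch using the public monitoring API, not part of the patch (connection string and collection name are placeholder assumptions):

```python
from pymongo import MongoClient, monitoring

class KillCursorsListener(monitoring.CommandListener):
    """Record killCursors commands so cursor cleanup is visible."""
    def __init__(self):
        self.seen = []
    def started(self, event):
        if event.command_name == "killCursors":
            self.seen.append(event.command)
    def succeeded(self, event):
        pass
    def failed(self, event):
        pass

listener = KillCursorsListener()
client = MongoClient("mongodb://localhost:27017", event_listeners=[listener])
coll = client.test.kill_demo
coll.drop()
coll.insert_many([{"i": i} for i in range(1000)])

cursor = coll.find(batch_size=10)
next(cursor)    # only 10 of 1000 docs retrieved; the server cursor stays open
cursor.close()  # the driver sends killCursors for the open server-side cursor
print(len(listener.seen))  # expect 1
```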
- self.db.test.remove({}) - self.db.test.insert({'_id': i} for i in xrange(200)) + listener.reset() - class CManager(CursorManager): - def __init__(self, connection): - super(CManager, self).__init__(connection) + cursor = coll.find({}, batch_size=1) + cursor.next() - def close(self, dummy): - # Do absolutely nothing... - pass + # Mock getMore commands timing out. + mock_timeout_errors = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "errorCode": 50, + "failCommands": ["getMore"], + }, + } + + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + def assertCursorKilled(): + wait_until( + lambda: len(listener.succeeded_events), + "find successful killCursors command", + ) + + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + cursor = coll.aggregate([], batchSize=1) + cursor.next() - client = self.db.connection + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + assertCursorKilled() + + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__, eg test.find(invalidKwarg=1). + cursor = Cursor.__new__(Cursor) # Skip calling __init__ + cursor.__del__() # no error + + def test_getMore_does_not_send_readPreference(self): + listener = AllowListEventListener("find", "getMore") + client = self.rs_or_single_client(event_listeners=[listener]) + # We never send primary read preference so override the default. + coll = client[self.db.name].get_collection( + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) + + coll.delete_many({}) + coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + + coll.find(batch_size=3).to_list() + started = listener.started_events + self.assertEqual(2, len(started)) + self.assertEqual("find", started[0].command_name) + if client_context.is_rs or client_context.is_mongos: + self.assertIn("$readPreference", started[0].command) + else: + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) + + @client_context.require_replica_set + def test_to_list_tailable(self): + oplog = self.client.local.oplog.rs + last = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() + ts = last["ts"] + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. + c = oplog.find( + {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True + ).max_await_time_ms(1) + self.addCleanup(c.close) + # Wait for the change to be read. 
+ docs = [] + while not docs: + docs = c.to_list() + self.assertGreaterEqual(len(docs), 1) + + def test_to_list_empty(self): + c = self.db.does_not_exist.find() + docs = c.to_list() + self.assertEqual([], docs) + + def test_to_list_length(self): + coll = self.db.test + coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + c = coll.find() + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + docs = c.to_list(3) + self.assertEqual(len(docs), 2) + + @flaky(reason="PYTHON-3522") + def test_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + @client_context.require_change_streams + def test_command_cursor_to_list(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) + docs = c.to_list() + self.assertGreaterEqual(len(docs), 0) + + @client_context.require_change_streams + def test_command_cursor_to_list_empty(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) + docs = c.to_list() + self.assertEqual([], docs) + + @client_context.require_change_streams + def test_command_cursor_to_list_length(self): + db = self.db + db.drop_collection("test") + db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list()), 2) + + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list(1)), 1) + + @client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") + def test_command_cursor_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = coll.aggregate([], batchSize=1) + with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + +class TestRawBatchCursor(IntegrationTest): + def test_find_raw(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + batches = c.find_raw_batches().sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_transactions + def test_find_raw_transaction(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = ( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ).to_list() + cmd = 
listener.started_events[0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_find_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = client[self.db.name].test.find_raw_batches().sort("_id").to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: + self.assertEqual(cmd.command_name, "find") + + @client_context.require_version_min(5, 0, 0) + @client_context.require_no_standalone + def test_find_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct("x", {}, session=session) + batches = db.test.find_raw_batches(session=session).sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + def test_explain(self): + c = self.db.test + c.insert_one({}) + explanation = c.find_raw_batches().explain() + self.assertIsInstance(explanation, dict) + + def test_empty(self): + self.db.test.drop() + cursor = self.db.test.find_raw_batches() + with self.assertRaises(StopIteration): + next(cursor) + + def test_clone(self): + self.db.test.insert_one({}) + cursor = self.db.test.find_raw_batches() + # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. + self.assertIsInstance(next(cursor.clone()), bytes) + self.assertIsInstance(next(copy.copy(cursor)), bytes) + + @client_context.require_no_mongos + def test_exhaust(self): + c = self.db.test + c.drop() + c.insert_many({"_id": i} for i in range(200)) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) + + def test_server_error(self): + with self.assertRaises(OperationFailure) as exc: + next(self.db.test.find_raw_batches({"x": {"$bad": 1}})) + + # The server response was decoded, not left raw. 
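For readers unfamiliar with the raw-batch API these tests cover: `find_raw_batches()` yields each server batch as undecoded BSON `bytes`, deferring decoding to the caller (for example via `bson.decode_all`). A minimal sketch, not part of the patch (connection string and collection name are placeholder assumptions):

```python
from bson import decode_all
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed local server
coll = client.test.raw_demo
coll.drop()
coll.insert_many([{"_id": i, "x": 3.0 * i} for i in range(10)])

# Each iteration yields one raw BSON batch (bytes), not decoded documents.
for batch in coll.find_raw_batches(batch_size=4):
    docs = decode_all(batch)  # decode only when and where it's needed
    print(f"{len(batch)} bytes -> {len(docs)} docs")
```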
+ self.assertIsInstance(exc.exception.details, dict) + + def test_get_item(self): + with self.assertRaises(InvalidOperation): + self.db.test.find_raw_batches()[0] + + def test_collation(self): + next(self.db.test.find_raw_batches(collation=Collation("en_US"))) + + def test_read_concern(self): + self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) + c = self.db.get_collection("test", read_concern=ReadConcern("majority")) + next(c.find_raw_batches()) + + def test_monitoring(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + c.drop() + c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = c.find_raw_batches(batch_size=4) + + # First raw batch of 4 documents. + next(cursor) + + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # The batch is a list of one raw bytes object. + self.assertEqual(len(csr["firstBatch"]), 1) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(4)]) + + listener.reset() + + # Next raw batch of 4 documents. + next(cursor) try: - client.set_cursor_manager(CManager) - docs = [] - cursor = self.db.test.find().batch_size(10) - docs.append(cursor.next()) - cursor.close() - docs.extend(cursor) - self.assertEqual(len(docs), 10) - cmd_cursor = {'id': cursor.cursor_id, 'firstBatch': []} - ccursor = CommandCursor(cursor.collection, cmd_cursor, - cursor.conn_id, retrieved=cursor.retrieved) - docs.extend(ccursor) - self.assertEqual(len(docs), 200) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) finally: - client.set_cursor_manager(CursorManager) + # Finish the cursor. 
+ cursor.close() + + +class TestRawBatchCommandCursor(IntegrationTest): + def test_aggregate_raw(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + batches = (c.aggregate_raw_batches([{"$sort": {"_id": 1}}])).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_transactions + def test_aggregate_raw_transaction(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = ( + client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ).to_list() + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_aggregate_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = ( + client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}]) + ).to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_standalone + def test_aggregate_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct("x", {}, session=session) + batches = ( + db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session) + ).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + def test_server_error(self): + c = self.db.test + c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + c.insert_many(docs) + c.insert_one({"_id": 10, "x": "not a number"}) + + with self.assertRaises(OperationFailure) as exc: + ( + self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, 
+ ], + batchSize=4, + ) + ).to_list() + + # The server response was decoded, not left raw. + self.assertIsInstance(exc.exception.details, dict) + + def test_get_item(self): + with self.assertRaises(InvalidOperation): + (self.db.test.aggregate_raw_batches([]))[0] + + def test_collation(self): + next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) + + def test_monitoring(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + c.drop() + c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) + + # Start cursor, no initial batch. + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # First batch is empty. + self.assertEqual(len(csr["firstBatch"]), 0) + listener.reset() + + # Batches of 4 documents. + n = 0 + for batch in cursor: + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(csr["nextBatch"][0], batch) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) + + n += 4 + listener.reset() + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_mongos + @client_context.require_sync + def test_exhaust_cursor_db_set(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + c.delete_many({}) + c.insert_many([{"_id": i} for i in range(3)]) + + listener.reset() + + result = list(c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) + + self.assertEqual(len(result), 3) + + self.assertEqual( + listener.started_command_names(), ["find", "getMore", "getMore", "getMore"] + ) + for cmd in listener.started_events: + self.assertEqual(cmd.command["$db"], "pymongo_test") + if __name__ == "__main__": unittest.main() diff --git a/test/test_custom_types.py b/test/test_custom_types.py new file mode 100644 index 0000000000..02f3127165 --- /dev/null +++ b/test/test_custom_types.py @@ -0,0 +1,970 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations + +import datetime +import sys +import tempfile +from collections import OrderedDict +from decimal import Decimal +from random import random +from typing import Any, Tuple, Type, no_type_check + +from bson.decimal128 import DecimalDecoder, DecimalEncoder +from gridfs.synchronous.grid_file import GridIn, GridOut + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) +from bson.errors import InvalidDocument +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from pymongo.errors import DuplicateKeyError +from pymongo.message import _CursorAddress +from pymongo.synchronous.collection import ReturnDocument + +_IS_SYNC = True + + +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) + + +class UndecipherableInt64Type: + def __init__(self, value): + self.value = value + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.value == other.value + # Does not compare equal to integers. + return False + + +class UndecipherableIntDecoder(TypeDecoder): + bson_type = Int64 + + def transform_bson(self, value): + return UndecipherableInt64Type(value) + + +class UndecipherableIntEncoder(TypeEncoder): + python_type = UndecipherableInt64Type + + def transform_python(self, value): + return Int64(value.value) + + +UNINT_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) + + +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) + + +class UppercaseTextDecoder(TypeDecoder): + bson_type = str + + def transform_bson(self, value): + return value.upper() + + +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) + + +def type_obfuscating_decoder_factory(rt_type): + class ResumeTokenToNanDecoder(TypeDecoder): + bson_type = rt_type + + def transform_bson(self, value): + return "NaN" + + return ResumeTokenToNanDecoder + + +class CustomBSONTypeTests: + @no_type_check + def roundtrip(self, doc): + bsonbytes = encode(doc, codec_options=self.codecopts) + rt_document = decode(bsonbytes, codec_options=self.codecopts) + self.assertEqual(doc, rt_document) + + def test_encode_decode_roundtrip(self): + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) + + @no_type_check + def test_decode_all(self): + documents = [] + for dec in range(3): + documents.append({"average": Decimal(f"56.4{dec}")}) + + bsonstream = b"" + for doc in documents: + bsonstream += encode(doc, codec_options=self.codecopts) + + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) + + @no_type_check + def test__bson_to_dict(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + decoded_document = _bson_to_dict(rawbytes, self.codecopts) + self.assertEqual(document, decoded_document) + + @no_type_check + 
def test__dict_to_bson(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + encoded_document = _dict_to_bson(document, False, self.codecopts) + self.assertEqual(encoded_document, rawbytes) + + def _generate_multidocument_bson_stream(self): + inp_num = [str(random() * 100)[:4] for _ in range(10)] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] + bsonstream = b"" + for doc in docs: + bsonstream += encode(doc) + return edocs, bsonstream + + @no_type_check + def test_decode_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + @no_type_check + def test_decode_file_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + fileobj = tempfile.TemporaryFile() + fileobj.write(bson_data) + fileobj.seek(0) + + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + fileobj.close() + + +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.codecopts = DECIMAL_CODECOPTS + + +class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + codec_options = CodecOptions( + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) + cls.codecopts = codec_options + + +class TestBSONFallbackEncoder(unittest.TestCase): + def _get_codec_options(self, fallback_encoder): + type_registry = TypeRegistry(fallback_encoder=fallback_encoder) + return CodecOptions(type_registry=type_registry) + + def test_simple(self): + codecopts = self._get_codec_options(lambda x: Decimal128(x)) + document = {"average": Decimal("56.47")} + bsonbytes = encode(document, codec_options=codecopts) + + exp_document = {"average": Decimal128("56.47")} + exp_bsonbytes = encode(exp_document) + self.assertEqual(bsonbytes, exp_bsonbytes) + + def test_erroring_fallback_encoder(self): + codecopts = self._get_codec_options(lambda _: 1 / 0) + + # fallback converter should not be invoked when encoding known types. + encode( + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) + + # expect an error when encoding a custom type. 
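As the fallback-encoder tests here illustrate, `TypeRegistry(fallback_encoder=...)` supplies a last-resort converter that is invoked only for values the BSON encoder cannot already handle. A minimal sketch of the happy path; the Decimal-to-Decimal128 conversion mirrors these tests and nothing beyond that is implied:

```python
from decimal import Decimal

from bson import decode, encode
from bson.codec_options import CodecOptions, TypeRegistry
from bson.decimal128 import Decimal128

def decimal_fallback(value):
    # Called only for types the BSON encoder doesn't already know.
    if isinstance(value, Decimal):
        return Decimal128(value)
    raise TypeError(f"cannot encode {type(value)}")

opts = CodecOptions(type_registry=TypeRegistry(fallback_encoder=decimal_fallback))
raw = encode({"average": Decimal("56.47")}, codec_options=opts)
print(decode(raw))  # {'average': Decimal128('56.47')} -- decoding is unaffected
```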
+ document = {"average": Decimal("56.47")} + with self.assertRaises(ZeroDivisionError): + encode(document, codec_options=codecopts) + + def test_noop_fallback_encoder(self): + codecopts = self._get_codec_options(lambda x: x) + document = {"average": Decimal("56.47")} + with self.assertRaises(InvalidDocument): + encode(document, codec_options=codecopts) + + def test_type_unencodable_by_fallback_encoder(self): + def fallback_encoder(value): + try: + return Decimal128(value) + except: + raise TypeError("cannot encode type %s" % (type(value))) + + codecopts = self._get_codec_options(fallback_encoder) + document = {"average": Decimal} + with self.assertRaises(TypeError): + encode(document, codec_options=codecopts) + + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + + +class TestBSONTypeEnDeCodecs(unittest.TestCase): + def test_instantiation(self): + msg = "Can't instantiate abstract class" + + def run_test(base, attrs, fail): + codec = type("testcodec", (base,), attrs) + if fail: + with self.assertRaisesRegex(TypeError, msg): + codec() + else: + codec() + + class MyType: + pass + + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) + + def test_type_checks(self): + self.assertTrue(issubclass(TypeCodec, TypeEncoder)) + self.assertTrue(issubclass(TypeCodec, TypeDecoder)) + self.assertFalse(issubclass(TypeDecoder, TypeEncoder)) + self.assertFalse(issubclass(TypeEncoder, TypeDecoder)) + + +class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + + @classmethod + def setUpClass(cls): + class TypeA: + def __init__(self, x): + self.value = x + + class TypeB: + def __init__(self, x): + self.value = x + + # transforms A, and only A into B + def fallback_encoder_A2B(value): + assert isinstance(value, TypeA) + return TypeB(value.value) + + # transforms A, and only A into something encodable + def fallback_encoder_A2BSON(value): + assert isinstance(value, TypeA) + return value.value + + # transforms B into something encodable + class B2BSON(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return value.value + + # transforms A into B + # technically, this isn't a proper type encoder as 
the output is not + # BSON-encodable. + class A2B(TypeEncoder): + python_type = TypeA + + def transform_python(self, value): + return TypeB(value.value) + + # transforms B into A + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class B2A(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return TypeA(value.value) + + cls.TypeA = TypeA + cls.TypeB = TypeB + cls.fallback_encoder_A2B = staticmethod(fallback_encoder_A2B) + cls.fallback_encoder_A2BSON = staticmethod(fallback_encoder_A2BSON) + cls.B2BSON = B2BSON + cls.B2A = B2A + cls.A2B = A2B + + def test_encode_fallback_then_custom(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_encode_custom_then_fallback(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_chaining_encoders_fails(self): + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) + + with self.assertRaises(InvalidDocument): + encode({"x": self.TypeA(123)}, codec_options=codecopts) + + def test_infinite_loop_exceeds_max_recursion_depth(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) + + # Raises max recursion depth exceeded error + with self.assertRaises(RuntimeError): + encode({"x": self.TypeA(100)}, codec_options=codecopts) + + +class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + + @classmethod + def setUpClass(cls): + class MyIntType: + def __init__(self, x): + assert isinstance(x, int) + self.x = x + + class MyStrType: + def __init__(self, x): + assert isinstance(x, str) + self.x = x + + class MyIntCodec(TypeCodec): + @property + def python_type(self): + return MyIntType + + @property + def bson_type(self): + return int + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyIntType(value) + + class MyStrCodec(TypeCodec): + @property + def python_type(self): + return MyStrType + + @property + def bson_type(self): + return str + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyStrType(value) + + def fallback_encoder(value): + return value + + cls.types = (MyIntType, MyStrType) + cls.codecs = (MyIntCodec, MyStrCodec) + cls.fallback_encoder = fallback_encoder + + def test_simple(self): + codec_instances = [codec() for codec in self.codecs] + + def assert_proper_initialization(type_registry, codec_instances): + self.assertEqual( + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) + + type_registry = TypeRegistry(codec_instances, self.fallback_encoder) + assert_proper_initialization(type_registry, codec_instances) + + 
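The registry wiring exercised in `test_simple` corresponds to the usual user-facing pattern: a `TypeCodec` pairs an encoder (`python_type` plus `transform_python`) with a decoder (`bson_type` plus `transform_bson`) and is handed to a `TypeRegistry`. A minimal round-trip sketch, not part of the patch:

```python
from decimal import Decimal

from bson import decode, encode
from bson.codec_options import CodecOptions, TypeCodec, TypeRegistry
from bson.decimal128 import Decimal128

class DecimalCodec(TypeCodec):
    python_type = Decimal    # intercepted on the way into BSON
    bson_type = Decimal128   # intercepted on the way out of BSON

    def transform_python(self, value):
        return Decimal128(value)

    def transform_bson(self, value):
        return value.to_decimal()

opts = CodecOptions(type_registry=TypeRegistry([DecimalCodec()]))
raw = encode({"average": Decimal("56.47")}, codec_options=opts)
print(decode(raw, codec_options=opts))  # {'average': Decimal('56.47')}
```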
type_registry = TypeRegistry( + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) + assert_proper_initialization(type_registry, codec_instances) + + # Ensure codec list held by the type registry doesn't change if we + # mutate the initial list. + codec_instances_copy = list(codec_instances) + codec_instances.pop(0) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) + + def test_simple_separate_codecs(self): + class MyIntEncoder(TypeEncoder): + python_type = self.types[0] + + def transform_python(self, value): + return value.x + + class MyIntDecoder(TypeDecoder): + bson_type = int + + def transform_bson(self, value): + return self.types[0](value) + + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] + type_registry = TypeRegistry(codec_instances) + + self.assertEqual( + type_registry._encoder_map, + {MyIntEncoder.python_type: codec_instances[1].transform_python}, + ) + self.assertEqual( + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, + ) + + def test_initialize_fail(self): + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(self.codecs) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([type("AnyType", (object,), {})()]) + + err_msg = f"fallback_encoder {True!r} is not a callable" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([], True) # type: ignore[arg-type] + + err_msg = "fallback_encoder {!r} is not a callable".format("hello") + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + + def test_type_registry_repr(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" + self.assertEqual(r, repr(type_registry)) + + def test_type_registry_eq(self): + codec_instances = [codec() for codec in self.codecs] + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + + codec_instances_2 = [codec() for codec in self.codecs] + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + + def test_builtin_types_override_fails(self): + def run_test(base, attrs): + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) + for pytype in _BUILT_IN_TYPES: + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + # Test only some subtypes as not all can be subclassed. 
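The override-rejection behavior that `test_builtin_types_override_fails` loops over is quick to confirm directly: registering an encoder for a built-in type fails at `TypeRegistry` construction. A minimal sketch, not part of the patch:

```python
from bson.codec_options import TypeEncoder, TypeRegistry

class BadIntEncoder(TypeEncoder):
    python_type = int  # built-in types may not be re-encoded

    def transform_python(self, value):
        return str(value)

try:
    TypeRegistry([BadIntEncoder()])
except TypeError as exc:
    print(exc)  # TypeEncoders cannot change how built-in types are encoded ...
```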
+ if pytype in [ + bool, + type(None), + RE_TYPE, + ]: + continue + + class MyType(pytype): # type: ignore + pass + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + run_test(TypeEncoder, {}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) + + +class TestCollectionWCustomType(IntegrationTest): + def setUp(self): + super().setUp() + self.db.test.drop() + + def tearDown(self): + self.db.test.drop() + + def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + collection.insert_one({"_id": 1, "data": 2**520}) + ret = collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + + def test_command_errors_w_custom_type_decoder(self): + db = self.db + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + result = test.insert_one(test_doc) + self.assertEqual(result.inserted_id, test_doc["_id"]) + with self.assertRaises(DuplicateKeyError): + test.insert_one(test_doc) + + def test_find_w_custom_type_decoder(self): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + db.test.insert_one(doc) + + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + def test_find_w_custom_type_decoder_and_document_class(self): + def run_test(doc_cls): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + db.test.insert_one(doc) + + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) + for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc, doc_cls) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + for doc_cls in [RawBSONDocument, OrderedDict]: + run_test(doc_cls) + + def test_aggregate_w_custom_type_decoder(self): + db = self.db + db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + pipeline: list = [ + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] + result = test.aggregate(pipeline) + + res = (result.to_list())[0] + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) + + def test_distinct_w_custom_type(self): + self.db.drop_collection("test") + + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) + values = [ + UndecipherableInt64Type(1), + UndecipherableInt64Type(2), + UndecipherableInt64Type(3), + {"b": UndecipherableInt64Type(3)}, + ] + test.insert_many({"a": val} for val in values) + + self.assertEqual(values, test.distinct("a")) + + def test_find_one_and__w_custom_type_decoder(self): + db = self.db + c = db.get_collection("test", 
codec_options=UNINT_DECODER_CODECOPTS) + c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertIsNone(c.find_one()) + + +class TestGridFileCustomType(IntegrationTest): + def setUp(self): + super().setUp() + self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") + + def test_grid_out_custom_opts(self): + db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) + one = GridIn( + db.fs, + _id=5, + filename="my_file", + chunkSize=1000, + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) + one.write(b"hello world") + one.close() + + two = GridOut(db.fs, 5) + two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual(1000, two.chunk_size) + self.assertIsInstance(two.upload_date, datetime.datetime) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) + self.assertEqual(3, two.bar) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + +class ChangeStreamsWCustomTypesTestMixin: + @no_type_check + def change_stream(self, *args, **kwargs): + stream = self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addCleanup(stream.close) + return stream + + @no_type_check + def insert_and_check(self, change_stream, insert_doc, expected_doc): + self.input_target.insert_one(insert_doc) + change = next(change_stream) + self.assertEqual(change["fullDocument"], expected_doc) + + @no_type_check + def kill_change_stream_cursor(self, change_stream): + # Cause a cursor not found error on the next getMore. 
+ cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.input_target.database.client + client._close_cursor_now(cursor.cursor_id, address) + + @no_type_check + def test_simple(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [ + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] + + change_stream = self.change_stream() + + self.insert_and_check(change_stream, input_docs[0], expected_docs[0]) + self.kill_change_stream_cursor(change_stream) + self.insert_and_check(change_stream, input_docs[1], expected_docs[1]) + self.kill_change_stream_cursor(change_stream) + self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + + @no_type_check + def test_custom_type_in_pipeline(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] + + # UndecipherableInt64Type should be encoded with the TypeRegistry. + change_stream = self.change_stream( + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) + + self.input_target.insert_one(input_docs[0]) + self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) + self.kill_change_stream_cursor(change_stream) + self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + + @no_type_check + def test_break_resume_token(self): + # Get one document from a change stream to determine resumeToken type. + self.create_targets() + change_stream = self.change_stream() + self.input_target.insert_one({"data": "test"}) + change = next(change_stream) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) + + # Custom-decoding the resumeToken type breaks resume tokens. + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) + + # Re-create targets, change stream and proceed. 
+ self.create_targets(codec_options=codecopts) + + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] + + change_stream = self.change_stream() + self.insert_and_check(change_stream, docs[0], docs[0]) + self.kill_change_stream_cursor(change_stream) + self.insert_and_check(change_stream, docs[1], docs[1]) + self.kill_change_stream_cursor(change_stream) + self.insert_and_check(change_stream, docs[2], docs[2]) + + @no_type_check + def test_document_class(self): + def run_test(doc_cls): + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) + + self.create_targets(codec_options=codecopts) + change_stream = self.change_stream() + + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} + self.input_target.insert_one(doc) + change = next(change_stream) + + self.assertIsInstance(change, doc_cls) + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") + + for doc_cls in [OrderedDict, RawBSONDocument]: + run_test(doc_cls) + + +class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) + + def tearDown(self): + self.input_target.drop() + + def create_targets(self, *args, **kwargs): + self.watched_target = self.db.get_collection("test", *args, **kwargs) + self.input_target = self.watched_target + # Ensure the collection exists and is empty. + self.input_target.insert_one({}) + self.input_target.delete_many({}) + + +class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) + + def tearDown(self): + self.input_target.drop() + self.client.drop_database(self.watched_target) + + def create_targets(self, *args, **kwargs): + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) + self.input_target = self.watched_target.test + # Insert a record to ensure db, coll are created. + self.input_target.insert_one({"data": "dummy"}) + + +class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) + + def tearDown(self): + self.input_target.drop() + self.client.drop_database(self.db) + + def create_targets(self, *args, **kwargs): + codec_options = kwargs.pop("codec_options", None) + if codec_options: + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class + self.watched_target = self.rs_client(*args, **kwargs) + self.input_target = self.watched_target[self.db.name].test + # Insert a record to ensure db, coll are created. + self.input_target.insert_one({"data": "dummy"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_database.py b/test/test_database.py index b2e0efbb75..ebbf6e55c6 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,75 +13,122 @@ # limitations under the License. 
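# A standalone sketch of watching a collection through codec options built
# from a TypeRegistry -- the pattern the change-stream tests above exercise.
# Assumes a replica set reachable at the default URI; the database and
# collection names are illustrative, and the decoder shown is a minimal
# stand-in for the suite's UppercaseTextDecoder.
from bson.codec_options import CodecOptions, TypeDecoder, TypeRegistry
from pymongo import MongoClient


class UppercaseTextDecoder(TypeDecoder):
    bson_type = str  # Transform every BSON string in decoded documents.

    def transform_bson(self, value):
        return value.upper()


client = MongoClient()  # Change streams require a replica set.
opts = CodecOptions(type_registry=TypeRegistry([UppercaseTextDecoder()]))
coll = client.demo_db.get_collection("demo", codec_options=opts)
with coll.watch(max_await_time_ms=100) as stream:
    coll.insert_one({"data": "hello"})
    change = next(stream)
    assert change["fullDocument"]["data"] == "HELLO"
# Caveat from test_break_resume_token: custom-decoding the type used by
# resume tokens (as a broad decoder can) breaks resuming the stream.
client.drop_database("demo_db")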
"""Test the database module.""" +from __future__ import annotations -import datetime -import os import re import sys -import warnings +from typing import Any, Iterable, List, Mapping, Union + +from pymongo.synchronous.command_cursor import CommandCursor sys.path[0:0] = [""] -import unittest -from nose.plugins.skip import SkipTest +from test import IntegrationTest, client_context, unittest +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + OvertCommandListener, + wait_until, +) -from bson.code import Code -from bson.regex import Regex +from bson.codec_options import CodecOptions from bson.dbref import DBRef +from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.son import SON, RE_TYPE -from pymongo import (ALL, - auth, - OFF, - SLOW_ONLY, - helpers, - ReadPreference) -from pymongo.collection import Collection -from pymongo.database import Database -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ExecutionTimeout, - InvalidName, - OperationFailure) -from pymongo.son_manipulator import (AutoReference, - NamespaceInjector, - ObjectIdShuffler) -from test import version -from test.utils import (get_command_line, is_mongos, - remove_all_users, server_started_with_auth) -from test.test_client import get_client - - -class TestDatabase(unittest.TestCase): - - def setUp(self): - self.client = get_client() - - def tearDown(self): - self.client = None +from bson.regex import Regex +from bson.son import SON +from pymongo import helpers_shared +from pymongo.errors import ( + CollectionInvalid, + ExecutionTimeout, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import auth +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestDatabaseNoConnect(unittest.TestCase): + """Test Database features on a client that does not connect.""" + + client: MongoClient + + @classmethod + def setUpClass(cls): + cls.client = MongoClient(connect=False) def test_name(self): self.assertRaises(TypeError, Database, self.client, 4) self.assertRaises(InvalidName, Database, self.client, "my db") + self.assertRaises(InvalidName, Database, self.client, 'my"db') self.assertRaises(InvalidName, Database, self.client, "my\x00db") - self.assertRaises(InvalidName, Database, - self.client, u"my\u0000db") + self.assertRaises(InvalidName, Database, self.client, "my\u0000db") self.assertEqual("name", Database(self.client, "name").name) + def test_get_collection(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + read_concern = ReadConcern("majority") + coll = self.client.pymongo_test.get_collection( + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) + self.assertEqual(codec_options, coll.codec_options) + self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) + self.assertEqual(write_concern, coll.write_concern) + self.assertEqual(read_concern, coll.read_concern) + + def test_getattr(self): + db = self.client.pymongo_test + self.assertIsInstance(db["_does_not_exist"], Collection) + + with self.assertRaises(AttributeError) as context: + db._does_not_exist + + # Message 
should be: "AttributeError: Database has no attribute + # '_does_not_exist'. To access the _does_not_exist collection, + # use database['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + def test_iteration(self): + db = self.client.pymongo_test + msg = "'Database' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in db: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = db[0] + # next fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = next(db) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = db.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(db, Iterable) + + +class TestDatabase(IntegrationTest): def test_equality(self): - self.assertNotEqual(Database(self.client, "test"), - Database(self.client, "mike")) - self.assertEqual(Database(self.client, "test"), - Database(self.client, "test")) + self.assertNotEqual(Database(self.client, "test"), Database(self.client, "mike")) + self.assertEqual(Database(self.client, "test"), Database(self.client, "test")) # Explicitly test inequality - self.assertFalse(Database(self.client, "test") != - Database(self.client, "test")) + self.assertFalse(Database(self.client, "test") != Database(self.client, "test")) - def test_repr(self): - self.assertEqual(repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, - repr(u"pymongo_test"))) + def test_hashable(self): + self.assertIn(self.client.test, {Database(self.client, "test")}) def test_get_coll(self): db = Database(self.client, "pymongo_test") @@ -90,578 +137,333 @@ def test_get_coll(self): self.assertNotEqual(db.test, Collection(db, "mike")) self.assertEqual(db.test.mike, db["test.mike"]) + def test_repr(self): + name = "Database" + self.assertEqual( + repr(Database(self.client, "pymongo_test")), + "{}({!r}, {})".format(name, self.client, repr("pymongo_test")), + ) + def test_create_collection(self): db = Database(self.client, "pymongo_test") - db.test.insert({"hello": "world"}) - self.assertRaises(CollectionInvalid, db.create_collection, "test") + db.test.insert_one({"hello": "world"}) + with self.assertRaises(CollectionInvalid): + db.create_collection("test") db.drop_collection("test") - self.assertRaises(TypeError, db.create_collection, 5) - self.assertRaises(TypeError, db.create_collection, None) - self.assertRaises(InvalidName, db.create_collection, "coll..ection") + with self.assertRaises(TypeError): + db.create_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.create_collection(None) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + db.create_collection("coll..ection") # type: ignore[arg-type] test = db.create_collection("test") - test.save({"hello": u"world"}) - self.assertEqual(db.test.find_one()["hello"], "world") - self.assertTrue(u"test" in db.collection_names()) + self.assertIn("test", db.list_collection_names()) + test.insert_one({"hello": "world"}) + self.assertEqual((db.test.find_one())["hello"], "world") db.drop_collection("test.foo") db.create_collection("test.foo") - self.assertTrue(u"test.foo" in db.collection_names()) - self.assertEqual(db.test.foo.options(), {}) - self.assertRaises(CollectionInvalid, db.create_collection, "test.foo") + self.assertIn("test.foo", db.list_collection_names()) + with 
self.assertRaises(CollectionInvalid): + db.create_collection("test.foo") - def test_collection_names(self): + def test_list_collection_names(self): db = Database(self.client, "pymongo_test") - db.test.save({"dummy": u"object"}) - db.test.mike.save({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) - colls = db.collection_names() - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + colls = db.list_collection_names() + self.assertIn("test", colls) + self.assertIn("test.mike", colls) for coll in colls: - self.assertTrue("$" not in coll) - - colls_without_systems = db.collection_names(False) - for coll in colls_without_systems: - self.assertTrue(not coll.startswith("system.")) - - def test_drop_collection(self): + self.assertNotIn("$", coll) + + db.systemcoll.test.insert_one({}) + no_system_collections = db.list_collection_names( + filter={"name": {"$regex": r"^(?!system\.)"}} + ) + for coll in no_system_collections: + self.assertFalse(coll.startswith("system.")) + self.assertIn("systemcoll.test", no_system_collections) + + # Force more than one batch. + db = self.client.many_collections + for i in range(101): + db["coll" + str(i)].insert_one({}) + # No Error + try: + db.list_collection_names() + finally: + self.client.drop_database("many_collections") + + def test_list_collection_names_filter(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + db.capped.drop() + db.create_collection("capped", capped=True, size=4096) + db.capped.insert_one({}) + db.non_capped.insert_one({}) + self.addCleanup(client.drop_database, db.name) + filter: Union[None, Mapping[str, Any]] + # Should not send nameOnly. + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): + listener.reset() + names = db.list_collection_names(filter=filter) + self.assertEqual(names, ["capped"]) + self.assertNotIn("nameOnly", listener.started_events[0].command) + + # Should send nameOnly (except on 2.6). 
+ for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): + listener.reset() + names = db.list_collection_names(filter=filter) + self.assertIn("capped", names) + self.assertIn("non_capped", names) + command = listener.started_events[0].command + self.assertIn("nameOnly", command) + self.assertTrue(command["nameOnly"]) + + def test_check_exists(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + db.drop_collection("unique") + db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + db.drop_collection("unique") + db.create_collection("unique", check_exists=False) + self.assertGreater(len(listener.started_events), 0) + self.assertNotIn("listCollections", listener.started_command_names()) + + def test_list_collections(self): + self.client.drop_database("pymongo_test") db = Database(self.client, "pymongo_test") + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) - self.assertRaises(TypeError, db.drop_collection, 5) - self.assertRaises(TypeError, db.drop_collection, None) + results = db.list_collections() + colls = [result["name"] for result in results] - db.test.save({"dummy": u"object"}) - self.assertTrue("test" in db.collection_names()) - db.drop_collection("test") - self.assertFalse("test" in db.collection_names()) + # All the collections present. + self.assertIn("test", colls) + self.assertIn("test.mike", colls) - db.test.save({"dummy": u"object"}) - self.assertTrue("test" in db.collection_names()) - db.drop_collection(u"test") - self.assertFalse("test" in db.collection_names()) + # No collection containing a '$'. + for coll in colls: + self.assertNotIn("$", coll) - db.test.save({"dummy": u"object"}) - self.assertTrue("test" in db.collection_names()) - db.drop_collection(db.test) - self.assertFalse("test" in db.collection_names()) + # Duplicate check. + coll_cnt: dict = {} + for coll in colls: + try: + # Found duplicate. + coll_cnt[coll] += 1 + self.fail("Found duplicate") + except KeyError: + coll_cnt[coll] = 1 + coll_cnt: dict = {} - db.test.save({"dummy": u"object"}) - self.assertTrue("test" in db.collection_names()) - db.test.drop() - self.assertFalse("test" in db.collection_names()) - db.test.drop() + # Check if there are any collections which don't exist. 
+ self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"}) - db.drop_collection(db.test.doesnotexist) + colls = (db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list() + self.assertEqual(1, len(colls)) - def test_validate_collection(self): - db = self.client.pymongo_test + colls = (db.list_collections(filter={"name": {"$regex": "^test.mike$"}})).to_list() + self.assertEqual(1, len(colls)) - self.assertRaises(TypeError, db.validate_collection, 5) - self.assertRaises(TypeError, db.validate_collection, None) + db.drop_collection("test") - db.test.save({"dummy": u"object"}) + db.create_collection("test", capped=True, size=4096) + results = db.list_collections(filter={"options.capped": True}) + colls = [result["name"] for result in results] - self.assertRaises(OperationFailure, db.validate_collection, - "test.doesnotexist") - self.assertRaises(OperationFailure, db.validate_collection, - db.test.doesnotexist) + # Checking only capped collections are present + self.assertIn("test", colls) + self.assertNotIn("test.mike", colls) - self.assertTrue(db.validate_collection("test")) - self.assertTrue(db.validate_collection(db.test)) - self.assertTrue(db.validate_collection(db.test, full=True)) - self.assertTrue(db.validate_collection(db.test, scandata=True)) - self.assertTrue(db.validate_collection(db.test, scandata=True, full=True)) - self.assertTrue(db.validate_collection(db.test, True, True)) + # No collection containing a '$'. + for coll in colls: + self.assertNotIn("$", coll) - def test_profiling_levels(self): - if is_mongos(self.client): - raise SkipTest('profile is not supported by mongos') - db = self.client.pymongo_test - self.assertEqual(db.profiling_level(), OFF) # default + # Duplicate check. + coll_cnt = {} + for coll in colls: + try: + # Found duplicate. + coll_cnt[coll] += 1 + self.fail("Found duplicate") + except KeyError: + coll_cnt[coll] = 1 + coll_cnt = {} - self.assertRaises(ValueError, db.set_profiling_level, 5.5) - self.assertRaises(ValueError, db.set_profiling_level, None) - self.assertRaises(ValueError, db.set_profiling_level, -1) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, 5.5) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, '1') + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "system.indexes"}) - db.set_profiling_level(SLOW_ONLY) - self.assertEqual(db.profiling_level(), SLOW_ONLY) + self.client.drop_database("pymongo_test") - db.set_profiling_level(ALL) - self.assertEqual(db.profiling_level(), ALL) + def test_list_collection_names_single_socket(self): + client = self.rs_or_single_client(maxPoolSize=1) + client.drop_database("test_collection_names_single_socket") + db = client.test_collection_names_single_socket + for i in range(200): + db.create_collection(str(i)) - db.set_profiling_level(OFF) - self.assertEqual(db.profiling_level(), OFF) + db.list_collection_names() # Must not hang. 
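# A standalone sketch of the listCollections filtering semantics asserted
# above (assumes a reachable mongod; the collection name is illustrative):
# a filter touching only "name" lets the driver send nameOnly=true, while
# filtering on collection options requires the full metadata.
from pymongo import MongoClient

client = MongoClient()
db = client.demo_db
db.drop_collection("events_capped")
db.create_collection("events_capped", capped=True, size=4096)

# Name-only filter: the driver may optimize with nameOnly=true.
names = db.list_collection_names(filter={"name": {"$regex": "^events"}})
# Options filter: full metadata is needed, so nameOnly is not sent.
capped = db.list_collection_names(filter={"options.capped": True})
assert "events_capped" in names and "events_capped" in capped
client.drop_database("demo_db")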
+ client.drop_database("test_collection_names_single_socket") - db.set_profiling_level(SLOW_ONLY, 50) - self.assertEqual(50, db.command("profile", -1)['slowms']) + def test_drop_collection(self): + db = Database(self.client, "pymongo_test") - db.set_profiling_level(ALL, -1) - self.assertEqual(-1, db.command("profile", -1)['slowms']) + with self.assertRaises(TypeError): + db.drop_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.drop_collection(None) # type: ignore[arg-type] - db.set_profiling_level(OFF, 100) # back to default - self.assertEqual(100, db.command("profile", -1)['slowms']) + db.test.insert_one({"dummy": "object"}) + self.assertIn("test", db.list_collection_names()) + db.drop_collection("test") + self.assertNotIn("test", db.list_collection_names()) - def test_profiling_info(self): - if is_mongos(self.client): - raise SkipTest('profile is not supported by mongos') - db = self.client.pymongo_test + db.test.insert_one({"dummy": "object"}) + self.assertIn("test", db.list_collection_names()) + db.drop_collection("test") + self.assertNotIn("test", db.list_collection_names()) - db.set_profiling_level(ALL) - db.test.find_one() - db.set_profiling_level(OFF) + db.test.insert_one({"dummy": "object"}) + self.assertIn("test", db.list_collection_names()) + db.drop_collection(db.test) + self.assertNotIn("test", db.list_collection_names()) - info = db.profiling_info() - self.assertTrue(isinstance(info, list)) + db.test.insert_one({"dummy": "object"}) + self.assertIn("test", db.list_collection_names()) + db.test.drop() + self.assertNotIn("test", db.list_collection_names()) + db.test.drop() - # Check if we're going to fail because of SERVER-4754, in which - # profiling info isn't collected if mongod was started with --auth - if server_started_with_auth(self.client): - raise SkipTest( - "We need SERVER-4754 fixed for the rest of this test to pass" - ) + db.drop_collection(db.test.doesnotexist) - self.assertTrue(len(info) >= 1) - # These basically clue us in to server changes. 
- if version.at_least(db.connection, (1, 9, 1, -1)): - self.assertTrue(isinstance(info[0]['responseLength'], int)) - self.assertTrue(isinstance(info[0]['millis'], int)) - self.assertTrue(isinstance(info[0]['client'], basestring)) - self.assertTrue(isinstance(info[0]['user'], basestring)) - self.assertTrue(isinstance(info[0]['ns'], basestring)) - self.assertTrue(isinstance(info[0]['op'], basestring)) - else: - self.assertTrue(isinstance(info[0]["info"], basestring)) - self.assertTrue(isinstance(info[0]["millis"], float)) - self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) + if client_context.is_rs: + db_wc = Database(self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN) + with self.assertRaises(WriteConcernError): + db_wc.drop_collection("test") - def test_iteration(self): + def test_validate_collection(self): db = self.client.pymongo_test - def iterate(): - [a for a in db] + with self.assertRaises(TypeError): + db.validate_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.validate_collection(None) # type: ignore[arg-type] - self.assertRaises(TypeError, iterate) + db.test.insert_one({"dummy": "object"}) - def test_errors(self): - if is_mongos(self.client): - raise SkipTest('getpreverror not supported by mongos') - db = self.client.pymongo_test + with self.assertRaises(OperationFailure): + db.validate_collection("test.doesnotexist") + with self.assertRaises(OperationFailure): + db.validate_collection(db.test.doesnotexist) - db.reset_error_history() - self.assertEqual(None, db.error()) - self.assertEqual(None, db.previous_error()) - - db.command("forceerror", check=False) - self.assertTrue(db.error()) - self.assertTrue(db.previous_error()) - - db.command("forceerror", check=False) - self.assertTrue(db.error()) - prev_error = db.previous_error() - self.assertEqual(prev_error["nPrev"], 1) - del prev_error["nPrev"] - prev_error.pop("lastOp", None) - error = db.error() - error.pop("lastOp", None) - # getLastError includes "connectionId" in recent - # server versions, getPrevError does not. - error.pop("connectionId", None) - self.assertEqual(error, prev_error) - - db.test.find_one() - self.assertEqual(None, db.error()) - self.assertTrue(db.previous_error()) - self.assertEqual(db.previous_error()["nPrev"], 2) - - db.reset_error_history() - self.assertEqual(None, db.error()) - self.assertEqual(None, db.previous_error()) + self.assertTrue(db.validate_collection("test")) + self.assertTrue(db.validate_collection(db.test)) + self.assertTrue(db.validate_collection(db.test, full=True)) + self.assertTrue(db.validate_collection(db.test, scandata=True)) + self.assertTrue(db.validate_collection(db.test, scandata=True, full=True)) + self.assertTrue(db.validate_collection(db.test, True, True)) + + @client_context.require_version_min(4, 3, 3) + @client_context.require_no_standalone + def test_validate_collection_background(self): + db = self.client.pymongo_test.with_options(write_concern=WriteConcern(w="majority")) + db.test.insert_one({"dummy": "object"}) + coll = db.test + self.assertTrue(db.validate_collection(coll, background=False)) + # The inMemory storage engine does not support background=True. + if client_context.storage_engine != "inMemory": + # background=True requires the collection exist in a checkpoint. 
+ self.client.admin.command("fsync") + self.assertTrue(db.validate_collection(coll, background=True)) + self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) + # The server does not support background=True with full=True. + # Assert that we actually send the background option by checking + # that this combination fails. + with self.assertRaises(OperationFailure): + db.validate_collection(coll, full=True, background=True) def test_command(self): + self.maxDiff = None db = self.client.admin - - self.assertEqual(db.command("buildinfo"), db.command({"buildinfo": 1})) - - def test_command_ignores_network_timeout(self): - # command() should ignore network_timeout. - if not version.at_least(self.client, (1, 9, 0)): - raise SkipTest("Need sleep() to test command with network timeout") - + first = db.command("buildinfo") + second = db.command({"buildinfo": 1}) + third = db.command("buildinfo", 1) + self.assertEqualReply(first, second) + self.assertEqualReply(second, third) + + # We use 'aggregate' as our example command, since it's an easy way to + # retrieve a BSON regex from a collection using a command. + def test_command_with_regex(self): db = self.client.pymongo_test + db.test.drop() + db.test.insert_one({"r": re.compile(".*")}) + db.test.insert_one({"r": Regex(".*")}) + + result = db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertIsInstance(doc["r"], Regex) + + def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). + if client_context.version.at_least(8, 0): + self.client.admin.command( + { + "bulkWrite": 1, + "nsInfo": [{"ns": self.db.test.full_name}], + "ops": [{"insert": 0, "document": {}}], + } + ) + self.db.command({"insert": "test", "documents": [{}]}) + self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]}) + self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]}) + self.db.test.drop() - # No errors. - db.test.remove() - db.test.insert({}) - cursor = db.test.find( - {'$where': 'sleep(100); return true'}, network_timeout=0.001) - - self.assertEqual(1, cursor.count()) - # mongos doesn't support the eval command - if not is_mongos(self.client): - db.command('eval', 'sleep(100)', network_timeout=0.001) - - def test_command_with_compile_re(self): - # We use 'aggregate' as our example command, since it's an easy way to - # retrieve a BSON regex from a collection using a command. But until - # MongoDB 2.3.2, aggregation turned regexes into strings: SERVER-6470. 
- if not version.at_least(self.client, (2, 3, 2)): - raise SkipTest( - "Retrieving a regex with aggregation requires " - "MongoDB >= 2.3.2") - + def test_cursor_command(self): db = self.client.pymongo_test db.test.drop() - db.test.insert({'r': re.compile('.*')}) - result = db.command('aggregate', 'test', pipeline=[]) - self.assertTrue(isinstance(result['result'][0]['r'], RE_TYPE)) - result = db.command('aggregate', 'test', pipeline=[], compile_re=False) - self.assertTrue(isinstance(result['result'][0]['r'], Regex)) + docs = [{"_id": i, "doc": i} for i in range(3)] + db.test.insert_many(docs) - def test_last_status(self): - db = self.client.pymongo_test + cursor = db.cursor_command("find", "test") - db.test.remove({}) - db.test.save({"i": 1}) + self.assertIsInstance(cursor, CommandCursor) - db.test.update({"i": 1}, {"$set": {"i": 2}}, w=0) - self.assertTrue(db.last_status()["updatedExisting"]) + result_docs = cursor.to_list() + self.assertEqual(docs, result_docs) - db.test.update({"i": 1}, {"$set": {"i": 500}}, w=0) - self.assertFalse(db.last_status()["updatedExisting"]) + def test_cursor_command_invalid(self): + with self.assertRaises(InvalidOperation): + self.db.cursor_command("usersInfo", "test") + @client_context.require_no_fips def test_password_digest(self): - self.assertRaises(TypeError, auth._password_digest, 5) - self.assertRaises(TypeError, auth._password_digest, True) - self.assertRaises(TypeError, auth._password_digest, None) - - self.assertTrue(isinstance(auth._password_digest("mike", "password"), - unicode)) - self.assertEqual(auth._password_digest("mike", "password"), - u"cd7e45b3b2767dc2fa9b6b548457ed00") - self.assertEqual(auth._password_digest("mike", "password"), - auth._password_digest(u"mike", u"password")) - self.assertEqual(auth._password_digest("Gustave", u"Dor\xe9"), - u"81e0e2364499209f466e75926a162d73") - - def test_authenticate_add_remove_user(self): - if (is_mongos(self.client) and not - version.at_least(self.client, (2, 0, 0))): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - - db = self.client.pymongo_test - - # Configuration errors - self.assertRaises(ValueError, db.add_user, "user", '') - self.assertRaises(TypeError, db.add_user, "user", 'password', 15) - self.assertRaises(ConfigurationError, db.add_user, - "user", 'password', 'True') - self.assertRaises(ConfigurationError, db.add_user, - "user", 'password', True, roles=['read']) - - if version.at_least(self.client, (2, 5, 3, -1)): - warnings.simplefilter("error", DeprecationWarning) - try: - self.assertRaises(DeprecationWarning, db.add_user, - "user", "password") - self.assertRaises(DeprecationWarning, db.add_user, - "user", "password", True) - finally: - warnings.resetwarnings() - warnings.simplefilter("ignore") - - self.assertRaises(ConfigurationError, db.add_user, - "user", "password", digestPassword=True) - - self.client.admin.add_user("admin", "password") - self.client.admin.authenticate("admin", "password") - - try: - # Add / authenticate / remove - db.add_user("mike", "password") - self.assertRaises(TypeError, db.authenticate, 5, "password") - self.assertRaises(TypeError, db.authenticate, "mike", 5) - self.assertRaises(OperationFailure, - db.authenticate, "mike", "not a real password") - self.assertRaises(OperationFailure, - db.authenticate, "faker", "password") - self.assertTrue(db.authenticate("mike", "password")) - db.logout() - self.assertTrue(db.authenticate(u"mike", 
u"password")) - db.remove_user("mike") - db.logout() - - self.assertRaises(OperationFailure, - db.authenticate, "mike", "password") - - # Add / authenticate / change password - self.assertRaises(OperationFailure, - db.authenticate, "Gustave", u"Dor\xe9") - db.add_user("Gustave", u"Dor\xe9") - self.assertTrue(db.authenticate("Gustave", u"Dor\xe9")) - db.add_user("Gustave", "password") - db.logout() - self.assertRaises(OperationFailure, - db.authenticate, "Gustave", u"Dor\xe9") - self.assertTrue(db.authenticate("Gustave", u"password")) - - if not version.at_least(self.client, (2, 5, 3, -1)): - # Add a readOnly user - db.add_user("Ross", "password", read_only=True) - db.logout() - self.assertTrue(db.authenticate("Ross", u"password")) - self.assertTrue(db.system.users.find({"readOnly": True}).count()) - db.logout() - - # Cleanup - finally: - remove_all_users(db) - self.client.admin.remove_user("admin") - self.client.admin.logout() - - def test_make_user_readonly(self): - if (is_mongos(self.client) - and not version.at_least(self.client, (2, 0, 0))): - raise SkipTest('Auth with sharding requires MongoDB >= 2.0.0') - - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - - admin = self.client.admin - admin.add_user('admin', 'pw') - admin.authenticate('admin', 'pw') - - db = self.client.pymongo_test - - try: - # Make a read-write user. - db.add_user('jesse', 'pw') - admin.logout() - - # Check that we're read-write by default. - db.authenticate('jesse', 'pw') - db.collection.insert({}) - db.logout() - - # Make the user read-only. - admin.authenticate('admin', 'pw') - db.add_user('jesse', 'pw', read_only=True) - admin.logout() - - db.authenticate('jesse', 'pw') - self.assertRaises(OperationFailure, db.collection.insert, {}) - finally: - # Cleanup - admin.authenticate('admin', 'pw') - remove_all_users(db) - admin.remove_user("admin") - admin.logout() - - def test_default_roles(self): - if not version.at_least(self.client, (2, 5, 3, -1)): - raise SkipTest("Default roles only exist in MongoDB >= 2.5.3") - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - - # "Admin" user - db = self.client.admin - db.add_user('admin', 'pass') - try: - db.authenticate('admin', 'pass') - info = db.command('usersInfo', 'admin')['users'][0] - self.assertEqual("root", info['roles'][0]['role']) - - # Read only "admin" user - db.add_user('ro-admin', 'pass', read_only=True) - db.logout() - db.authenticate('ro-admin', 'pass') - info = db.command('usersInfo', 'ro-admin')['users'][0] - self.assertEqual("readAnyDatabase", info['roles'][0]['role']) - db.logout() - - # Cleanup - finally: - db.authenticate('admin', 'pass') - remove_all_users(db) - db.logout() - - db.connection.disconnect() - - # "Non-admin" user - db = self.client.pymongo_test - db.add_user('user', 'pass') - try: - db.authenticate('user', 'pass') - info = db.command('usersInfo', 'user')['users'][0] - self.assertEqual("dbOwner", info['roles'][0]['role']) - - # Read only "Non-admin" user - db.add_user('ro-user', 'pass', read_only=True) - db.logout() - db.authenticate('ro-user', 'pass') - info = db.command('usersInfo', 'ro-user')['users'][0] - self.assertEqual("read", info['roles'][0]['role']) - db.logout() - - # Cleanup - finally: - db.authenticate('user', 'pass') - remove_all_users(db) - db.logout() - - def test_new_user_cmds(self): - if not version.at_least(self.client, (2, 5, 3, -1)): - raise SkipTest("User manipulation through commands " - "requires 
MongoDB >= 2.5.3") - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - - db = self.client.pymongo_test - db.add_user("amalia", "password", roles=["userAdmin"]) - db.authenticate("amalia", "password") - try: - # This tests the ability to update user attributes. - db.add_user("amalia", "new_password", - customData={"secret": "koalas"}) - - user_info = db.command("usersInfo", "amalia") - self.assertTrue(user_info["users"]) - amalia_user = user_info["users"][0] - self.assertEqual(amalia_user["user"], "amalia") - self.assertEqual(amalia_user["customData"], {"secret": "koalas"}) - finally: - db.remove_user("amalia") - db.logout() - - def test_authenticate_and_safe(self): - if (is_mongos(self.client) and not - version.at_least(self.client, (2, 0, 0))): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - db = self.client.auth_test - - db.add_user("bernie", "password", - roles=["userAdmin", "dbAdmin", "readWrite"]) - db.authenticate("bernie", "password") - try: - db.test.remove({}) - self.assertTrue(db.test.insert({"bim": "baz"})) - self.assertEqual(1, db.test.count()) - - self.assertEqual(1, - db.test.update({"bim": "baz"}, - {"$set": {"bim": "bar"}}).get('n')) - - self.assertEqual(1, - db.test.remove({}).get('n')) - - self.assertEqual(0, db.test.count()) - finally: - db.remove_user("bernie") - db.logout() - - def test_authenticate_and_request(self): - if (is_mongos(self.client) and not - version.at_least(self.client, (2, 0, 0))): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(self.client): - raise SkipTest('Authentication is not enabled on server') - - # Database.authenticate() needs to be in a request - check that it - # always runs in a request, and that it restores the request state - # (in or not in a request) properly when it's finished. 
- self.assertFalse(self.client.auto_start_request) - db = self.client.pymongo_test - db.add_user("mike", "password", - roles=["userAdmin", "dbAdmin", "readWrite"]) - try: - self.assertFalse(self.client.in_request()) - self.assertTrue(db.authenticate("mike", "password")) - self.assertFalse(self.client.in_request()) - - request_cx = get_client(auto_start_request=True) - request_db = request_cx.pymongo_test - self.assertTrue(request_db.authenticate("mike", "password")) - self.assertTrue(request_cx.in_request()) - finally: - db.authenticate("mike", "password") - db.remove_user("mike") - db.logout() - request_db.logout() - - def test_authenticate_multiple(self): - client = get_client() - if (is_mongos(client) and not - version.at_least(self.client, (2, 0, 0))): - raise SkipTest("Auth with sharding requires MongoDB >= 2.0.0") - if not server_started_with_auth(client): - raise SkipTest("Authentication is not enabled on server") - - # Setup - users_db = client.pymongo_test - admin_db = client.admin - other_db = client.pymongo_test1 - users_db.test.remove() - other_db.test.remove() - - admin_db.add_user('admin', 'pass', - roles=["userAdminAnyDatabase", "dbAdmin", - "clusterAdmin", "readWrite"]) - try: - self.assertTrue(admin_db.authenticate('admin', 'pass')) - - if version.at_least(self.client, (2, 5, 3, -1)): - admin_db.add_user('ro-admin', 'pass', - roles=["userAdmin", "readAnyDatabase"]) - else: - admin_db.add_user('ro-admin', 'pass', read_only=True) - - users_db.add_user('user', 'pass', - roles=["userAdmin", "readWrite"]) - - admin_db.logout() - self.assertRaises(OperationFailure, users_db.test.find_one) - - # Regular user should be able to query its own db, but - # no other. - users_db.authenticate('user', 'pass') - self.assertEqual(0, users_db.test.count()) - self.assertRaises(OperationFailure, other_db.test.find_one) - - # Admin read-only user should be able to query any db, - # but not write. - admin_db.authenticate('ro-admin', 'pass') - self.assertEqual(0, other_db.test.count()) - self.assertRaises(OperationFailure, - other_db.test.insert, {}) - - # Force close all sockets - client.disconnect() - - # We should still be able to write to the regular user's db - self.assertTrue(users_db.test.remove()) - # And read from other dbs... - self.assertEqual(0, other_db.test.count()) - # But still not write to other dbs... - self.assertRaises(OperationFailure, - other_db.test.insert, {}) - - # Cleanup - finally: - admin_db.logout() - users_db.logout() - admin_db.authenticate('admin', 'pass') - remove_all_users(users_db) - remove_all_users(admin_db) + with self.assertRaises(TypeError): + auth._password_digest(5) # type: ignore[arg-type, call-arg] + with self.assertRaises(TypeError): + auth._password_digest(True) # type: ignore[arg-type, call-arg] + with self.assertRaises(TypeError): + auth._password_digest(None) # type: ignore[arg-type, call-arg] + + self.assertIsInstance(auth._password_digest("mike", "password"), str) + self.assertEqual( + auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" + ) + self.assertEqual( + auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73" + ) def test_id_ordering(self): # PyMongo attempts to have _id show up first @@ -671,82 +473,73 @@ def test_id_ordering(self): # work right in Jython or any Python or environment # with hash randomization enabled (e.g. tox). 
db = self.client.pymongo_test - db.test.remove({}) - db.test.insert(SON([("hello", "world"), - ("_id", 5)])) + db.test.drop() + db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) - cursor = db.test.find(as_class=SON) + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + cursor = db.test.find() for x in cursor: - for (k, v) in x.items(): + for k, _v in x.items(): self.assertEqual(k, "_id") break def test_deref(self): db = self.client.pymongo_test - db.test.remove({}) + db.test.drop() - self.assertRaises(TypeError, db.dereference, 5) - self.assertRaises(TypeError, db.dereference, "hello") - self.assertRaises(TypeError, db.dereference, None) + with self.assertRaises(TypeError): + db.dereference(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.dereference("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.dereference(None) # type: ignore[arg-type] self.assertEqual(None, db.dereference(DBRef("test", ObjectId()))) - obj = {"x": True} - key = db.test.save(obj) + obj: dict[str, Any] = {"x": True} + key = (db.test.insert_one(obj)).inserted_id self.assertEqual(obj, db.dereference(DBRef("test", key))) - self.assertEqual(obj, - db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, - db.dereference, DBRef("test", key, "foo")) + self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) + with self.assertRaises(ValueError): + db.dereference(DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} - db.test.save(obj) + db.test.insert_one(obj) self.assertEqual(obj, db.dereference(DBRef("test", 4))) - def test_eval(self): + def test_deref_kwargs(self): db = self.client.pymongo_test - db.test.remove({}) - - self.assertRaises(TypeError, db.eval, None) - self.assertRaises(TypeError, db.eval, 5) - self.assertRaises(TypeError, db.eval, []) - - self.assertEqual(3, db.eval("function (x) {return x;}", 3)) - self.assertEqual(3, db.eval(u"function (x) {return x;}", 3)) - - self.assertEqual(None, - db.eval("function (x) {db.test.save({y:x});}", 5)) - self.assertEqual(db.test.find_one()["y"], 5) - - self.assertEqual(5, db.eval("function (x, y) {return x + y;}", 2, 3)) - self.assertEqual(5, db.eval("function () {return 5;}")) - self.assertEqual(5, db.eval("2 + 3;")) - - self.assertEqual(5, db.eval(Code("2 + 3;"))) - self.assertRaises(OperationFailure, db.eval, Code("return i;")) - self.assertEqual(2, db.eval(Code("return i;", {"i": 2}))) - self.assertEqual(5, db.eval(Code("i + 3;", {"i": 2}))) + db.test.drop() - self.assertRaises(OperationFailure, db.eval, "5 ++ 5;") + db.test.insert_one({"_id": 4, "foo": "bar"}) + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + self.assertEqual( + SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) + ) # TODO some of these tests belong in the collection level testing. 
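# A compact sketch of Database.dereference(), which the deref tests above
# cover (assumes a reachable mongod; "people" and "Ada" are illustrative).
# A DBRef names a collection, an _id, and optionally a database; extra
# keyword arguments are forwarded to the underlying find_one().
from bson.dbref import DBRef
from pymongo import MongoClient

client = MongoClient()
db = client.demo_db
inserted_id = db.people.insert_one({"name": "Ada"}).inserted_id

ref = DBRef("people", inserted_id)  # No database given: resolved in db itself.
assert db.dereference(ref, projection={"_id": False}) == {"name": "Ada"}
# A DBRef naming a different database raises ValueError, per test_deref.
client.drop_database("demo_db")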
- def test_save_find_one(self): - db = Database(self.client, "pymongo_test") - db.test.remove({}) + def test_insert_find_one(self): + db = self.client.pymongo_test + db.test.drop() - a_doc = SON({"hello": u"world"}) - a_key = db.test.save(a_doc) - self.assertTrue(isinstance(a_doc["_id"], ObjectId)) + a_doc = SON({"hello": "world"}) + a_key = (db.test.insert_one(a_doc)).inserted_id + self.assertIsInstance(a_doc["_id"], ObjectId) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, db.test.find_one(a_key)) self.assertEqual(None, db.test.find_one(ObjectId())) - self.assertEqual(a_doc, db.test.find_one({"hello": u"world"})) - self.assertEqual(None, db.test.find_one({"hello": u"test"})) + self.assertEqual(a_doc, db.test.find_one({"hello": "world"})) + self.assertEqual(None, db.test.find_one({"hello": "test"})) b = db.test.find_one() - b["hello"] = u"mike" - db.test.save(b) + assert b is not None + b["hello"] = "mike" + db.test.replace_one({"_id": b["_id"]}, b) self.assertNotEqual(a_doc, db.test.find_one(a_key)) self.assertEqual(b, db.test.find_one(a_key)) @@ -759,235 +552,212 @@ def test_save_find_one(self): def test_long(self): db = self.client.pymongo_test - db.test.remove({}) - db.test.save({"x": 9223372036854775807L}) - self.assertEqual(9223372036854775807L, db.test.find_one()["x"]) - - def test_remove(self): + db.test.drop() + db.test.insert_one({"x": 9223372036854775807}) + retrieved = (db.test.find_one())["x"] + self.assertEqual(Int64(9223372036854775807), retrieved) + self.assertIsInstance(retrieved, Int64) + db.test.delete_many({}) + db.test.insert_one({"x": Int64(1)}) + retrieved = (db.test.find_one())["x"] + self.assertEqual(Int64(1), retrieved) + self.assertIsInstance(retrieved, Int64) + + def test_delete(self): db = self.client.pymongo_test - db.test.remove({}) + db.test.drop() - one = db.test.save({"x": 1}) - db.test.save({"x": 2}) - db.test.save({"x": 3}) + db.test.insert_one({"x": 1}) + db.test.insert_one({"x": 2}) + db.test.insert_one({"x": 3}) length = 0 for _ in db.test.find(): length += 1 self.assertEqual(length, 3) - db.test.remove(one) + db.test.delete_one({"x": 1}) length = 0 for _ in db.test.find(): length += 1 self.assertEqual(length, 2) - db.test.remove(db.test.find_one()) - db.test.remove(db.test.find_one()) + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] self.assertEqual(db.test.find_one(), None) - one = db.test.save({"x": 1}) - db.test.save({"x": 2}) - db.test.save({"x": 3}) + db.test.insert_one({"x": 1}) + db.test.insert_one({"x": 2}) + db.test.insert_one({"x": 3}) self.assertTrue(db.test.find_one({"x": 2})) - db.test.remove({"x": 2}) + db.test.delete_one({"x": 2}) self.assertFalse(db.test.find_one({"x": 2})) self.assertTrue(db.test.find_one()) - db.test.remove({}) + db.test.delete_many({}) self.assertFalse(db.test.find_one()) - def test_save_a_bunch(self): - db = self.client.pymongo_test - db.test.remove({}) - - for i in xrange(1000): - db.test.save({"x": i}) - - count = 0 - for _ in db.test.find(): - count += 1 - - self.assertEqual(1000, count) - - # test that kill cursors doesn't assert or anything - for _ in xrange(62): - for _ in db.test.find(): - break - - def test_auto_ref_and_deref(self): - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - - db.test.a.remove({}) - db.test.b.remove({}) - db.test.c.remove({}) - - a = {"hello": 
u"world"} - db.test.a.save(a) - - b = {"test": a} - db.test.b.save(b) - - c = {"another test": b} - db.test.c.save(c) - - a["hello"] = "mike" - db.test.a.save(a) - - self.assertEqual(db.test.a.find_one(), a) - self.assertEqual(db.test.b.find_one()["test"], a) - self.assertEqual(db.test.c.find_one()["another test"]["test"], a) - self.assertEqual(db.test.b.find_one(), b) - self.assertEqual(db.test.c.find_one()["another test"], b) - self.assertEqual(db.test.c.find_one(), c) + def test_command_response_without_ok(self): + # Sometimes (SERVER-10891) the server's response to a badly-formatted + # command document will have no 'ok' field. We should raise + # OperationFailure instead of KeyError. + with self.assertRaises(OperationFailure): + helpers_shared._check_command_response({}, None) - # some stuff the user marc wanted to be able to do, make sure it works - def test_marc(self): - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) + try: + helpers_shared._check_command_response({"$err": "foo"}, None) + except OperationFailure as e: + self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") + else: + self.fail("_check_command_response didn't raise OperationFailure") - db.drop_collection("users") - db.drop_collection("messages") + def test_mongos_response(self): + error_document = { + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } - message_1 = {"title": "foo"} - db.messages.save(message_1) - message_2 = {"title": "bar"} - db.messages.save(message_2) + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) - user = {"name": "marc", - "messages": [message_1, message_2]} - db.users.save(user) + self.assertIn("inner", str(context.exception)) - message = db.messages.find_one() - db.messages.update(message, {"title": "buzz"}) + # If a shard has no primary and you run a command like dbstats, which + # cannot be run on a secondary, mongos's response includes empty "raw" + # errors. See SERVER-15428. 
+ error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} - self.assertEqual("buzz", db.users.find_one()["messages"][0]["title"]) - self.assertEqual("bar", db.users.find_one()["messages"][1]["title"]) + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) - def test_system_js(self): - db = self.client.pymongo_test - db.system.js.remove() - - self.assertEqual(0, db.system.js.count()) - db.system_js.add = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count()) - self.assertEqual(6, db.system_js.add(1, 5)) - del db.system_js.add - self.assertEqual(0, db.system.js.count()) - - db.system_js['add'] = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count()) - self.assertEqual(6, db.system_js['add'](1, 5)) - del db.system_js['add'] - self.assertEqual(0, db.system.js.count()) - - if version.at_least(db.connection, (1, 3, 2, -1)): - self.assertRaises(OperationFailure, db.system_js.add, 1, 5) - - # TODO right now CodeWScope doesn't work w/ system js - # db.system_js.scope = Code("return hello;", {"hello": 8}) - # self.assertEqual(8, db.system_js.scope()) - - self.assertRaises(OperationFailure, db.system_js.non_existant) - - # XXX: Broken in V8, works in SpiderMonkey - if not version.at_least(db.connection, (2, 3, 0)): - db.system_js.no_param = Code("return 5;") - self.assertEqual(5, db.system_js.no_param()) - - def test_system_js_list(self): - db = self.client.pymongo_test - db.system.js.remove() - self.assertEqual([], db.system_js.list()) - - db.system_js.foo = "function() { return 'blah'; }" - self.assertEqual(["foo"], db.system_js.list()) - - db.system_js.bar = "function() { return 'baz'; }" - self.assertEqual(set(["foo", "bar"]), set(db.system_js.list())) - - del db.system_js.foo - self.assertEqual(["bar"], db.system_js.list()) - - def test_manipulator_properties(self): - db = self.client.foo - self.assertEqual([], db.incoming_manipulators) - self.assertEqual([], db.incoming_copying_manipulators) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual([], db.outgoing_copying_manipulators) - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - db.add_son_manipulator(ObjectIdShuffler()) - self.assertEqual(1, len(db.incoming_manipulators)) - self.assertEqual(db.incoming_manipulators, ['NamespaceInjector']) - self.assertEqual(2, len(db.incoming_copying_manipulators)) - for name in db.incoming_copying_manipulators: - self.assertTrue(name in ('ObjectIdShuffler', 'AutoReference')) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual(['AutoReference'], db.outgoing_copying_manipulators) + self.assertIn("outer", str(context.exception)) - def test_command_response_without_ok(self): - # Sometimes (SERVER-10891) the server's response to a badly-formatted - # command document will have no 'ok' field. We should raise - # OperationFailure instead of KeyError. - self.assertRaises( - OperationFailure, - helpers._check_command_response, {}, reset=None) + # Raw error has ok: 0 but no errmsg. Not a known case, but test it. 
+ error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} - try: - helpers._check_command_response({'$err': 'foo'}, reset=None) - except OperationFailure, e: - self.assertEqual(e.args[0], 'foo') - else: - self.fail("_check_command_response didn't raise OperationFailure") + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) - def test_command_read_pref_warning(self): - warnings.simplefilter("error", UserWarning) - try: - self.assertRaises(UserWarning, self.client.pymongo_test.command, - 'ping', read_preference=ReadPreference.SECONDARY) - try: - self.client.pymongo_test.command( - 'dbStats', read_preference=ReadPreference.SECONDARY) - except UserWarning: - self.fail("Shouldn't have raised UserWarning.") - finally: - warnings.resetwarnings() - warnings.simplefilter("ignore") + self.assertIn("outer", str(context.exception)) + @client_context.require_test_commands + @client_context.require_no_mongos def test_command_max_time_ms(self): - if not version.at_least(self.client, (2, 5, 3, -1)): - raise SkipTest("MaxTimeMS requires MongoDB >= 2.5.3") - if "enableTestCommands=1" not in get_command_line(self.client)["argv"]: - raise SkipTest("Test commands must be enabled.") - - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: db = self.client.pymongo_test - db.command('count', 'test') - self.assertRaises(ExecutionTimeout, db.command, - 'count', 'test', maxTimeMS=1) - pipeline = [{'$project': {'name': 1, 'count': 1}}] + db.command("count", "test") + with self.assertRaises(ExecutionTimeout): + db.command("count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. - db.command('aggregate', 'test', pipeline=pipeline) - self.assertRaises(ExecutionTimeout, db.command, - 'aggregate', 'test', - pipeline=pipeline, maxTimeMS=1) + db.command("aggregate", "test", pipeline=pipeline, cursor={}) + with self.assertRaises(ExecutionTimeout): + db.command( + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) # Collection helper. db.test.aggregate(pipeline=pipeline) - self.assertRaises(ExecutionTimeout, - db.test.aggregate, pipeline, maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + db.test.aggregate(pipeline, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + + def test_with_options(self): + codec_options = DECIMAL_CODECOPTS + read_preference = ReadPreference.SECONDARY_PREFERRED + write_concern = WriteConcern(j=True) + read_concern = ReadConcern(level="majority") + + # List of all options to compare. 
+ allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] + + db1 = self.client.get_database( + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) + + # Case 1: swap no options + db2 = db1.with_options() + for opt in allopts: + self.assertEqual(getattr(db1, opt), getattr(db2, opt)) + + # Case 2: swap all options + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] + for opt in newopts: + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) + + +class TestDatabaseAggregation(IntegrationTest): + def setUp(self): + super().setUp() + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] + self.result = {"dummy": "dummy field"} + self.admin = self.client.admin + + def test_database_aggregation(self): + with self.admin.aggregate(self.pipeline) as cursor: + result = next(cursor) + self.assertEqual(result, self.result) + + @client_context.require_no_mongos + def test_database_aggregation_fake_cursor(self): + coll_name = "test_output" + write_stage: dict + if client_context.version < (4, 3): + db_name = "admin" + write_stage = {"$out": coll_name} + else: + # SERVER-43287 disallows writing with $out to the admin db, use + # $merge instead. + db_name = "pymongo_test" + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} + output_coll = self.client[db_name][coll_name] + output_coll.drop() + self.addCleanup(output_coll.drop) + + admin = self.admin.with_options(write_concern=WriteConcern(w=0)) + pipeline = self.pipeline[:] + pipeline.append(write_stage) + with admin.aggregate(pipeline) as cursor: + with self.assertRaises(StopIteration): + next(cursor) + + def lambda_fn(): + return output_coll.find_one() + + result = wait_until(lambda_fn, "read unacknowledged write") + self.assertEqual(result["dummy"], self.result["dummy"]) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Database(self.client, "test")) if __name__ == "__main__": diff --git a/test/test_dbref.py b/test/test_dbref.py index a34b3ae999..ac2767a1ce 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,17 +13,20 @@ # limitations under the License. 
"""Tests for the dbref module.""" +from __future__ import annotations import pickle -import unittest import sys +from typing import Any + sys.path[0:0] = [""] +from copy import deepcopy +from test import unittest + +from bson import decode, encode from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import b - -from copy import deepcopy class TestDBRef(unittest.TestCase): @@ -37,18 +40,17 @@ def test_creation(self): self.assertRaises(TypeError, DBRef, None, a) self.assertRaises(TypeError, DBRef, "coll", a, 5) self.assertTrue(DBRef("coll", a)) - self.assertTrue(DBRef(u"coll", a)) - self.assertTrue(DBRef(u"coll", 5)) - self.assertTrue(DBRef(u"coll", 5, "database")) + self.assertTrue(DBRef("coll", 5)) + self.assertTrue(DBRef("coll", 5, "database")) def test_read_only(self): a = DBRef("coll", ObjectId()) def foo(): - a.collection = "blah" + a.collection = "blah" # type: ignore[misc] def bar(): - a.id = "aoeu" + a.id = "aoeu" # type: ignore[misc] self.assertEqual("coll", a.collection) a.id @@ -57,55 +59,45 @@ def bar(): self.assertRaises(AttributeError, bar) def test_repr(self): - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - self.assertEqual(repr(DBRef(u"coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" - % (repr(u'coll'),) - ) - self.assertEqual(repr(DBRef("coll", 5, foo="bar")), - "DBRef('coll', 5, foo='bar')") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " - "'foo')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef('coll', ObjectId('1234567890abcdef12345678'))", + ) + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef({}, ObjectId('1234567890abcdef12345678'))".format(repr("coll")), + ) + self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), + "DBRef('coll', ObjectId('1234567890abcdef12345678'), 'foo')", + ) def test_equality(self): obj_id = ObjectId("1234567890abcdef12345678") - self.assertEqual(DBRef('foo', 5), DBRef('foo', 5)) - self.assertEqual(DBRef("coll", obj_id), DBRef(u"coll", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef(u"coll", obj_id, "foo")) + self.assertEqual(DBRef("foo", 5), DBRef("foo", 5)) + self.assertEqual(DBRef("coll", obj_id), DBRef("coll", obj_id)) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id), DBRef("col", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", ObjectId(b("123456789011")))) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", ObjectId(b"123456789011"))) self.assertNotEqual(DBRef("coll", obj_id), 4) - self.assertEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "foo")) - self.assertNotEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "bar")) + self.assertNotEqual(DBRef("coll", obj_id, "foo"), DBRef("coll", obj_id, "bar")) # Explicitly test inequality - self.assertFalse(DBRef('foo', 5) != DBRef('foo', 5)) - self.assertFalse(DBRef("coll", obj_id) != DBRef(u"coll", obj_id)) - self.assertFalse(DBRef("coll", obj_id, "foo") != - DBRef(u"coll", obj_id, "foo")) + self.assertFalse(DBRef("foo", 5) != DBRef("foo", 5)) + self.assertFalse(DBRef("coll", obj_id) != DBRef("coll", 
obj_id)) + self.assertFalse(DBRef("coll", obj_id, "foo") != DBRef("coll", obj_id, "foo")) def test_kwargs(self): - self.assertEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="bar")) + self.assertEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="bar")) self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5)) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="baz")) + self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="baz")) self.assertEqual("bar", DBRef("coll", 5, foo="bar").foo) - self.assertRaises(AttributeError, getattr, - DBRef("coll", 5, foo="bar"), "bar") + self.assertRaises(AttributeError, getattr, DBRef("coll", 5, foo="bar"), "bar") def test_deepcopy(self): - a = DBRef('coll', 'asdf', 'db', x=[1]) + a = DBRef("coll", "asdf", "db", x=[1]) b = deepcopy(a) self.assertEqual(a, b) @@ -118,22 +110,127 @@ def test_deepcopy(self): self.assertEqual(b.x, [2]) def test_pickling(self): - dbr = DBRef('coll', 5, foo='bar') + dbr = DBRef("coll", 5, foo="bar") for protocol in [0, 1, 2, -1]: pkl = pickle.dumps(dbr, protocol=protocol) dbr2 = pickle.loads(pkl) self.assertEqual(dbr, dbr2) def test_dbref_hash(self): - dbref_1a = DBRef('collection', 'id', 'database') - dbref_1b = DBRef('collection', 'id', 'database') + dbref_1a = DBRef("collection", "id", "database") + dbref_1b = DBRef("collection", "id", "database") self.assertEqual(hash(dbref_1a), hash(dbref_1b)) - dbref_2a = DBRef('collection', 'id', 'database', custom='custom') - dbref_2b = DBRef('collection', 'id', 'database', custom='custom') + dbref_2a = DBRef("collection", "id", "database", custom="custom") + dbref_2b = DBRef("collection", "id", "database", custom="custom") self.assertEqual(hash(dbref_2a), hash(dbref_2b)) self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) + +# https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md#test-plan +class TestDBRefSpec(unittest.TestCase): + def test_decoding_1_2_3(self): + doc: Any + for doc in [ + # 1, Valid documents MUST be decoded to a DBRef: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Valid documents with extra fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + # 3, Valid documents with out of order fields: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + self.assertEqual(dbref.collection, doc["$ref"]) + self.assertEqual(dbref.id, doc["$id"]) + self.assertEqual(dbref.database, doc.get("$db")) + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_decoding_4_5(self): + for doc in [ + # 4, Documents missing required fields MUST NOT be decoded to a + # DBRef: + {"$ref": "coll0"}, + {"$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$db": "db0"}, + # 5, Documents with invalid types for $ref or $db MUST NOT be + # decoded to a DBRef + {"$ref": 
True, "$id": 1}, + {"$ref": "coll0", "$id": 1, "$db": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, dict) + + def test_encoding_1_2(self): + doc: Any + for doc in [ + # 1, Encoding DBRefs with basic fields: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Encoding DBRefs with extra, optional fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({"dbref": doc}) + decoded = decode(encoded_doc) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + self.assertEqual(encoded_dbref, encoded_doc) + # Ensure extra fields are present. + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_encoding_3(self): + for doc in [ + # 3, Encoding DBRefs re-orders any out of order fields during + # decoding: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({"dbref": doc}) + decoded = decode(encoded_doc) + dbref = decoded["dbref"] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + # BSON does not match because DBRef fields are reordered. + self.assertNotEqual(encoded_dbref, encoded_doc) + self.assertEqual(decode(encoded_dbref), decode(encoded_doc)) + # Ensure extra fields are present. + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_decimal128.py b/test/test_decimal128.py new file mode 100644 index 0000000000..46819dd587 --- /dev/null +++ b/test/test_decimal128.py @@ -0,0 +1,73 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for Decimal128.""" +from __future__ import annotations + +import pickle +import sys +from decimal import Decimal + +sys.path[0:0] = [""] + +from test import client_context, unittest + +from bson.decimal128 import Decimal128, create_decimal128_context + + +class TestDecimal128(unittest.TestCase): + @client_context.require_connection + def test_round_trip(self): + coll = client_context.client.pymongo_test.test + coll.drop() + + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") + coll.insert_one({"dec128": dec128}) + doc = coll.find_one({"dec128": dec128}) + assert doc is not None + self.assertIsNotNone(doc) + self.assertEqual(doc["dec128"], dec128) + + def test_pickle(self): + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + pkl = pickle.dumps(dec128, protocol=protocol) + self.assertEqual(dec128, pickle.loads(pkl)) + + def test_special(self): + dnan = Decimal("NaN") + dnnan = Decimal("-NaN") + dsnan = Decimal("sNaN") + dnsnan = Decimal("-sNaN") + dnan128 = Decimal128(dnan) + dnnan128 = Decimal128(dnnan) + dsnan128 = Decimal128(dsnan) + dnsnan128 = Decimal128(dnsnan) + + # Due to the comparison rules for decimal.Decimal we have to + # compare strings. + self.assertEqual(str(dnan), str(dnan128.to_decimal())) + self.assertEqual(str(dnnan), str(dnnan128.to_decimal())) + self.assertEqual(str(dsnan), str(dsnan128.to_decimal())) + self.assertEqual(str(dnsnan), str(dnsnan128.to_decimal())) + + def test_decimal128_context(self): + ctx = create_decimal128_context() + self.assertEqual("NaN", str(ctx.copy().create_decimal(".13.1"))) + self.assertEqual("Infinity", str(ctx.copy().create_decimal("1E6145"))) + self.assertEqual("0E-6176", str(ctx.copy().create_decimal("1E-6177"))) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_default_exports.py b/test/test_default_exports.py new file mode 100644 index 0000000000..adc3882a36 --- /dev/null +++ b/test/test_default_exports.py @@ -0,0 +1,240 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the default exports of the top level packages.""" +from __future__ import annotations + +import inspect +import unittest + +import bson +import gridfs +import pymongo + +BSON_IGNORE = [] +GRIDFS_IGNORE = [ + "ASCENDING", + "DESCENDING", + "AsyncClientSession", + "Collection", + "ObjectId", + "validate_string", + "Database", + "ConfigurationError", + "WriteConcern", +] +PYMONGO_IGNORE = [] +GLOBAL_INGORE = ["TYPE_CHECKING", "annotations"] + + +class TestDefaultExports(unittest.TestCase): + def check_module(self, mod, ignores): + names = dir(mod) + names.remove("__all__") + for name in mod.__all__: + if name not in names and name not in ignores: + self.fail(f"{name} was included in {mod}.__all__ but is not a valid symbol") + + for name in names: + if name not in mod.__all__ and name not in ignores: + if name in GLOBAL_INGORE: + continue + value = getattr(mod, name) + if inspect.ismodule(value): + continue + if getattr(value, "__module__", None) == "typing": + continue + if not name.startswith("_"): + self.fail(f"{name} was not included in {mod}.__all__") + + def test_pymongo(self): + self.check_module(pymongo, PYMONGO_IGNORE) + + def test_gridfs(self): + self.check_module(gridfs, GRIDFS_IGNORE) + + def test_bson(self): + self.check_module(bson, BSON_IGNORE) + + def test_pymongo_imports(self): + import pymongo + from pymongo.auth import MECHANISMS + from pymongo.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + ) + from pymongo.change_stream import ( + ChangeStream, + ClusterChangeStream, + CollectionChangeStream, + DatabaseChangeStream, + ) + from pymongo.client_options import ClientOptions + from pymongo.client_session import ClientSession, SessionOptions, TransactionOptions + from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, + validate_collation_or_none, + ) + from pymongo.collection import Collection, ReturnDocument + from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor + from pymongo.cursor import Cursor, RawBatchCursor + from pymongo.database import Database + from pymongo.driver_info import DriverInfo + from pymongo.encryption import ( + Algorithm, + ClientEncryption, + QueryType, + RewrapManyDataKeyResult, + ) + from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts + from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + CursorNotFound, + DocumentTooLarge, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + ExecutionTimeout, + InvalidName, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + ProtocolError, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, + WriteError, + WTimeoutError, + ) + from pymongo.event_loggers import ( + CommandLogger, + ConnectionPoolLogger, + HeartbeatLogger, + ServerLogger, + TopologyLogger, + ) + from pymongo.mongo_client import MongoClient + from pymongo.monitoring import ( + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionPoolListener, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + 
ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, + TopologyEvent, + TopologyListener, + TopologyOpenedEvent, + register, + ) + from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + SearchIndexModel, + UpdateMany, + UpdateOne, + ) + from pymongo.pool import PoolOptions + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + SecondaryPreferred, + ) + from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, + ) + from pymongo.server_api import ServerApi, ServerApiVersion + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + from pymongo.uri_parser import ( + parse_host, + parse_ipv6_literal_host, + parse_uri, + parse_userinfo, + split_hosts, + split_options, + validate_options, + ) + from pymongo.write_concern import WriteConcern, validate_boolean + + def test_pymongo_submodule_attributes(self): + import pymongo + + self.assertTrue(hasattr(pymongo, "uri_parser")) + self.assertTrue(pymongo.uri_parser) + self.assertTrue(pymongo.uri_parser.parse_uri) + self.assertTrue(pymongo.change_stream) + self.assertTrue(pymongo.client_session) + self.assertTrue(pymongo.collection) + self.assertTrue(pymongo.cursor) + self.assertTrue(pymongo.command_cursor) + self.assertTrue(pymongo.database) + + def test_gridfs_imports(self): + import gridfs + from gridfs.errors import CorruptGridFile, FileExists, GridFSError, NoFile + from gridfs.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutChunkIterator, + GridOutCursor, + GridOutIterator, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py new file mode 100644 index 0000000000..67a82996bd --- /dev/null +++ b/test/test_discovery_and_monitoring.py @@ -0,0 +1,585 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import asyncio +import os +import socketserver +import sys +import threading +import time +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.helpers import ConcurrentRunner +from test.utils import flaky + +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import Connection + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + PyMongoTestCase, + UnitTest, + client_context, + unittest, +) +from test.pymongo_mocks import DummyMonitor +from test.unified_format import generate_test_classes +from test.utils import ( + get_pool, +) +from test.utils_shared import ( + CMAPListener, + HeartbeatEventListener, + HeartbeatEventsListListener, + assertion_context, + barrier_wait, + create_barrier, + server_name_to_type, + wait_until, +) +from unittest.mock import patch + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent +from pymongo.server_description import SERVER_TYPE, ServerDescription +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) + + +def create_mock_topology(uri, monitor_class=DummyMonitor): + parsed_uri = parse_uri(uri) + replica_set_name = None + direct_connection = None + load_balanced = None + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] + + topology_settings = TopologySettings( + parsed_uri["nodelist"], + replica_set_name=replica_set_name, + monitor_class=monitor_class, + direct_connection=direct_connection, + load_balanced=load_balanced, + ) + + c = Topology(topology_settings) + c.open() + return c + + +def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) + topology.on_change(server_description) + + +def got_app_error(topology, app_error): + server_address = common.partition_node(app_error["address"]) + server = topology.get_server_by_address(server_address) + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/Connection. 
+ try: + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") + else: + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: + if when == "beforeHandshakeCompletes": + completed_handshake = False + elif when == "afterHandshakeCompletes": + completed_handshake = True + else: + raise AssertionError(f"Unknown when field {when}") + + topology.handle_error( + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) + + +def get_type(topology, hostname): + description = topology.get_server_by_address((hostname, 27017)).description + return description.server_type + + +class TestAllScenarios(UnitTest): + pass + + +def topology_type_name(topology_type): + return TOPOLOGY_TYPE._fields[topology_type] + + +def server_type_name(server_type): + return SERVER_TYPE._fields[server_type] + + +def check_outcome(self, topology, outcome): + expected_servers = outcome["servers"] + + # Check weak equality before proceeding. + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) + + if outcome.get("compatible") is False: + with self.assertRaises(ConfigurationError): + topology.description.check_compatible() + else: + # No error. + topology.description.check_compatible() + + # Since lengths are equal, every actual server must have a corresponding + # expected server. + for expected_server_address, expected_server in expected_servers.items(): + node = common.partition_node(expected_server_address) + self.assertTrue(topology.has_server(node)) + actual_server = topology.get_server_by_address(node) + actual_server_description = actual_server.description + expected_server_type = server_name_to_type(expected_server["type"]) + + self.assertEqual( + server_type_name(expected_server_type), + server_type_name(actual_server_description.server_type), + ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) + + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) + + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) + + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) + + self.assertEqual( + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) + + expected_pool = expected_server.get("pool") + if expected_pool: + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) + + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) + + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) + + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) + + +def create_test(scenario_def): + def run_scenario(self): + c = 
create_mock_topology(scenario_def["uri"]) + + for i, phase in enumerate(scenario_def["phases"]): + # Including the phase description makes failures easier to debug. + description = phase.get("description", str(i)) + with assertion_context(f"phase: {description}"): + for response in phase.get("responses", []): + got_hello(c, common.partition_node(response[0]), response[1]) + + for app_error in phase.get("applicationErrors", []): + got_app_error(c, app_error) + + check_outcome(self, c, phase["outcome"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(SDAM_PATH): + dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestClusterTimeComparison(PyMongoTestCase): + def test_cluster_time_comparison(self): + t = create_mock_topology("mongodb://host") + + def send_cluster_time(time, inc): + old = t.max_cluster_time() + new = {"clusterTime": Timestamp(time, inc)} + got_hello( + t, + ("host", 27017), + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": new, + }, + ) + + actual = t.max_cluster_time() + # We never update $clusterTime from monitoring connections. + self.assertEqual(actual, old) + + send_cluster_time(0, 1) + send_cluster_time(2, 2) + send_cluster_time(2, 1) + send_cluster_time(1, 3) + send_cluster_time(2, 3) + + +class TestIgnoreStaleErrors(IntegrationTest): + def test_ignore_stale_connection_errors(self): + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = create_barrier(N_TASKS) + client = self.rs_or_single_client(minPoolSize=N_TASKS) + + # Wait for initial discovery. + client.admin.command("ping") + pool = get_pool(client) + starting_generation = pool.gen.get_overall() + wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") + + def mock_command(*args, **kwargs): + # Synchronize all tasks to ensure they use the same generation. + barrier_wait(barrier, timeout=30) + raise AutoReconnect("mock Connection.command error") + + for conn in pool.conns: + conn.command = mock_command + + def insert_command(i): + try: + client.test.command("insert", "test", documents=[{"i": i}]) + except AutoReconnect: + pass + + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: + t.start() + for t in tasks: + t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) + + # Server should be selectable. 
+ client.admin.command("ping") + + +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(IntegrationTest): + @client_context.require_failCommand_appName + def test_pool_unpause(self): + # This test implements the prose test "Connection Pool Management" + listener = CMAPHeartbeatListener() + _ = self.single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) + # Assert that ConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. + listener.wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) + + listener.reset() + fail_hello = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", + }, + } + with self.fail_point(fail_hello): + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + listener.wait_for_event(monitoring.PoolClearedEvent, 1) + listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) + listener.wait_for_event(monitoring.PoolReadyEvent, 1) + + @client_context.require_failCommand_appName + @client_context.require_test_commands + @client_context.require_async + @flaky(reason="PYTHON-5428") + def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = self.single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = (client._get_topology()).select_server(writable_server_selector, _Op.TEST) + wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + def run_task(): + while True: + start_time = time.monotonic() + client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + time.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + task.start() + original_close = Connection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + def mock_close(self, reason): + time.sleep(close_delay) + original_close(self, reason) + + Connection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + listener.wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shutdown the task + n = len(latencies) + wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + task.join() + # No operation latency should not significantly exceed close_delay + self.assertLessEqual(max(latencies), close_delay * 5.0) + finally: + Connection.close_conn = original_close + + +class TestServerMonitoringMode(IntegrationTest): + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + + 
def test_rtt_connection_is_enabled_stream(self): + client = self.rs_or_single_client(serverMonitoringMode="stream") + client.admin.command("ping") + + def predicate(): + for _, server in client._topology._servers.items(): + monitor = server._monitor + if not monitor._stream: + return False + if client_context.version >= (4, 4): + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._task is None: + return False + else: + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is not None: + return False + else: + if monitor._rtt_monitor._executor._task is not None: + return False + return True + + wait_until(predicate, "find all RTT monitors") + + def test_rtt_connection_is_disabled_poll(self): + client = self.rs_or_single_client(serverMonitoringMode="poll") + + self.assert_rtt_connection_is_disabled(client) + + def test_rtt_connection_is_disabled_auto(self): + envs = [ + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"}, + {"FUNCTIONS_WORKER_RUNTIME": "python"}, + {"K_SERVICE": "gcpservicename"}, + {"FUNCTION_NAME": "gcpfunctionname"}, + {"VERCEL": "1"}, + ] + for env in envs: + with patch.dict("os.environ", env): + client = self.rs_or_single_client(serverMonitoringMode="auto") + self.assert_rtt_connection_is_disabled(client) + + def assert_rtt_connection_is_disabled(self, client): + client.admin.command("ping") + for _, server in client._topology._servers.items(): + monitor = server._monitor + self.assertFalse(monitor._stream) + if _IS_SYNC: + self.assertIsNone(monitor._rtt_monitor._executor._thread) + else: + self.assertIsNone(monitor._rtt_monitor._executor._task) + + +class MockTCPHandler(socketserver.BaseRequestHandler): + def handle(self): + self.server.events.append("client connected") + if self.request.recv(1024).strip(): + self.server.events.append("client hello received") + self.request.close() + + +class TCPServer(socketserver.TCPServer): + allow_reuse_address = True + + def handle_request_and_shutdown(self): + self.handle_request() + self.server_close() + + +class TestHeartbeatStartOrdering(PyMongoTestCase): + def test_heartbeat_start_ordering(self): + events = [] + listener = HeartbeatEventsListListener(events) + + if _IS_SYNC: + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown) + server_thread.start() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + else: + + def handle_client(reader: StreamReader, writer: StreamWriter): + events.append("client connected") + if (reader.read(1024)).strip(): + events.append("client hello received") + writer.close() + writer.wait_closed() + + server = asyncio.start_server(handle_client, "localhost", 9999) + server.events = events + server.start_serving() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + _c._connect() + + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + server.close() + server.wait_closed() + _c.close() + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) + + +# Generate unified tests. 
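+# (generate_test_classes builds one TestCase subclass per JSON spec file under
+# the given directory; merging them into globals() lets unittest discovery
+# collect them like hand-written tests.)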
+globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_dns.py b/test/test_dns.py new file mode 100644 index 0000000000..8f88562e3f --- /dev/null +++ b/test/test_dns.py @@ -0,0 +1,306 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import glob +import json +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + unittest, +) +from test.utils_shared import wait_until +from unittest.mock import MagicMock, patch + +from pymongo.common import validate_read_preference_tags +from pymongo.errors import ConfigurationError +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import split_hosts + +_IS_SYNC = True + + +class TestDNSRepl(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) + load_balanced = False + + @client_context.require_replica_set + def setUp(self): + pass + + +class TestDNSLoadBalanced(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) + load_balanced = True + + @client_context.require_load_balancer + def setUp(self): + pass + + +class TestDNSSharded(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) + load_balanced = False + + @client_context.require_mongos + def setUp(self): + pass + + +def create_test(test_case): + def run_test(self): + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") + num_hosts = test_case.get("numHosts", len(hosts or [])) + + options = test_case.get("options", {}) + if "ssl" in options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. 
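+        # (Sketch of the resulting truth table: {} or {"tls": True} requires
+        # a TLS cluster; {"tls": False} or {"ssl": False} requires a non-TLS
+        # cluster.)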
+ needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) + if needs_tls and not client_context.tls: + self.skipTest("this test requires a TLS cluster") + if not needs_tls and client_context.tls: + self.skipTest("this test requires a non-TLS cluster") + + if seeds: + seeds = split_hosts(",".join(seeds)) + if hosts: + hosts = frozenset(split_hosts(",".join(hosts))) + + if seeds or num_seeds: + result = parse_uri(uri, validate=True) + if seeds is not None: + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result["nodelist"]), num_seeds) + if options: + opts = result["options"] + if "readpreferencetags" in opts: + rpts = validate_read_preference_tags( + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) + + hostname = next(iter(client_context.client.nodes))[0] + # The replica set members must be configured as 'localhost'. + if hostname == "localhost": + copts = client_context.default_client_options.copy() + # Remove tls since SRV parsing should add it automatically. + copts.pop("tls", None) + if client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts["tlsAllowInvalidHostnames"] = True + + client = self.simple_client(uri, **copts) + if client._options.connect: + client._connect() + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), num_seeds) + if hosts is not None: + wait_until(lambda: hosts == client.nodes, "match test hosts to client nodes") + if num_hosts is not None: + wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) + if test_case.get("ping", True): + client.admin.command("ping") + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. 
+ else: + try: + parse_uri(uri) + except (ConfigurationError, ValueError): + pass + else: + self.fail("failed to raise an exception") + + return run_test + + +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as dns_test_file: + test_method = create_test(json.load(dns_test_file)) + setattr(cls, "test_" + test_suffix, test_method) + + +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) + + +class TestParsingErrors(PyMongoTestCase): + def test_invalid_host(self): + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + client._connect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + client._connect() + + +class TestCaseInsensitive(IntegrationTest): + def test_connect_case_insensitive(self): + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + client._connect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + + +class TestInitialDnsSeedlistDiscovery(PyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.resolver.resolve") as mock_resolver: + + def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + if "expected_error" not in case: + parse_uri(connection_string) + else: + try: + parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.resolver.resolve"): + parse_uri("mongodb+srv://localhost/") + parse_uri("mongodb+srv://mongo.local/") + + def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + 
"mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_encryption.py b/test/test_encryption.py new file mode 100644 index 0000000000..04e61b7bad --- /dev/null +++ b/test/test_encryption.py @@ -0,0 +1,3735 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption spec.""" +from __future__ import annotations + +import base64 +import copy +import http.client +import json +import os +import pathlib +import re +import socket +import socketserver +import ssl +import sys +import textwrap +import traceback +import uuid +import warnings +from test import IntegrationTest, PyMongoTestCase, client_context +from test.test_bulk import BulkTestBase +from test.utils import flaky +from test.utils_spec_runner import SpecRunner, SpecTestCreator +from threading import Thread +from typing import Any, Dict, Mapping, Optional + +import pytest + +from pymongo.daemon import _spawn_daemon +from pymongo.synchronous.collection import Collection +from pymongo.uri_parser_shared import _parse_kms_tls_options + +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False + +sys.path[0:0] = [""] + +from test import ( + unittest, +) +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, + AWS_CREDS, + AWS_TEMP_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + DEFAULT_KMS_TLS, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.test_bulk import BulkTestBase +from test.unified_format import generate_test_classes +from test.utils_shared import ( + AllowListEventListener, + OvertCommandListener, + TopologyEventListener, + camel_to_snake_args, + is_greenthread_patched, + wait_until, +) +from test.utils_spec_runner import SpecRunner + +from bson import BSON, DatetimeMS, Decimal128, encode, json_util +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.json_util import JSONOptions +from bson.son import SON +from pymongo import ReadPreference +from pymongo.cursor_shared import CursorType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + 
DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.synchronous import encryption +from pymongo.synchronous.encryption import Algorithm, ClientEncryption, QueryType +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + +pytestmark = pytest.mark.encryption + +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + + +def get_client_opts(client): + return client.options + + +class TestAutoEncryptionOpts(PyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically + self.simple_client( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True + ), + connect=False, + ) + + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") + def test_init_requires_pymongocrypt(self): + with self.assertRaises(ConfigurationError): + AutoEncryptionOpts({}, "keyvault.datakeys") + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init(self): + opts = AutoEncryptionOpts({}, "keyvault.datakeys") + self.assertEqual(opts._kms_providers, {}) + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") + self.assertEqual(opts._key_vault_client, None) + self.assertEqual(opts._schema_map, None) + self.assertEqual(opts._bypass_auto_encryption, False) + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") + self.assertEqual(opts._mongocryptd_bypass_spawn, False) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + self.assertEqual(opts._kms_tls_options, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_spawn_args(self): + # User can override idleShutdownTimeoutSecs + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) + + # idleShutdownTimeoutSecs is added by default + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + + # Also added when other options are given + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) + self.assertEqual( + opts._mongocryptd_spawn_args, + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_kms_tls_options(self): + # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + MongoClient(auto_encryption_opts=opts) + + tls_opts: Any + for tls_opts in [ + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + with 
self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + MongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) + with self.assertRaises(FileNotFoundError): + MongoClient(auto_encryption_opts=opts) + # Success cases: + tls_opts: Any + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = _kms_ssl_contexts["aws"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, + "k.d", + kms_tls_options=DEFAULT_KMS_TLS, + ) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + + +class TestClientOptions(PyMongoTestCase): + def test_default(self): + client = self.simple_client(connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + client = self.simple_client(auto_encryption_opts=None, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_kwargs(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.simple_client(auto_encryption_opts=opts, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) + + +class EncryptionIntegrationTest(IntegrationTest): + """Base class for encryption integration tests.""" + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUp(self) -> None: + super().setUp() + + def assertEncrypted(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, 6) + + def assertBinaryUUID(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, UUID_SUBTYPE) + + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + + +# Location of JSON test files. 
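+# (BASE differs between the synchronous and asynchronous test trees: the
+#  async tests live one directory deeper, so they need an extra ".parent" to
+#  reach the shared "client-side-encryption" fixture directory. _IS_SYNC
+#  selects the right root below.)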
+if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + +SPEC_PATH = os.path.join(BASE, "spec") + +OPTS = CodecOptions() + +# Use SON to preserve the order of fields while parsing json. Use tz_aware +# =False to match how CodecOptions decodes dates. +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) + + +def read(*paths): + with open(os.path.join(BASE, *paths)) as fp: + return fp.read() + + +def json_data(*paths): + return json_util.loads(read(*paths), json_options=JSON_OPTS) + + +def bson_data(*paths): + return encode(json_data(*paths), codec_options=OPTS) + + +class TestClientSimple(EncryptionIntegrationTest): + def _test_auto_encrypt(self, opts): + client = self.rs_or_single_client(auto_encryption_opts=opts) + + # Create the encrypted field's data key. + key_vault = create_key_vault( + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) + self.addCleanup(key_vault.drop) + + # Collection.insert_one/insert_many auto encrypts. + docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] + encrypted_coll = client.pymongo_test.test + encrypted_coll.insert_one(docs[0]) + encrypted_coll.insert_many(docs[1:3]) + unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) + unack.insert_one(docs[3]) + unack.insert_many(docs[4:], ordered=False) + + def count_documents(): + return self.db.test.count_documents({}) == len(docs) + + wait_until(count_documents, "insert documents with w=0") + + # Database.command auto decrypts. + res = client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) + + # Collection.find auto decrypts. + decrypted_docs = encrypted_coll.find().to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.find auto decrypts getMores. + decrypted_docs = encrypted_coll.find(batch_size=1).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts. + decrypted_docs = (encrypted_coll.aggregate([])).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts getMores. + decrypted_docs = (encrypted_coll.aggregate([], batchSize=1)).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.distinct auto decrypts. + decrypted_ssns = encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) + + # Make sure the field is actually encrypted. + for encrypted_doc in self.db.test.find(): + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) + + # Attempt to encrypt an unencodable object. + with self.assertRaises(BSONError): + encrypted_coll.insert_one({"unencodeable": object()}) + + def test_auto_encrypt(self): + # Configure the encrypted field via jsonSchema. + json_schema = json_data("custom", "schema.json") + create_with_schema(self.db.test, json_schema) + self.addCleanup(self.db.test.drop) + + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + self._test_auto_encrypt(opts) + + def test_auto_encrypt_local_schema_map(self): + # Configure the encrypted field via the local schema_map option. 
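+        # (For reference, a schema_map maps fully qualified namespaces to the
+        #  same jsonSchema documents a server-side validator would hold; an
+        #  illustrative entry with hypothetical names looks like
+        #      {"db.coll": {"bsonType": "object", "properties": {"ssn": {
+        #          "encrypt": {"keyId": [<Binary>], "bsonType": "string",
+        #          "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"}}}}}
+        #  A client-side map, unlike a server-supplied schema, cannot be
+        #  tampered with by a malicious server.)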
+ schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) + + self._test_auto_encrypt(opts) + + def test_use_after_close(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.rs_or_single_client(auto_encryption_opts=opts) + + client.admin.command("ping") + client.close() + with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): + client.admin.command("ping") + + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", + ) + @client_context.require_sync + def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.rs_or_single_client(auto_encryption_opts=opts) + + def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client.admin.command("ping") + + with self.fork(target): + target() + + +class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): + def test_upsert_uuid_standard_encrypt(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.rs_or_single_client(auto_encryption_opts=opts) + + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + +class TestClientMaxWireVersion(IntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def setUp(self): + super().setUp() + + def test_raise_unsupported_error(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.rs_or_single_client(auto_encryption_opts=opts) + msg = "find_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + client.test.test.find_raw_batches({}) + + msg = "aggregate_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + client.test.test.aggregate_raw_batches([]) + + if client_context.is_mongos: + msg = "Exhaust cursors are not supported by mongos" + else: + msg = "exhaust cursors do not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + next(client.test.test.find(cursor_type=CursorType.EXHAUST)) + + +class TestExplicitSimple(EncryptionIntegrationTest): + def test_encrypt_decrypt(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) + # Use standard UUID representation. + key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) + self.addCleanup(key_vault.drop) + + # Create the encrypted field's data key. 
+ key_id = client_encryption.create_data_key("local", key_alt_names=["name"]) + self.assertBinaryUUID(key_id) + self.assertTrue(key_vault.find_one({"_id": key_id})) + + # Create an unused data key to make sure filtering works. + unused_key_id = client_encryption.create_data_key("local", key_alt_names=["unused"]) + self.assertBinaryUUID(unused_key_id) + self.assertTrue(key_vault.find_one({"_id": unused_key_id})) + + doc = {"_id": 0, "ssn": "000"} + encrypted_ssn = client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + + # Ensure encryption via key_alt_name for the same key produces the + # same output. + encrypted_ssn2 = client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) + self.assertEqual(encrypted_ssn, encrypted_ssn2) + + # Test encryption via UUID + encrypted_ssn3 = client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + + # Test decryption. + decrypted_ssn = client_encryption.decrypt(encrypted_ssn) + self.assertEqual(decrypted_ssn, doc["ssn"]) + + def test_validation(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) + + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" + with self.assertRaisesRegex(TypeError, msg): + client_encryption.decrypt("str") # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + client_encryption.decrypt(Binary(b"123")) + + msg = "key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + with self.assertRaisesRegex(TypeError, msg): + client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + + def test_bson_errors(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) + + # Attempt to encrypt an unencodable object. + unencodable_value = object() + with self.assertRaises(BSONError): + client_encryption.encrypt( + unencodable_value, + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=Binary.from_uuid(uuid.uuid4()), + ) + + def test_codec_options(self): + with self.assertRaisesRegex(TypeError, "codec_options must be"): + self.create_client_encryption( + KMS_PROVIDERS, + "keyvault.datakeys", + client_context.client, + None, # type: ignore[arg-type] + ) + + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + client_encryption_legacy = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts + ) + + # Create the encrypted field's data key. + key_id = client_encryption_legacy.create_data_key("local") + + # Encrypt a UUID with JAVA_LEGACY codec options. + value = uuid.uuid4() + encrypted_legacy = client_encryption_legacy.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = client_encryption_legacy.decrypt(encrypted_legacy) + self.assertEqual(decrypted_value_legacy, value) + + # Encrypt the same UUID with STANDARD codec options. 
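+        # (The two representations serialize the same UUID differently:
+        #  JAVA_LEGACY flips byte order and uses subtype 3, while STANDARD
+        #  uses subtype 4, so the resulting ciphertexts can never match. A
+        #  quick sanity check of that premise, added for illustration:)
+        assert Binary.from_uuid(value, UuidRepresentation.STANDARD) != Binary.from_uuid(
+            value, UuidRepresentation.JAVA_LEGACY
+        )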
+        opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
+        client_encryption = self.create_client_encryption(
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts
+        )
+        encrypted_standard = client_encryption.encrypt(
+            value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id
+        )
+        decrypted_standard = client_encryption.decrypt(encrypted_standard)
+        self.assertEqual(decrypted_standard, value)
+
+        # Test that codec_options is applied during encryption.
+        self.assertNotEqual(encrypted_standard, encrypted_legacy)
+        # Test that codec_options is applied during decryption.
+        self.assertEqual(
+            client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value)
+        )
+        self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value)
+
+    def test_close(self):
+        client_encryption = self.create_client_encryption(
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        )
+        client_encryption.close()
+        # Close can be called multiple times.
+        client_encryption.close()
+        algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+        msg = "Cannot use closed ClientEncryption"
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            client_encryption.create_data_key("local")
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            client_encryption.encrypt("val", algo, key_alt_name="name")
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            client_encryption.decrypt(Binary(b"", 6))
+
+    def test_with_statement(self):
+        with self.create_client_encryption(
+            KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS
+        ) as client_encryption:
+            pass
+        with self.assertRaisesRegex(InvalidOperation, "Cannot use closed ClientEncryption"):
+            client_encryption.create_data_key("local")
+
+
+# Spec tests
+AWS_TEMP_NO_SESSION_CREDS = {
+    "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""),
+    "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""),
+}
+
+
+class TestSpec(SpecRunner):
+    @classmethod
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
+    def _setup_class(cls):
+        super()._setup_class()
+
+    def parse_auto_encrypt_opts(self, opts):
+        """Parse clientOptions.autoEncryptOpts."""
+        opts = camel_to_snake_args(opts)
+        kms_providers = opts["kms_providers"]
+        if "aws" in kms_providers:
+            kms_providers["aws"] = AWS_CREDS
+            if not any(AWS_CREDS.values()):
+                self.skipTest("AWS environment credentials are not set")
+        if "awsTemporary" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_CREDS
+            del kms_providers["awsTemporary"]
+            if not any(AWS_TEMP_CREDS.values()):
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "awsTemporaryNoSessionToken" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS
+            del kms_providers["awsTemporaryNoSessionToken"]
+            if not any(AWS_TEMP_NO_SESSION_CREDS.values()):
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "azure" in kms_providers:
+            kms_providers["azure"] = AZURE_CREDS
+            if not any(AZURE_CREDS.values()):
+                self.skipTest("Azure environment credentials are not set")
+        if "gcp" in kms_providers:
+            kms_providers["gcp"] = GCP_CREDS
+            if not any(GCP_CREDS.values()):
+                self.skipTest("GCP environment credentials are not set")
+        if "kmip" in kms_providers:
+            kms_providers["kmip"] = KMIP_CREDS
+            opts["kms_tls_options"] = DEFAULT_KMS_TLS
+        if "key_vault_namespace" not in opts:
+            opts["key_vault_namespace"] = "keyvault.datakeys"
+        if "extra_options" in opts:
opts.update(camel_to_snake_args(opts.pop("extra_options"))) + + opts = dict(opts) + return AutoEncryptionOpts(**opts) + + def parse_client_options(self, opts): + """Override clientOptions parsing to support autoEncryptOpts.""" + encrypt_opts = opts.pop("autoEncryptOpts", None) + if encrypt_opts: + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) + + return super().parse_client_options(opts) + + def get_object_name(self, op): + """Default object is collection.""" + return op.get("object", "collection") + + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to listcollections to get collection schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") + + def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) + if key_vault_data: + coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = client_context.client.get_database(db_name, codec_options=OPTS) + db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. 
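+        # (For example, coll.update_one({}, {"x": 1}) raises
+        #  ValueError("update only works with $ operators") locally, before
+        #  anything is encrypted or sent to the server; the exact wording may
+        #  vary between driver versions.)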
+ if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + +def create_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator.create_tests() + +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] + ) + ) + +# Prose Tests +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) + + +def create_with_schema(coll, json_schema): + """Create and return a Collection with a jsonSchema.""" + coll.with_options(write_concern=WriteConcern(w="majority")).drop() + return coll.database.create_collection( + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) + + +def create_key_vault(vault, *data_keys): + """Create the key vault collection with optional data keys.""" + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) + vault.drop() + if data_keys: + vault.insert_many(data_keys) + vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + return vault + + +class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): + client_encrypted: MongoClient + client_encryption: ClientEncryption + listener: OvertCommandListener + vault: Any + + KMS_PROVIDERS = ALL_KMS_PROVIDERS + + MASTER_KEYS = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, + } + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.client.db.coll.drop() + self.vault = create_key_vault(self.client.keyvault.datakeys) + + # Configure the encrypted field via the local schema_map option. 
+ schemas = { + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + } + } + }, + } + } + opts = AutoEncryptionOpts( + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.listener.reset() + + def tearDown(self) -> None: + self.vault.drop() + + def run_test(self, provider_name): + # Create data key. + master_key: Any = self.MASTER_KEYS[provider_name] + datakey_id = self.client_encryption.create_data_key( + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] + ) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.started_events[-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = self.vault.find({"_id": datakey_id}).to_list() + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) + + # Encrypt by key_id. + encrypted = self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=datakey_id, + ) + self.assertEncrypted(encrypted) + self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore + + # Encrypt by key_alt_name. + encrypted_altname = self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name=f"{provider_name}_altname", + ) + self.assertEqual(encrypted_altname, encrypted) + + # Explicitly encrypting an auto encrypted field. + with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): + self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) + + def test_data_key_local(self): + self.run_test("local") + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_data_key_aws(self): + self.run_test("aws") + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def test_data_key_azure(self): + self.run_test("azure") + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def test_data_key_gcp(self): + self.run_test("gcp") + + def test_data_key_kmip(self): + self.run_test("kmip") + + +class TestExternalKeyVault(EncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + def _test_external_key_vault(self, with_external_key_vault): + self.client.db.coll.drop() + vault = create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) + self.addCleanup(vault.drop) + + # Configure the encrypted field via the local schema_map option. 
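+        # (The key vault may live on a separate cluster with its own
+        #  credentials. The "external" variant points the key vault client at
+        #  deliberately wrong credentials, so key lookup must surface an
+        #  authentication failure, code 18, as asserted below.)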
+ schemas = {"db.coll": json_data("external", "external-schema.json")} + if with_external_key_vault: + key_vault_client = self.rs_or_single_client(username="fake-user", password="fake-pwd") + else: + key_vault_client = client_context.client + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) + + client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + client_encryption = self.create_client_encryption( + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + client_encryption.encrypt( + "test", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=LOCAL_KEY_ID, + ) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) + + def test_external_key_vault_1(self): + self._test_external_key_vault(True) + + def test_external_key_vault_2(self): + self._test_external_key_vault(False) + + +class TestViews(EncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + def test_views_are_prohibited(self): + self.client.db.view.drop() + self.client.db.create_collection("view", viewOn="coll") + self.addCleanup(self.client.db.view.drop) + + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") + client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): + client_encrypted.db.view.insert_one({}) + + +class TestCorpus(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def setUp(self): + super().setUp() + + @staticmethod + def kms_providers(): + return ALL_KMS_PROVIDERS + + @staticmethod + def fix_up_schema(json_schema): + """Remove deprecated symbol/dbPointer types from json schema.""" + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] + return json_schema + + @staticmethod + def fix_up_curpus(corpus): + """Disallow deprecated symbol/dbPointer types from corpus test.""" + for key in corpus: + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False + return corpus + + @staticmethod + def fix_up_curpus_encrypted(corpus_encrypted, corpus): + """Fix the expected values for deprecated symbol/dbPointer types.""" + for key in corpus_encrypted: + if "_symbol_" in key or "_dbPointer_" in key: + corpus_encrypted[key] = copy.deepcopy(corpus[key]) + return corpus_encrypted + + def _test_corpus(self, opts): + # Drop and create the collection 'db.coll' with jsonSchema. 
+ coll = create_with_schema( + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) + self.addCleanup(coll.drop) + + vault = create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) + self.addCleanup(vault.drop) + + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + + client_encryption = self.create_client_encryption( + self.kms_providers(), + "keyvault.datakeys", + client_context.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) + corpus_copied: SON = SON() + for key, value in corpus.items(): + corpus_copied[key] = copy.deepcopy(value) + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + if value["method"] == "auto": + continue + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": + kwargs = {"key_id": LOCAL_KEY_ID} + elif kms == "aws": + kwargs = {"key_id": AWS_KEY_ID} + elif kms == "azure": + kwargs = {"key_id": AZURE_KEY_ID} + elif kms == "gcp": + kwargs = {"key_id": GCP_KEY_ID} + else: + kwargs = {"key_id": KMIP_KEY_ID} + else: + kwargs = {"key_alt_name": kms} + + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + else: + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random + + try: + encrypted_val = client_encryption.encrypt( + value["value"], + algo, + **kwargs, # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail(f"encrypt should have failed: {key!r}: {value!r}") + corpus_copied[key]["value"] = encrypted_val + except Exception: + if value["allowed"]: + tb = traceback.format_exc() + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") + + client_encrypted.db.coll.insert_one(corpus_copied) + corpus_decrypted = client_encrypted.db.coll.find_one() + self.assertEqual(corpus_decrypted, corpus) + + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) + corpus_encrypted_actual = coll.find_one() + for key, value in corpus_encrypted_actual.items(): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = client_encryption.decrypt(value["value"]) + decrypt_expected = client_encryption.decrypt( + corpus_encrypted_expected[key]["value"] + ) + self.assertEqual(decrypt_actual, decrypt_expected, key) + else: + self.assertEqual(value["value"], corpus[key]["value"], key) + + def test_corpus(self): + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS + ) + self._test_corpus(opts) + + def test_corpus_local_schema(self): + # Configure the encrypted field via the local schema_map option. 
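+        # (Same corpus as above, but the schema comes from the client-side
+        #  schema_map rather than a server-side $jsonSchema validator; both
+        #  configuration paths must behave identically.)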
+ schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self._test_corpus(opts) + + +_2_MiB = 2097152 +_16_MiB = 16777216 + + +class TestBsonSizeBatches(EncryptionIntegrationTest): + """Prose tests for BSON size limits and batch splitting.""" + + coll: Collection + coll_encrypted: Collection + client_encrypted: MongoClient + listener: OvertCommandListener + + def setUp(self): + super().setUp() + db = client_context.client.db + self.coll = db.coll + self.coll.drop() + # Configure the encrypted 'db.coll' collection via jsonSchema. + json_schema = json_data("limits", "limits-schema.json") + db.create_collection( + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) + + # Create the key vault. + coll = client_context.client.get_database( + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] + coll.drop() + coll.insert_one(json_data("limits", "limits-key.json")) + + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + self.listener = OvertCommandListener() + self.client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] + ) + self.coll_encrypted = self.client_encrypted.db.coll + + def tearDown(self) -> None: + self.coll_encrypted.drop() + + def test_01_insert_succeeds_under_2MiB(self): + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "over_2mib_under_16mib_bulk" + self.coll_encrypted.bulk_write([InsertOne(doc)]) + + def test_02_insert_succeeds_over_2MiB_post_encryption(self): + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_2mib_bulk" + self.coll_encrypted.bulk_write([InsertOne(doc)]) + + def test_03_bulk_batch_split(self): + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} + self.listener.reset() + self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + def test_04_bulk_batch_split(self): + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} + doc1.update(limits_doc) + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} + doc2.update(limits_doc) + self.listener.reset() + self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + def test_05_insert_succeeds_just_under_16MiB(self): + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. 
+ doc["_id"] = "under_16mib_bulk" + self.coll_encrypted.bulk_write([InsertOne(doc)]) + + def test_06_insert_fails_over_16MiB(self): + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + doc.update(limits_doc) + + with self.assertRaisesRegex(WriteError, "object to insert too large"): + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_16mib_bulk" + with self.assertRaises(BulkWriteError) as ctx: + self.coll_encrypted.bulk_write([InsertOne(doc)]) + err = ctx.exception.details["writeErrors"][0] + self.assertIn(err["code"], [2, 10334]) + self.assertIn("object to insert too large", err["errmsg"]) + + +class TestCustomEndpoint(EncryptionIntegrationTest): + """Prose tests for creating data keys with a custom endpoint.""" + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + def setUp(self): + super().setUp() + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } + self.client_encryption = self.create_client_encryption( + kms_providers=kms_providers, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" + self.client_encryption_invalid = self.create_client_encryption( + kms_providers=kms_providers_invalid, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self._kmip_host_error = None + self._invalid_host_error = None + + def run_test_expected_success(self, provider_name, master_key): + data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) + encrypted = self.client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption.decrypt(encrypted)) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_01_aws_region_key(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_02_aws_region_key_endpoint(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_03_aws_region_key_endpoint_port(self): + self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + def test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with 
self.assertRaisesRegex(EncryptionError, "localhost:12345"): + self.client_encryption.create_data_key("kmip", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_05_aws_endpoint_wrong_region(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com", + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for EncryptionError to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_06_aws_endpoint_invalid_host(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "doesnotexist.invalid", + } + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def test_07_azure(self): + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + self.run_test_expected_success("azure", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption_invalid.create_data_key("azure", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def test_08_gcp_valid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443", + } + self.run_test_expected_success("gcp", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def test_09_gcp_invalid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "doesnotexist.invalid:443", + } + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. 
HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + self.client_encryption.create_data_key("gcp", master_key=master_key) + + def dns_error(self, host, port): + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) + return self._kmip_host_error + + def test_10_kmip_invalid_endpoint(self): + key = {"keyId": "1"} + self.run_test_expected_success("kmip", key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + self.client_encryption_invalid.create_data_key("kmip", key) + + def test_11_kmip_master_key_endpoint(self): + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} + self.run_test_expected_success("kmip", key) + # Override invalid endpoint: + data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) + encrypted = self.client_encryption_invalid.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) + + def test_12_kmip_master_key_invalid_endpoint(self): + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + self.client_encryption.create_data_key("kmip", key) + + +class AzureGCPEncryptionTestMixin(EncryptionIntegrationTest): + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" + client: MongoClient + + def _setup(self): + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) + create_key_vault(keyvault, self.DEK) + + def _test_explicit(self, expectation): + self._setup() + client_encryption = self.create_client_encryption( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + client_context.client, + OPTS, + ) + + ciphertext = client_encryption.encrypt( + "string0", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=self.DEK["_id"], + ) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(client_encryption.decrypt(ciphertext), "string0") + + def _test_automatic(self, expectation_extjson, payload): + self._setup() + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + keyvault_namespace, + schema_map=self.SCHEMA_MAP, + ) + + insert_listener = AllowListEventListener("insert") + client = self.rs_or_single_client( + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) + coll.drop() + + expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) + + coll.insert_one(payload) + event = insert_listener.started_events[0] + inserted_doc = 
event.command["documents"][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + def setUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() + + def test_explicit(self): + return self._test_explicit( + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) + + def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06"} + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + def setUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() + + def test_explicit(self): + return self._test_explicit( + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) + + def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06"} + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests +class TestDeadlockProse(EncryptionIntegrationTest): + def setUp(self): + super().setUp() + self.client_test = self.rs_or_single_client( + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = self.rs_or_single_client( + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) + + self.client_test.keyvault.datakeys.drop() + self.client_test.db.coll.drop() + self.client_test.keyvault.datakeys.insert_one(json_data("external", "external-key.json")) + _ = self.client_test.db.create_collection( + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) + + client_encryption = self.create_client_encryption( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=self.client_test, + codec_options=OPTS, + ) + self.ciphertext = client_encryption.encrypt( + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, 
"keyvault.datakeys") + + def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = self.rs_or_single_client( + readConcernLevel="majority", + w="majority", + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener], + ) + + if auto_encryption_opts._bypass_auto_encryption is True: + self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption is False: + client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + def test_case_1(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_2(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_3(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + def test_case_4(self): + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_5(self): + 
self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_6(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_7(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + def test_case_8(self): + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events +class TestDecryptProse(EncryptionIntegrationTest): + def setUp(self): + super().setUp() + self.client = client_context.client + self.client.db.drop_collection("decryption_events") + create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = self.client_encryption.create_data_key("local") + self.cipher_text = self.client_encryption.encrypt( + "hello", key_id=keyID, 
algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = self.rs_or_single_client( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + + def test_01_command_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: + self.assertEqual(event.failure["code"], 123) + + def test_02_network_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") + + def test_03_decrypt_error(self): + self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + def test_04_decrypt_success(self): + self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", + ) + def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. + self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. 
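+        # (With mongocryptd_bypass_spawn=True the driver never launches
+        #  mongocryptd and must reach an already-running instance at
+        #  mongocryptd_uri; pointing that URI at an unused port makes the
+        #  insert below fail fast with a Timeout, proving nothing was
+        #  spawned.)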
+ schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri="mongodb://localhost:27027/", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = self.simple_client( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) + with self.assertRaises(ServerSelectionTimeoutError): + mongocryptd_client.admin.command("ping") + + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_via_loading_shared_library(self): + create_key_vault( + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted((client_context.client.db.coll.find_one({}))["encrypted"]) + no_mongocryptd_client = self.simple_client( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ServerSelectionTimeoutError): + no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + create_key_vault( + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + 
)
+        client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts)
+        client_encrypted.db.coll.drop()
+        client_encrypted.db.coll.insert_one({"encrypted": "test"})
+        server.shutdown()
+        listener_t.join()
+        self.assertFalse(connection_established, "a connection was established on port 47021")
+
+
+# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests
+class TestKmsTLSProse(EncryptionIntegrationTest):
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def setUp(self):
+        super().setUp()
+        self.patch_system_certs(CA_PEM)
+        self.client_encrypted = self.create_client_encryption(
+            {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS
+        )
+
+    def test_invalid_kms_certificate_expired(self):
+        key = {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+            "endpoint": "mongodb://127.0.0.1:9000",
+        }
+        # Some examples:
+        # certificate verify failed: certificate has expired (_ssl.c:1129)
+        # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852)
+        with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"):
+            self.client_encrypted.create_data_key("aws", master_key=key)
+
+    def test_invalid_hostname_in_kms_certificate(self):
+        key = {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+            "endpoint": "mongodb://127.0.0.1:9001",
+        }
+        # Some examples:
+        # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)
+        # hostname '127.0.0.1' doesn't match 'wronghost.com'
+        # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',)
+        with self.assertRaisesRegex(
+            EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate"
+        ):
+            self.client_encrypted.create_data_key("aws", master_key=key)
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests
+class TestKmsTLSOptions(EncryptionIntegrationTest):
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def setUp(self):
+        super().setUp()
+        # 1, create client with only tlsCAFile.
+        providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9002"
+        kms_tls_opts_ca_only = {
+            "aws": {"tlsCAFile": CA_PEM},
+            "azure": {"tlsCAFile": CA_PEM},
+            "gcp": {"tlsCAFile": CA_PEM},
+            "kmip": {"tlsCAFile": CA_PEM},
+        }
+        self.client_encryption_no_client_cert = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        # 2, same providers as above but with tlsCertificateKeyFile.
+        kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only)
+        for p in kms_tls_opts:
+            kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM
+        self.client_encryption_with_tls = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts
+        )
+        # 3, update endpoints to expired host.
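+        # (The mock KMS servers below map ports to failure modes: 9002 serves a
+        # valid certificate but requires a client certificate, 9000 serves an
+        # expired certificate, and 9001 serves a certificate with a mismatched
+        # hostname.)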
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9000"
+        providers["kmip"]["endpoint"] = "127.0.0.1:9000"
+        self.client_encryption_expired = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        # 4, update endpoints to invalid host.
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9001"
+        providers["kmip"]["endpoint"] = "127.0.0.1:9001"
+        self.client_encryption_invalid_hostname = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        # Errors when client has no cert, some examples:
+        # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623)
+        self.cert_error = (
+            "certificate required|SSL handshake failed|"
+            "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE"
+        )
+        # On Python 3.10+ this error might be:
+        # EOF occurred in violation of protocol (_ssl.c:2384)
+        if sys.version_info[:2] >= (3, 10):
+            self.cert_error += "|EOF"
+        # On Windows this error might be:
+        # [WinError 10054] An existing connection was forcibly closed by the remote host
+        if sys.platform == "win32":
+            self.cert_error += "|forcibly closed"
+        # 5, test named KMS providers.
+        providers = {
+            "aws:no_client_cert": AWS_CREDS,
+            "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS},
+            "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS},
+            "kmip:no_client_cert": KMIP_CREDS,
+            "aws:with_tls": AWS_CREDS,
+            "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS},
+            "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS},
+            "kmip:with_tls": KMIP_CREDS,
+        }
+        no_cert = {"tlsCAFile": CA_PEM}
+        with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}
+        kms_tls_opts_4 = {
+            "aws:no_client_cert": no_cert,
+            "azure:no_client_cert": no_cert,
+            "gcp:no_client_cert": no_cert,
+            "kmip:no_client_cert": no_cert,
+            "aws:with_tls": with_cert,
+            "azure:with_tls": with_cert,
+            "gcp:with_tls": with_cert,
+            "kmip:with_tls": with_cert,
+        }
+        self.client_encryption_with_names = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4
+        )
+
+    def test_01_aws(self):
+        key = {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+            "endpoint": "127.0.0.1:9002",
+        }
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            self.client_encryption_no_client_cert.create_data_key("aws", key)
+        # "parse error" here means that the TLS handshake succeeded.
+        with self.assertRaisesRegex(EncryptionError, "parse error"):
+            self.client_encryption_with_tls.create_data_key("aws", key)
+        # Some examples:
+        # certificate verify failed: certificate has expired (_ssl.c:1129)
+        # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852)
+        key["endpoint"] = "127.0.0.1:9000"
+        with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"):
+            self.client_encryption_expired.create_data_key("aws", key)
+        # Some examples:
+        # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)
+        # hostname '127.0.0.1' doesn't match 'wronghost.com'
+        # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',)
+        key["endpoint"] = "127.0.0.1:9001"
+        with self.assertRaisesRegex(
+            EncryptionError,
+            "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed",
+        ):
+            self.client_encryption_invalid_hostname.create_data_key("aws", key)
+
+    def test_02_azure(self):
+        key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"}
+        # Missing client cert error.
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            self.client_encryption_no_client_cert.create_data_key("azure", key)
+        # "HTTP status=404" here means that the TLS handshake succeeded.
+        with self.assertRaisesRegex(EncryptionError, "HTTP status=404"):
+            self.client_encryption_with_tls.create_data_key("azure", key)
+        # Expired cert error.
+        with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"):
+            self.client_encryption_expired.create_data_key("azure", key)
+        # Invalid cert hostname error.
+        with self.assertRaisesRegex(
+            EncryptionError,
+            "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed",
+        ):
+            self.client_encryption_invalid_hostname.create_data_key("azure", key)
+
+    def test_03_gcp(self):
+        key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"}
+        # Missing client cert error.
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            self.client_encryption_no_client_cert.create_data_key("gcp", key)
+        # "HTTP status=404" here means that the TLS handshake succeeded.
+        with self.assertRaisesRegex(EncryptionError, "HTTP status=404"):
+            self.client_encryption_with_tls.create_data_key("gcp", key)
+        # Expired cert error.
+        with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"):
+            self.client_encryption_expired.create_data_key("gcp", key)
+        # Invalid cert hostname error.
+        with self.assertRaisesRegex(
+            EncryptionError,
+            "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed",
+        ):
+            self.client_encryption_invalid_hostname.create_data_key("gcp", key)
+
+    def test_04_kmip(self):
+        # Missing client cert error.
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            self.client_encryption_no_client_cert.create_data_key("kmip")
+        # Success here means that the TLS handshake with the client certificate succeeded.
+        self.client_encryption_with_tls.create_data_key("kmip")
+        # Expired cert error.
+        with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"):
+            self.client_encryption_expired.create_data_key("kmip")
+        # Invalid cert hostname error.
+ with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + self.client_encryption_invalid_hostname.create_data_key("kmip") + + def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. 
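+        # The ":no_client_cert" / ":with_tls" suffixes select the matching
+        # per-name entries in kms_tls_opts_4 from setUp(), so each named
+        # provider resolves to its own TLS configuration.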
+ with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + self.client_encryption_with_names.create_data_key("kmip:with_tls") + + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): + def setUp(self): + super().setUp() + self.client = client_context.client + create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_01_create_key(self): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_02_add_key_alt_name(self): + key_id = self.client_encryption.create_data_key("local") + self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption +class TestExplicitQueryableEncryption(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + self.client.drop_database(self.db) + self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) + + def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + self.encrypted_client[self.db.name] + 
.explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedIndexed"], val) + + def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list() + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + +# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup +class TestLookupProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + encrypted_client.drop_database("db") + + key_doc = json_data("etc", "data", "lookup", "key-doc.json") + create_key_vault(encrypted_client.db.keyvault, key_doc) + self.addCleanup(client_context.client.drop_database, "db") + + encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", 
"data", "lookup", "schema-qe.json") + ) + encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + encrypted_client.db.create_collection("no_schema") + encrypted_client.db.create_collection("no_schema2") + + unencrypted_client = self.rs_or_single_client() + + encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = unencrypted_client.db.csfle.find_one() + self.assertIsInstance(doc["csfle"], Binary) + encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = unencrypted_client.db.csfle2.find_one() + self.assertIsInstance(doc["csfle2"], Binary) + encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = unencrypted_client.db.qe.find_one() + self.assertIsInstance(doc["qe"], Binary) + encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = unencrypted_client.db.qe2.find_one() + self.assertIsInstance(doc["qe2"], Binary) + encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) + + encrypted_client.close() + unencrypted_client.close() + + @client_context.require_version_min(8, 1, -1) + def test_1_csfle_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_2_qe_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_3_no_schema_joins_csfle(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + "as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_4_no_schema_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + 
"pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_5_csfle_joins_csfle2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_6_qe_joins_qe2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_7_no_schema_joins_no_schema2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_8_csfle_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("not supported", str(exc)) + + @client_context.require_version_max(8, 1, -1) + def test_9_error(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("Upgrade", str(exc)) + + +# 
+# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap
+class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest):
+    MASTER_KEYS: Mapping[str, Mapping[str, Any]] = {
+        "aws": {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+        },
+        "azure": {
+            "keyVaultEndpoint": "key-vault-csfle.vault.azure.net",
+            "keyName": "key-name-csfle",
+        },
+        "gcp": {
+            "projectId": "devprod-drivers",
+            "location": "global",
+            "keyRing": "key-ring-csfle",
+            "keyName": "key-name-csfle",
+        },
+        "kmip": {},
+        "local": {},
+    }
+
+    def test_rewrap(self):
+        for src_provider in self.MASTER_KEYS:
+            for dst_provider in self.MASTER_KEYS:
+                with self.subTest(src_provider=src_provider, dst_provider=dst_provider):
+                    self.run_test(src_provider, dst_provider)
+
+    def run_test(self, src_provider, dst_provider):
+        # Step 1. Drop the collection ``keyvault.datakeys``.
+        self.client.keyvault.drop_collection("datakeys")
+
+        # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1``.
+        client_encryption1 = self.create_client_encryption(
+            key_vault_client=self.client,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=DEFAULT_KMS_TLS,
+            codec_options=OPTS,
+        )
+
+        # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``.
+        key_id = client_encryption1.create_data_key(
+            master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider
+        )
+
+        # Step 4. Call ``client_encryption1.encrypt`` with the value "test".
+        cipher_text = client_encryption1.encrypt(
+            "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+        )
+
+        # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2``.
+        client2 = self.rs_or_single_client()
+        client_encryption2 = self.create_client_encryption(
+            key_vault_client=client2,
+            key_vault_namespace="keyvault.datakeys",
+            kms_providers=ALL_KMS_PROVIDERS,
+            kms_tls_options=DEFAULT_KMS_TLS,
+            codec_options=OPTS,
+        )
+
+        # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``.
+        rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key(
+            {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider]
+        )
+
+        self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1)
+
+        # Step 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result1 = client_encryption1.decrypt(cipher_text)
+        self.assertEqual(decrypt_result1, "test")
+
+        # Step 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+        decrypt_result2 = client_encryption2.decrypt(cipher_text)
+        self.assertEqual(decrypt_result2, "test")
+
+        # Case 2. Provider is not optional when master_key is given.
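+        # For contrast, the valid call shape pairs both options, as in Step 6
+        # above:
+        #     client_encryption2.rewrap_many_data_key(
+        #         {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider]
+        #     )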
+        with self.assertRaises(ConfigurationError):
+            rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key(
+                {}, master_key=self.MASTER_KEYS[dst_provider]
+            )
+
+
+# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials
+class TestOnDemandAWSCredentials(EncryptionIntegrationTest):
+    def setUp(self):
+        super().setUp()
+        self.master_key = {
+            "region": "us-east-1",
+            "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"),
+        }
+
+    @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set")
+    def test_01_failure(self):
+        self.client_encryption = self.create_client_encryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=client_context.client,
+            codec_options=OPTS,
+        )
+        with self.assertRaises(EncryptionError):
+            self.client_encryption.create_data_key("aws", self.master_key)
+
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def test_02_success(self):
+        self.client_encryption = self.create_client_encryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=client_context.client,
+            codec_options=OPTS,
+        )
+        self.client_encryption.create_data_key("aws", self.master_key)
+
+
+class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest):
+    # Queryable Encryption is not supported on Standalone topology.
+    @client_context.require_no_standalone
+    @client_context.require_version_min(7, 0, -1)
+    def setUp(self):
+        super().setUp()
+
+    def test_queryable_encryption(self):
+        # A MongoClient helper that handles test auth/TLS configuration
+        # and cleanup.
+        def MongoClient(**kwargs):
+            c = self.rs_or_single_client(**kwargs)
+            return c
+
+        # Drop data from prior test runs.
+        self.client.keyvault.datakeys.drop()
+        self.client.drop_database("docs_examples")
+
+        kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}}
+
+        # Create two data keys.
+        key_vault_client = MongoClient()
+        client_encryption = self.create_client_encryption(
+            kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions()
+        )
+        key1_id = client_encryption.create_data_key("local")
+        key2_id = client_encryption.create_data_key("local")
+
+        # Create an encryptedFieldsMap.
+        encrypted_fields_map = {
+            "docs_examples.encrypted": {
+                "fields": [
+                    {
+                        "path": "encrypted_indexed",
+                        "bsonType": "string",
+                        "keyId": key1_id,
+                        "queries": [
+                            {
+                                "queryType": "equality",
+                            },
+                        ],
+                    },
+                    {
+                        "path": "encrypted_unindexed",
+                        "bsonType": "string",
+                        "keyId": key2_id,
+                    },
+                ],
+            },
+        }
+
+        # Create a client with automatic Queryable Encryption enabled.
+        opts = AutoEncryptionOpts(
+            kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map
+        )
+        encrypted_client = MongoClient(auto_encryption_opts=opts)
+
+        # Create a Queryable Encryption collection "docs_examples.encrypted".
+        # Because docs_examples.encrypted is in encrypted_fields_map, it is
+        # created with Queryable Encryption support.
+        db = encrypted_client.docs_examples
+        encrypted_coll = db.create_collection("encrypted")
+
+        # Auto encrypt an insert and find.
+
+        # Encrypt an insert.
+        encrypted_coll.insert_one(
+            {
+                "_id": 1,
+                "encrypted_indexed": "indexed_value",
+                "encrypted_unindexed": "unindexed_value",
+            }
+        )
+
+        # Encrypt a find.
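+        # Only "encrypted_indexed" was declared with an "equality" queryType in
+        # encrypted_fields_map, so it is the only encrypted field that can be
+        # matched server-side; "encrypted_unindexed" is decrypted on return but
+        # cannot be queried by value.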
+ res = encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = MongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption +class TestRangeQueryProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + + def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): + find_payload = self.client_encryption.encrypt_expression( + expression=expression, + key_id=key_id or self.key1_id, + algorithm=Algorithm.RANGE, + query_type=QueryType.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + def encrypt_and_cast(i): + return self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + self.run_expression_find(name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts) + # Case 2, with UUID key_id + self.run_expression_find( + name, + expression, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + key_id=self.key1_id.as_uuid(), + ) + + # Case 3. 
+ self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. + if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), + max=cast_func(200), + sparsity=1, + trim_factor=1, + precision=2, + ), + ) + + def test_double_no_precision(self): + self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1, trim_factor=1), float) + + def test_double_precision(self): + self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, trim_factor=1, precision=2), + float, + ) + + def test_decimal_no_precision(self): + self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1, trim_factor=1), lambda x: Decimal128(str(x)) + ) + + def test_decimal_precision(self): + self.run_test_cases( + "DecimalPrecision", + RangeOpts( + min=Decimal128("0.0"), + max=Decimal128("200.0"), + sparsity=1, + trim_factor=1, + precision=2, + ), + lambda x: Decimal128(str(x)), + ) + + def test_datetime(self): + self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1, trim_factor=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + def test_int(self): + self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults +class TestRangeQueryDefaultsProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 0, -1) + def setUp(self): + super().setUp() + self.client.drop_database(self.db) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS + ) + self.key_id = self.client_encryption.create_data_key("local") + opts = RangeOpts(min=0, max=1000) + 
self.payload_defaults = self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+
+    def test_uses_libmongocrypt_defaults(self):
+        # sparsity=2 and trim_factor=6 mirror libmongocrypt's built-in
+        # defaults, so this payload should match the size of payload_defaults,
+        # which was encrypted with both options omitted in setUp().
+        opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6)
+        payload = self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+        assert len(payload) == len(self.payload_defaults)
+
+    def test_accepts_trim_factor_0(self):
+        opts = RangeOpts(min=0, max=1000, trim_factor=0)
+        payload = self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+        assert len(payload) > len(self.payload_defaults)
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests
+class TestKmsRetryProse(EncryptionIntegrationTest):
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    def setUp(self):
+        super().setUp()
+        # Create a ClientEncryption with tlsCAFile and tlsCertificateKeyFile
+        # set for every provider.
+        providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9003"
+        kms_tls_opts = {
+            p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers
+        }
+        self.client_encryption = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts
+        )
+
+    def http_post(self, path, data=None):
+        # Note, the connection to the mock server needs to be closed after
+        # each request because the server is single threaded.
+        ctx = ssl.create_default_context(cafile=CA_PEM)
+        ctx.load_cert_chain(CLIENT_PEM)
+        ctx.check_hostname = False
+        ctx.verify_mode = ssl.CERT_NONE
+        conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx)
+        try:
+            if data is not None:
+                headers = {"Content-type": "application/json"}
+                body = json.dumps(data)
+            else:
+                headers = {}
+                body = None
+            conn.request("POST", path, body, headers)
+            res = conn.getresponse()
+            res.read()
+        finally:
+            conn.close()
+
+    def _test(self, provider, master_key):
+        self.http_post("/reset")
+        # Case 1: createDataKey and encrypt with TCP retry
+        self.http_post("/set_failpoint/network", {"count": 1})
+        key_id = self.client_encryption.create_data_key(provider, master_key=master_key)
+        self.http_post("/set_failpoint/network", {"count": 1})
+        self.client_encryption.encrypt(
+            123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id
+        )
+
+        # Case 2: createDataKey and encrypt with HTTP retry
+        self.http_post("/set_failpoint/http", {"count": 1})
+        key_id = self.client_encryption.create_data_key(provider, master_key=master_key)
+        self.http_post("/set_failpoint/http", {"count": 1})
+        self.client_encryption.encrypt(
+            123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id
+        )
+
+        # Case 3: createDataKey fails after too many retries
+        self.http_post("/set_failpoint/network", {"count": 4})
+        with self.assertRaisesRegex(EncryptionError, "KMS request failed after"):
+            self.client_encryption.create_data_key(provider, master_key=master_key)
+
+    def test_kms_retry(self):
+        if IS_PYOPENSSL:
+            self.skipTest(
+                "PyOpenSSL does not support a required method for this test, Connection.makefile"
+            )
+        self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"})
+        self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"})
+        self._test(
+            "gcp",
+            {
+                "projectId": "foo",
+                "location": "bar",
+                "keyRing": "baz",
+                "keyName": "qux",
"endpoint": "127.0.0.1:9003", + }, + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + + def test_01_simple_create(self): + coll, _ = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. + with self.assertRaisesRegex( + EncryptedCollectionError, + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + def test_04_insert_encrypted(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + coll.insert_one({"ssn": encrypted_value}) + + def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + def test_options_forward(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + def test_mixed_null_keyids(self): + key = self.client_encryption.create_data_key(kms_provider="local") + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + 
encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + def test_create_datakey_fails(self): + key = self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + def test_create_failure(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 2, -1) + @client_context.require_libmongocrypt_min(1, 15, 1) + 
@client_context.require_pymongocrypt_min(1, 16, 0) + def setUp(self): + super().setUp() + # Load the file key1-document.json as key1Document. + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. 
+ coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. 
+ self.assertIsNone(value) + + def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". 
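+        # The driver surfaces the libmongocrypt failure as an EncryptionError
+        # whose .cause attribute is the underlying MongoCryptError.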
+        self.assertIsInstance(ctx.exception.cause, MongoCryptError)
+        self.assertIn(
+            "contention factor is required for textPreview algorithm", str(ctx.exception)
+        )
+
+
+def start_mongocryptd(port) -> None:
+    args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"]
+    _spawn_daemon(args)
+
+
+@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed")
+class TestNoSessionsSupport(EncryptionIntegrationTest):
+    mongocryptd_client: MongoClient
+    MONGOCRYPTD_PORT = 27020
+
+    def setUp(self) -> None:
+        super().setUp()
+        start_mongocryptd(self.MONGOCRYPTD_PORT)
+
+        self.listener = OvertCommandListener()
+        self.mongocryptd_client = self.simple_client(
+            f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener]
+        )
+
+        hello = self.mongocryptd_client.db.command("hello")
+        self.assertNotIn("logicalSessionTimeoutMinutes", hello)
+
+    def test_implicit_session_ignored_when_unsupported(self):
+        self.listener.reset()
+        with self.assertRaises(OperationFailure):
+            self.mongocryptd_client.db.test.find_one()
+
+        self.assertNotIn("lsid", self.listener.started_events[0].command)
+
+        with self.assertRaises(OperationFailure):
+            self.mongocryptd_client.db.test.insert_one({"x": 1})
+
+        self.assertNotIn("lsid", self.listener.started_events[1].command)
+
+        self.mongocryptd_client.close()
+
+    def test_explicit_session_errors_when_unsupported(self):
+        self.listener.reset()
+        with self.mongocryptd_client.start_session() as s:
+            with self.assertRaisesRegex(
+                ConfigurationError, r"Sessions are not supported by this MongoDB deployment"
+            ):
+                self.mongocryptd_client.db.test.find_one(session=s)
+            with self.assertRaisesRegex(
+                ConfigurationError, r"Sessions are not supported by this MongoDB deployment"
+            ):
+                self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s)
+
+        self.mongocryptd_client.close()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_errors.py b/test/test_errors.py
index 59416e521a..d6db6a4ec1 100644
--- a/test/test_errors.py
+++ b/test/test_errors.py
@@ -1,4 +1,4 @@
-# Copyright 2009-2014 MongoDB, Inc.
+# Copyright 2020-present MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,22 +11,90 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations -"""Test the errors module.""" - -import unittest +import pickle import sys +import traceback + sys.path[0:0] = [""] -from pymongo import MongoClient -from pymongo.errors import PyMongoError +from test import PyMongoTestCase, unittest + +from pymongo.errors import ( + BulkWriteError, + EncryptionError, + NotPrimaryError, + OperationFailure, +) + + +class TestErrors(PyMongoTestCase): + def test_not_primary_error(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except NotPrimaryError: + self.assertIn("full error", traceback.format_exc()) + + def test_operation_failure(self): + exc = OperationFailure("operation failure test", 10, {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except OperationFailure: + self.assertIn("full error", traceback.format_exc()) + + def _test_unicode_strs(self, exc): + self.assertEqual( + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) + ) + try: + raise exc + except Exception: + self.assertIn("full error", traceback.format_exc()) + + def test_unicode_strs_operation_failure(self): + exc = OperationFailure("unicode \U0001f40d", 10, {"errmsg": "unicode \U0001f40d"}) + self._test_unicode_strs(exc) + + def test_unicode_strs_not_primary_error(self): + exc = NotPrimaryError("unicode \U0001f40d", {"errmsg": "unicode \U0001f40d"}) + self._test_unicode_strs(exc) + + def assertPyMongoErrorEqual(self, exc1, exc2): + self.assertEqual(exc1._message, exc2._message) + self.assertEqual(exc1._error_labels, exc2._error_labels) + self.assertEqual(exc1.args, exc2.args) + self.assertEqual(str(exc1), str(exc2)) + + def assertOperationFailureEqual(self, exc1, exc2): + self.assertPyMongoErrorEqual(exc1, exc2) + self.assertEqual(exc1.code, exc2.code) + self.assertEqual(exc1.details, exc2.details) + self.assertEqual(exc1._max_wire_version, exc2._max_wire_version) + + def test_pickle_NotPrimaryError(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) + self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) + def test_pickle_OperationFailure(self): + exc = OperationFailure("error", code=5, details={}, max_wire_version=7) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) -class TestErrors(unittest.TestCase): + def test_pickle_BulkWriteError(self): + exc = BulkWriteError({}) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + self.assertIn("batch op errors occurred", str(exc)) - def test_base_exception(self): - self.assertRaises(PyMongoError, MongoClient, port=0) + def test_pickle_EncryptionError(self): + cause = OperationFailure("error", code=5, details={}, max_wire_version=7) + exc = EncryptionError(cause) + exc2 = pickle.loads(pickle.dumps(exc)) + self.assertPyMongoErrorEqual(exc, exc2) + self.assertOperationFailureEqual(cause, exc2.cause) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_examples.py b/test/test_examples.py new file mode 100644 index 0000000000..266e32e8d4 --- /dev/null +++ b/test/test_examples.py @@ -0,0 +1,1440 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MongoDB documentation examples in Python.""" +from __future__ import annotations + +import asyncio +import datetime +import functools +import sys +import threading +import time +from test.helpers import ConcurrentRunner + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils_shared import wait_until + +import pymongo +from pymongo.errors import ConnectionFailure, OperationFailure +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestSampleShellCommands(IntegrationTest): + def setUp(self): + super().setUp() + self.db.inventory.drop() + + def tearDown(self): + # Run after every test. + self.db.inventory.drop() + self.client.drop_database("pymongo_test") + + def test_first_three_examples(self): + db = self.db + + # Start Example 1 + db.inventory.insert_one( + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) + # End Example 1 + + self.assertEqual(db.inventory.count_documents({}), 1) + + # Start Example 2 + cursor = db.inventory.find({"item": "canvas"}) + # End Example 2 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 3 + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) + # End Example 3 + + self.assertEqual(db.inventory.count_documents({}), 4) + + def test_query_top_level_fields(self): + db = self.db + + # Start Example 6 + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 6 + + self.assertEqual(db.inventory.count_documents({}), 5) + + # Start Example 7 + cursor = db.inventory.find({}) + # End Example 7 + + self.assertEqual(len(cursor.to_list()), 5) + + # Start Example 9 + cursor = db.inventory.find({"status": "D"}) + # End Example 9 + + self.assertEqual(len(cursor.to_list()), 2) + + # Start Example 10 + cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) + # End Example 10 + + self.assertEqual(len(cursor.to_list()), 5) + + # Start Example 11 + cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) + # End Example 11 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 12 + cursor = db.inventory.find({"$or": [{"status": 
"A"}, {"qty": {"$lt": 30}}]}) + # End Example 12 + + self.assertEqual(len(cursor.to_list()), 3) + + # Start Example 13 + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) + # End Example 13 + + self.assertEqual(len(cursor.to_list()), 2) + + def test_query_embedded_documents(self): + db = self.db + + # Start Example 14 + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 14 + + # Start Example 15 + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) + # End Example 15 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 16 + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) + # End Example 16 + + self.assertEqual(len(cursor.to_list()), 0) + + # Start Example 17 + cursor = db.inventory.find({"size.uom": "in"}) + # End Example 17 + + self.assertEqual(len(cursor.to_list()), 2) + + # Start Example 18 + cursor = db.inventory.find({"size.h": {"$lt": 15}}) + # End Example 18 + + self.assertEqual(len(cursor.to_list()), 4) + + # Start Example 19 + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + # End Example 19 + + self.assertEqual(len(cursor.to_list()), 1) + + def test_query_arrays(self): + db = self.db + + # Start Example 20 + db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) + # End Example 20 + + # Start Example 21 + cursor = db.inventory.find({"tags": ["red", "blank"]}) + # End Example 21 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 22 + cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) + # End Example 22 + + self.assertEqual(len(cursor.to_list()), 4) + + # Start Example 23 + cursor = db.inventory.find({"tags": "red"}) + # End Example 23 + + self.assertEqual(len(cursor.to_list()), 4) + + # Start Example 24 + cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) + # End Example 24 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 25 + cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) + # End Example 25 + + self.assertEqual(len(cursor.to_list()), 4) + + # Start Example 26 + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + # End Example 26 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 27 + cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) + # End Example 27 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 28 + cursor = db.inventory.find({"tags": {"$size": 3}}) + # End Example 28 + + self.assertEqual(len(cursor.to_list()), 1) + + def test_query_array_of_documents(self): + db = self.db + + # Start 
Example 29 + db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + {"warehouse": "A", "qty": 5}, + {"warehouse": "C", "qty": 15}, + ], + }, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, + { + "item": "paper", + "instock": [ + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, + ], + }, + { + "item": "planner", + "instock": [ + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, + ], + }, + { + "item": "postcard", + "instock": [ + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, + ], + }, + ] + ) + # End Example 29 + + # Start Example 30 + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) + # End Example 30 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 31 + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) + # End Example 31 + + self.assertEqual(len(cursor.to_list()), 0) + + # Start Example 32 + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) + # End Example 32 + + self.assertEqual(len(cursor.to_list()), 3) + + # Start Example 33 + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) + # End Example 33 + + self.assertEqual(len(cursor.to_list()), 5) + + # Start Example 34 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + # End Example 34 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 35 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + # End Example 35 + + self.assertEqual(len(cursor.to_list()), 3) + + # Start Example 36 + cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) + # End Example 36 + + self.assertEqual(len(cursor.to_list()), 4) + + # Start Example 37 + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) + # End Example 37 + + self.assertEqual(len(cursor.to_list()), 2) + + def test_query_null(self): + db = self.db + + # Start Example 38 + db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) + # End Example 38 + + # Start Example 39 + cursor = db.inventory.find({"item": None}) + # End Example 39 + + self.assertEqual(len(cursor.to_list()), 2) + + # Start Example 40 + cursor = db.inventory.find({"item": {"$type": 10}}) + # End Example 40 + + self.assertEqual(len(cursor.to_list()), 1) + + # Start Example 41 + cursor = db.inventory.find({"item": {"$exists": False}}) + # End Example 41 + + self.assertEqual(len(cursor.to_list()), 1) + + def test_projection(self): + db = self.db + + # Start Example 42 + db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) + # End Example 42 + + # Start Example 43 + cursor = db.inventory.find({"status": "A"}) + # End Example 43 + + self.assertEqual(len(cursor.to_list()), 3) + + # Start Example 44 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 
1}) + # End Example 44 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 45 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + # End Example 45 + + for doc in cursor: + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 46 + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) + # End Example 46 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 47 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + # End Example 47 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + size = doc["size"] + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) + + # Start Example 48 + cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) + # End Example 48 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) + size = doc["size"] + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) + + # Start Example 49 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + # End Example 49 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + for subdoc in doc["instock"]: + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) + + # Start Example 50 + cursor = db.inventory.find( + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) + # End Example 50 + + for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 1) + + def test_update_and_replace(self): + db = self.db + + # Start Example 51 + db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End 
Example 51 + + # Start Example 52 + db.inventory.update_one( + {"item": "paper"}, + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 52 + + for doc in db.inventory.find({"item": "paper"}): + self.assertEqual(doc["size"]["uom"], "cm") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 53 + db.inventory.update_many( + {"qty": {"$lt": 50}}, + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 53 + + for doc in db.inventory.find({"qty": {"$lt": 50}}): + self.assertEqual(doc["size"]["uom"], "in") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 54 + db.inventory.replace_one( + {"item": "paper"}, + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) + # End Example 54 + + for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): + self.assertEqual(len(doc.keys()), 2) + self.assertIn("item", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 2) + + def test_delete(self): + db = self.db + + # Start Example 55 + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 55 + + self.assertEqual(db.inventory.count_documents({}), 5) + + # Start Example 57 + db.inventory.delete_many({"status": "A"}) + # End Example 57 + + self.assertEqual(db.inventory.count_documents({}), 3) + + # Start Example 58 + db.inventory.delete_one({"status": "D"}) + # End Example 58 + + self.assertEqual(db.inventory.count_documents({}), 2) + + # Start Example 56 + db.inventory.delete_many({}) + # End Example 56 + + self.assertEqual(db.inventory.count_documents({}), 0) + + @client_context.require_change_streams + def test_change_streams(self): + db = self.db + done = False + + def insert_docs(): + nonlocal done + while not done: + db.inventory.insert_one({"username": "alice"}) + db.inventory.delete_one({"username": "alice"}) + time.sleep(0.005) + + t = ConcurrentRunner(target=insert_docs) + t.start() + + try: + # 1. 
The database for reactive, real-time applications
+            # Start Changestream Example 1
+            cursor = db.inventory.watch()
+            next(cursor)
+            # End Changestream Example 1
+            cursor.close()
+
+            # Start Changestream Example 2
+            cursor = db.inventory.watch(full_document="updateLookup")
+            next(cursor)
+            # End Changestream Example 2
+            cursor.close()
+
+            # Start Changestream Example 3
+            resume_token = cursor.resume_token
+            cursor = db.inventory.watch(resume_after=resume_token)
+            next(cursor)
+            # End Changestream Example 3
+            cursor.close()
+
+            # Start Changestream Example 4
+            pipeline = [
+                {"$match": {"fullDocument.username": "alice"}},
+                {"$addFields": {"newField": "this is an added field!"}},
+            ]
+            cursor = db.inventory.watch(pipeline=pipeline)
+            next(cursor)
+            # End Changestream Example 4
+            cursor.close()
+        finally:
+            done = True
+            t.join()
+
+    def test_aggregate_examples(self):
+        db = self.db
+
+        # Start Aggregation Example 1
+        db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}])
+        # End Aggregation Example 1
+
+        # Start Aggregation Example 2
+        db.sales.aggregate(
+            [
+                {"$unwind": "$items"},
+                {"$match": {"items.fruit": "banana"}},
+                {
+                    "$group": {
+                        "_id": {"day": {"$dayOfWeek": "$date"}},
+                        "count": {"$sum": "$items.quantity"},
+                    }
+                },
+                {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}},
+                {"$sort": {"numberSold": 1}},
+            ]
+        )
+        # End Aggregation Example 2
+
+        # Start Aggregation Example 3
+        db.sales.aggregate(
+            [
+                {"$unwind": "$items"},
+                {
+                    "$group": {
+                        "_id": {"day": {"$dayOfWeek": "$date"}},
+                        "items_sold": {"$sum": "$items.quantity"},
+                        "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}},
+                    }
+                },
+                {
+                    "$project": {
+                        "day": "$_id.day",
+                        "revenue": 1,
+                        "items_sold": 1,
+                        "discount": {
+                            "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0}
+                        },
+                    }
+                },
+            ]
+        )
+        # End Aggregation Example 3
+
+        # Start Aggregation Example 4
+        db.air_alliances.aggregate(
+            [
+                {
+                    "$lookup": {
+                        "from": "air_airlines",
+                        "let": {"constituents": "$airlines"},
+                        "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}],
+                        "as": "airlines",
+                    }
+                },
+                {
+                    "$project": {
+                        "_id": 0,
+                        "name": 1,
+                        "airlines": {
+                            "$filter": {
+                                "input": "$airlines",
+                                "as": "airline",
+                                "cond": {"$eq": ["$$airline.country", "Canada"]},
+                            }
+                        },
+                    }
+                },
+            ]
+        )
+        # End Aggregation Example 4
+
+    @client_context.require_version_min(4, 4)
+    def test_aggregate_projection_example(self):
+        db = self.db
+
+        # Start Aggregation Projection Example 1
+        db.inventory.find(
+            {},
+            {
+                "_id": 0,
+                "item": 1,
+                "status": {
+                    "$switch": {
+                        "branches": [
+                            {"case": {"$eq": ["$status", "A"]}, "then": "Available"},
+                            {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"},
+                        ],
+                        "default": "No status found",
+                    }
+                },
+                "area": {
+                    "$concat": [
+                        {"$toString": {"$multiply": ["$size.h", "$size.w"]}},
+                        " ",
+                        "$size.uom",
+                    ]
+                },
+                "reportNumber": {"$literal": 1},
+            },
+        )
+
+        # End Aggregation Projection Example 1
+
+    def test_commands(self):
+        db = self.db
+        db.restaurants.insert_one({})
+
+        # Start runCommand Example 1
+        db.command("buildInfo")
+        # End runCommand Example 1
+
+        # Start runCommand Example 2
+        db.command("count", "restaurants")
+        # End runCommand Example 2
+
+    def test_index_management(self):
+        db = self.db
+
+        # Start Index Example 1
+        db.records.create_index("score")
+        # End Index Example 1
+
+        # Start Index Example 2
+        db.restaurants.create_index(
+            [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)],
+            partialFilterExpression={"rating": {"$gt": 5}},
+        )
+        # End Index Example 2
+
+    @client_context.require_replica_set
+    def test_misc(self):
+        # Marketing examples
+        client = self.client
+        self.addCleanup(client.drop_database, "test")
+        self.addCleanup(client.drop_database, "my_database")
+
+        # 2. Tunable consistency controls
+        collection = client.my_database.my_collection
+        with client.start_session() as session:
+            collection.insert_one({"_id": 1}, session=session)
+            collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session)
+            for _doc in collection.find({}, session=session):
+                pass
+
+        # 3. Exploiting the power of arrays
+        collection = client.test.array_updates_test
+        collection.update_one({"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}])
+
+
+class TestTransactionExamples(IntegrationTest):
+    @client_context.require_transactions
+    def test_transactions(self):
+        # Transaction examples
+        client = self.client
+        self.addCleanup(client.drop_database, "hr")
+        self.addCleanup(client.drop_database, "reporting")
+
+        employees = client.hr.employees
+        events = client.reporting.events
+        employees.insert_one({"employee": 3, "status": "Active"})
+        events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}})
+
+        # Start Transactions Intro Example 1
+
+        def update_employee_info(session):
+            employees_coll = session.client.hr.employees
+            events_coll = session.client.reporting.events
+
+            with session.start_transaction(
+                read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority")
+            ):
+                employees_coll.update_one(
+                    {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session
+                )
+                events_coll.insert_one(
+                    {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session
+                )
+
+                while True:
+                    try:
+                        # Commit uses write concern set at transaction start.
+                        session.commit_transaction()
+                        print("Transaction committed.")
+                        break
+                    except (ConnectionFailure, OperationFailure) as exc:
+                        # Can retry commit
+                        if exc.has_error_label("UnknownTransactionCommitResult"):
+                            print("UnknownTransactionCommitResult, retrying commit operation ...")
+                            continue
+                        else:
+                            print("Error during commit ...")
+                            raise
+
+        # End Transactions Intro Example 1
+
+        with client.start_session() as session:
+            update_employee_info(session)
+
+        employee = employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        # Start Transactions Retry Example 1
+        def run_transaction_with_retry(txn_func, session):
+            while True:
+                try:
+                    txn_func(session)  # performs transaction
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    print("Transaction aborted. Caught exception during transaction.")
+
+                    # If transient error, retry the whole transaction
+                    if exc.has_error_label("TransientTransactionError"):
+                        print("TransientTransactionError, retrying transaction ...")
+                        continue
+                    else:
+                        raise
+
+        # End Transactions Retry Example 1
+
+        with client.start_session() as session:
+            run_transaction_with_retry(update_employee_info, session)
+
+        employee = employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        # Start Transactions Retry Example 2
+        def commit_with_retry(session):
+            while True:
+                try:
+                    # Commit uses write concern set at transaction start.
+                    session.commit_transaction()
+                    print("Transaction committed.")
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # Can retry commit
+                    if exc.has_error_label("UnknownTransactionCommitResult"):
+                        print("UnknownTransactionCommitResult, retrying commit operation ...")
+                        continue
+                    else:
+                        print("Error during commit ...")
+                        raise
+
+        # End Transactions Retry Example 2
+
+        # Test commit_with_retry from the previous examples
+        def _insert_employee_retry_commit(session):
+            with session.start_transaction():
+                employees.insert_one({"employee": 4, "status": "Active"}, session=session)
+                events.insert_one(
+                    {"employee": 4, "status": {"new": "Active", "old": None}}, session=session
+                )
+
+                commit_with_retry(session)
+
+        with client.start_session() as session:
+            run_transaction_with_retry(_insert_employee_retry_commit, session)
+
+        employee = employees.find_one({"employee": 4})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Active")
+
+        # Start Transactions Retry Example 3
+
+        def run_transaction_with_retry(txn_func, session):
+            while True:
+                try:
+                    txn_func(session)  # performs transaction
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # If transient error, retry the whole transaction
+                    if exc.has_error_label("TransientTransactionError"):
+                        print("TransientTransactionError, retrying transaction ...")
+                        continue
+                    else:
+                        raise
+
+        def commit_with_retry(session):
+            while True:
+                try:
+                    # Commit uses write concern set at transaction start.
+                    session.commit_transaction()
+                    print("Transaction committed.")
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # Can retry commit
+                    if exc.has_error_label("UnknownTransactionCommitResult"):
+                        print("UnknownTransactionCommitResult, retrying commit operation ...")
+                        continue
+                    else:
+                        print("Error during commit ...")
+                        raise
+
+        # Updates two collections in a transaction
+
+        def update_employee_info(session):
+            employees_coll = session.client.hr.employees
+            events_coll = session.client.reporting.events
+
+            with session.start_transaction(
+                read_concern=ReadConcern("snapshot"),
+                write_concern=WriteConcern(w="majority"),
+                read_preference=ReadPreference.PRIMARY,
+            ):
+                employees_coll.update_one(
+                    {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session
+                )
+                events_coll.insert_one(
+                    {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session
+                )
+
+                commit_with_retry(session)
+
+        # Start a session.
+        with client.start_session() as session:
+            try:
+                run_transaction_with_retry(update_employee_info, session)
+            except Exception:
+                # Do something with error.
+                raise
+
+        # End Transactions Retry Example 3
+
+        employee = employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        def MongoClient(_):
+            return self.rs_client()
+
+        uriString = None
+
+        # Start Transactions withTxn API Example 1
+
+        # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g.
+        # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl'
+        # For a sharded cluster, connect to the mongos instances; e.g.
+        # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/'
+
+        client = MongoClient(uriString)
+        wc_majority = WriteConcern("majority", wtimeout=1000)
+
+        # Prereq: Create collections.
+ client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0}) + client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0}) + + # Step 1: Define the callback that specifies the sequence of operations to perform inside the transactions. + def callback(session): + collection_one = session.client.mydb1.foo + collection_two = session.client.mydb2.bar + + # Important:: You must pass the session to the operations. + collection_one.insert_one({"abc": 1}, session=session) + collection_two.insert_one({"xyz": 999}, session=session) + + # Step 2: Start a client session. + with client.start_session() as session: + # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). + session.with_transaction(callback) + + # End Transactions withTxn API Example 1 + + +class TestCausalConsistencyExamples(IntegrationTest): + @client_context.require_secondaries_count(1) + def test_causal_consistency(self): + # Causal consistency examples + client = self.client + self.addCleanup(client.drop_database, "test") + client.test.drop_collection("items") + client.test.items.insert_one( + {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()} + ) + + # Start Causal Consistency Example 1 + with client.start_session(causal_consistency=True) as s1: + current_date = datetime.datetime.today() + items = client.get_database( + "test", + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + items.update_one( + {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1 + ) + items.insert_one( + {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1 + ) + # End Causal Consistency Example 1 + + assert s1.cluster_time is not None + assert s1.operation_time is not None + + # Start Causal Consistency Example 2 + with client.start_session(causal_consistency=True) as s2: + s2.advance_cluster_time(s1.cluster_time) + s2.advance_operation_time(s1.operation_time) + + items = client.get_database( + "test", + read_preference=ReadPreference.SECONDARY, + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + for item in items.find({"end": None}, session=s2): + print(item) + # End Causal Consistency Example 2 + + +class TestVersionedApiExamples(IntegrationTest): + @client_context.require_version_min(4, 7) + def test_versioned_api(self): + # Versioned API examples + def MongoClient(_, server_api): + return self.rs_client(server_api=server_api, connect=False) + + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + + MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + MongoClient(uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + MongoClient(uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + @unittest.skip("PYTHON-3167 count has been added to API version 1") + @client_context.require_version_min(4, 7) + def test_versioned_api_migration(self): + # SERVER-58785 + if client_context.is_topology_type(["sharded"]) and not client_context.version.at_least( + 5, 0, 2 + ): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = self.rs_client(server_api=ServerApi("1", 
strict=True))
+        client.db.sales.drop()
+
+        # Start Versioned API Example 5
+        def strptime(s):
+            return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
+
+        client.db.sales.insert_many(
+            [
+                {
+                    "_id": 1,
+                    "item": "abc",
+                    "price": 10,
+                    "quantity": 2,
+                    "date": strptime("2021-01-01T08:00:00Z"),
+                },
+                {
+                    "_id": 2,
+                    "item": "jkl",
+                    "price": 20,
+                    "quantity": 1,
+                    "date": strptime("2021-02-03T09:00:00Z"),
+                },
+                {
+                    "_id": 3,
+                    "item": "xyz",
+                    "price": 5,
+                    "quantity": 5,
+                    "date": strptime("2021-02-03T09:05:00Z"),
+                },
+                {
+                    "_id": 4,
+                    "item": "abc",
+                    "price": 10,
+                    "quantity": 10,
+                    "date": strptime("2021-02-15T08:00:00Z"),
+                },
+                {
+                    "_id": 5,
+                    "item": "xyz",
+                    "price": 5,
+                    "quantity": 10,
+                    "date": strptime("2021-02-15T09:05:00Z"),
+                },
+                {
+                    "_id": 6,
+                    "item": "xyz",
+                    "price": 5,
+                    "quantity": 5,
+                    "date": strptime("2021-02-15T12:05:10Z"),
+                },
+                {
+                    "_id": 7,
+                    "item": "xyz",
+                    "price": 5,
+                    "quantity": 10,
+                    "date": strptime("2021-02-15T14:12:12Z"),
+                },
+                {
+                    "_id": 8,
+                    "item": "abc",
+                    "price": 10,
+                    "quantity": 5,
+                    "date": strptime("2021-03-16T20:20:13Z"),
+                },
+            ]
+        )
+        # End Versioned API Example 5
+
+        with self.assertRaisesRegex(
+            OperationFailure,
+            "Provided apiStrict:true, but the command count is not in API Version 1",
+        ):
+            client.db.command("count", "sales", query={})
+        # Start Versioned API Example 6
+        # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'}
+        # End Versioned API Example 6
+
+        # Start Versioned API Example 7
+        client.db.sales.count_documents({})
+        # End Versioned API Example 7
+
+        # Start Versioned API Example 8
+        # 8
+        # End Versioned API Example 8
+
+
+class TestSnapshotQueryExamples(IntegrationTest):
+    @client_context.require_version_min(5, 0)
+    def test_snapshot_query(self):
+        client = self.client
+
+        if not client_context.is_topology_type(["replicaset", "sharded"]):
+            self.skipTest("Must be a replica set or sharded cluster")
+
+        self.addCleanup(client.drop_database, "pets")
+        db = client.pets
+        db.drop_collection("cats")
+        db.drop_collection("dogs")
+        db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True})
+        db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True})
+
+        def predicate_one():
+            return self.check_for_snapshot(db.cats)
+
+        def predicate_two():
+            return self.check_for_snapshot(db.dogs)
+
+        wait_until(predicate_two, "success")
+        wait_until(predicate_one, "success")
+
+        # Start Snapshot Query Example 1
+
+        db = client.pets
+        with client.start_session(snapshot=True) as s:
+            adoptablePetsCount = (
+                (
+                    db.cats.aggregate(
+                        [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}],
+                        session=s,
+                    )
+                ).next()
+            )["adoptableCatsCount"]
+
+            adoptablePetsCount += (
+                (
+                    db.dogs.aggregate(
+                        [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}],
+                        session=s,
+                    )
+                ).next()
+            )["adoptableDogsCount"]
+
+            print(adoptablePetsCount)
+
+        # End Snapshot Query Example 1
+        db = client.retail
+        self.addCleanup(client.drop_database, "retail")
+        db.drop_collection("sales")
+
+        saleDate = datetime.datetime.now()
+        db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate})
+
+        def predicate_three():
+            return self.check_for_snapshot(db.sales)
+
+        wait_until(predicate_three, "success")
+
+        # Start Snapshot Query Example 2
+        db = client.retail
+        with 
client.start_session(snapshot=True) as s: + _ = ( + ( + db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] + + # End Snapshot Query Example 2 + + def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + with self.client.start_session(snapshot=True) as s: + try: + if collection.find_one(session=s): + return True + return False + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_fork.py b/test/test_fork.py new file mode 100644 index 0000000000..dad947d8c5 --- /dev/null +++ b/test/test_fork.py @@ -0,0 +1,102 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that pymongo resets its own locks after a fork.""" +from __future__ import annotations + +import os +import sys +import unittest +import warnings +from multiprocessing import Pipe + +sys.path[0:0] = [""] + +from test import IntegrationTest +from test.utils_shared import is_greenthread_patched + +from bson.objectid import ObjectId + + +@unittest.skipIf( + not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" +) +@unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", +) +class TestFork(IntegrationTest): + def test_lock_client(self): + # Forks the client with some items locked. + # Parent => All locks should be as before the fork. + # Child => All locks should be reset. + with self.client._lock: + + def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.client.admin.command("ping") + + with self.fork(target): + pass + self.client.admin.command("ping") + + def test_lock_object_id(self): + # Forks the client with ObjectId's _inc_lock locked. + # Parent => _inc_lock should remain locked. + # Child => _inc_lock should be unlocked. + with ObjectId._inc_lock: + + def target(): + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) + + with self.fork(target): + pass + + def test_topology_reset(self): + # Tests that topologies are different from each other. + # Cannot use ID because virtual memory addresses may be the same. + # Cannot reinstantiate ObjectId in the topology settings. + # Relies on difference in PID when opened again. 
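+        # A multiprocessing Pipe carries the child's observations back to the
+        # parent process, where the assertions run.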
+ parent_conn, child_conn = Pipe() + init_id = self.client._topology._pid + parent_cursor_exc = self.client._kill_cursors_executor + + def target(): + # Catch the fork warning and send to the parent for assertion. + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + self.client.admin.command("ping") + child_conn.send(str(ctx[0])) + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) + ) + + with self.fork(target): + self.assertEqual(self.client._topology._pid, init_id) + fork_warning = parent_conn.recv() + self.assertIn("MongoClient opened before fork", fork_warning) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_grid_file.py b/test/test_grid_file.py index e3f4329e61..c7ccda44a4 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- # -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,101 +13,143 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the grid_file module. -""" +"""Tests for the grid_file module.""" +from __future__ import annotations import datetime +import io import sys -import unittest +import zipfile +from io import BytesIO +from test import ( + IntegrationTest, + UnitTest, + client_context, + qcheck, + unittest, +) + +from pymongo.synchronous.database import Database + sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest +from test.utils_shared import OvertCommandListener from bson.objectid import ObjectId -from bson.py3compat import b, StringIO -from gridfs import GridFS -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, - _SEEK_CUR, - _SEEK_END, - GridIn, - GridFile, - GridOut, - GridOutCursor) -from gridfs.errors import (NoFile, - UnsupportedAPI) +from gridfs.errors import NoFile +from gridfs.synchronous.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + GridFS, + GridIn, + GridOut, + GridOutCursor, +) from pymongo import MongoClient -from pymongo.errors import ConnectionFailure -from test.test_client import get_client -from test import qcheck +from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError +from pymongo.message import _CursorAddress +_IS_SYNC = True -class TestGridFile(unittest.TestCase): - def setUp(self): - self.db = get_client().pymongo_test - self.db.fs.files.remove({}) - self.db.fs.chunks.remove({}) +class TestGridFileNoConnect(UnitTest): + """Test GridFile features on a client that does not connect.""" + + db: Database + + @classmethod + def setUpClass(cls): + cls.db = MongoClient(connect=False).pymongo_test + + def test_grid_in_custom_opts(self): + self.assertRaises(TypeError, GridIn, "foo") + + a = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + + self.assertEqual(5, a._id) + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + self.assertEqual("text/html", a.content_type) + self.assertEqual(1000, a.chunk_size) + self.assertEqual(["foo"], 
a.aliases) + self.assertEqual({"foo": 1, "bar": 2}, a.metadata) + self.assertEqual(3, a.bar) + self.assertEqual("hello", a.baz) + self.assertRaises(AttributeError, getattr, a, "mike") + + b = GridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) + self.assertEqual("text/html", b.content_type) + self.assertEqual(1000, b.chunk_size) + self.assertEqual(100, b.baz) - def tearDown(self): - self.db = None + +class TestGridFile(IntegrationTest): + def setUp(self): + super().setUp() + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) def test_basic(self): f = GridIn(self.db.fs, filename="test") - f.write(b("hello world")) + f.write(b"hello world") f.close() - self.assertEqual(1, self.db.fs.files.find().count()) - self.assertEqual(1, self.db.fs.chunks.find().count()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) g = GridOut(self.db.fs, f._id) - self.assertEqual(b("hello world"), g.read()) + self.assertEqual(b"hello world", g.read()) # make sure it's still there... g = GridOut(self.db.fs, f._id) - self.assertEqual(b("hello world"), g.read()) + self.assertEqual(b"hello world", g.read()) f = GridIn(self.db.fs, filename="test") f.close() - self.assertEqual(2, self.db.fs.files.find().count()) - self.assertEqual(1, self.db.fs.chunks.find().count()) + self.assertEqual(2, self.db.fs.files.count_documents({})) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) g = GridOut(self.db.fs, f._id) - self.assertEqual(b(""), g.read()) + self.assertEqual(b"", g.read()) # test that reading 0 returns proper type - self.assertEqual(b(""), g.read(0)) + self.assertEqual(b"", g.read(0)) def test_md5(self): f = GridIn(self.db.fs) - f.write(b("hello world\n")) + f.write(b"hello world\n") f.close() - self.assertEqual("6f5902ac237024bdd0c176cb93063dc4", f.md5) + self.assertEqual(None, f.md5) def test_alternate_collection(self): - self.db.alt.files.remove({}) - self.db.alt.chunks.remove({}) + self.db.alt.files.delete_many({}) + self.db.alt.chunks.delete_many({}) f = GridIn(self.db.alt) - f.write(b("hello world")) + f.write(b"hello world") f.close() - self.assertEqual(1, self.db.alt.files.find().count()) - self.assertEqual(1, self.db.alt.chunks.find().count()) + self.assertEqual(1, self.db.alt.files.count_documents({})) + self.assertEqual(1, self.db.alt.chunks.count_documents({})) g = GridOut(self.db.alt, f._id) - self.assertEqual(b("hello world"), g.read()) - - # test that md5 still works... 
- self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", g.md5) - - def test_grid_file(self): - self.assertRaises(UnsupportedAPI, GridFile) + self.assertEqual(b"hello world", g.read()) def test_grid_in_default_opts(self): self.assertRaises(TypeError, GridIn, "foo") a = GridIn(self.db.fs) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual(None, a.filename) @@ -119,6 +160,7 @@ def test_grid_in_default_opts(self): self.assertEqual(None, a.content_type) a.content_type = "text/html" + self.assertEqual("text/html", a.content_type) self.assertRaises(AttributeError, getattr, a, "length") @@ -132,21 +174,27 @@ def test_grid_in_default_opts(self): self.assertRaises(AttributeError, getattr, a, "aliases") a.aliases = ["foo"] + self.assertEqual(["foo"], a.aliases) self.assertRaises(AttributeError, getattr, a, "metadata") a.metadata = {"foo": 1} + self.assertEqual({"foo": 1}, a.metadata) - self.assertRaises(AttributeError, getattr, a, "md5") self.assertRaises(AttributeError, setattr, a, "md5", 5) a.close() - a.forty_two = 42 + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + a.set("forty_two", 42) + self.assertEqual(42, a.forty_two) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual("my_file", a.filename) @@ -160,14 +208,14 @@ def test_grid_in_default_opts(self): self.assertEqual(255 * 1024, a.chunk_size) self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) - self.assertTrue(isinstance(a.upload_date, datetime.datetime)) + self.assertIsInstance(a.upload_date, datetime.datetime) self.assertRaises(AttributeError, setattr, a, "upload_date", 5) self.assertEqual(["foo"], a.aliases) self.assertEqual({"foo": 1}, a.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5) + self.assertEqual(None, a.md5) self.assertRaises(AttributeError, setattr, a, "md5", 5) # Make sure custom attributes that were set both before and after @@ -177,39 +225,21 @@ def test_grid_in_default_opts(self): self.assertEqual(a.aliases, b.aliases) self.assertEqual(a.forty_two, b.forty_two) - def test_grid_in_custom_opts(self): - self.assertRaises(TypeError, GridIn, "foo") - - a = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") - - self.assertEqual(5, a._id) - self.assertEqual("my_file", a.filename) - self.assertEqual("my_file", a.name) - self.assertEqual("text/html", a.content_type) - self.assertEqual(1000, a.chunk_size) - self.assertEqual(["foo"], a.aliases) - self.assertEqual({"foo": 1, "bar": 2}, a.metadata) - self.assertEqual(3, a.bar) - self.assertEqual("hello", a.baz) - self.assertRaises(AttributeError, getattr, a, "mike") - - b = GridIn(self.db.fs, - content_type="text/html", chunk_size=1000, baz=100) - self.assertEqual("text/html", b.content_type) - self.assertEqual(1000, b.chunk_size) - self.assertEqual(100, b.baz) - def test_grid_out_default_opts(self): self.assertRaises(TypeError, GridOut, "foo") - self.assertRaises(NoFile, GridOut, self.db.fs, 5) + gout = GridOut(self.db.fs, 5) + with self.assertRaises(NoFile): + if not _IS_SYNC: + gout.open() + gout.name a = GridIn(self.db.fs) a.close() b = GridOut(self.db.fs, a._id) + if not _IS_SYNC: + b.open() self.assertEqual(a._id, b._id) self.assertEqual(0, b.length) @@ -217,56 
+247,106 @@ def test_grid_out_default_opts(self): self.assertEqual(None, b.name) self.assertEqual(None, b.filename) self.assertEqual(255 * 1024, b.chunk_size) - self.assertTrue(isinstance(b.upload_date, datetime.datetime)) + self.assertIsInstance(b.upload_date, datetime.datetime) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", b.md5) - - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + self.assertEqual(None, b.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, b, attr, 5) + def test_grid_out_cursor_options(self): + self.assertRaises( + TypeError, GridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) + + cursor = GridOutCursor(self.db.fs, {}) + cursor_clone = cursor.clone() + + cursor_dict = cursor.__dict__.copy() + cursor_dict.pop("_session") + cursor_clone_dict = cursor_clone.__dict__.copy() + cursor_clone_dict.pop("_session") + self.assertDictEqual(cursor_dict, cursor_clone_dict) + + self.assertRaises(NotImplementedError, cursor.add_option, 0) + self.assertRaises(NotImplementedError, cursor.remove_option, 0) + def test_grid_out_custom_opts(self): - one = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") - one.write(b("hello world")) + one = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + one.write(b"hello world") one.close() two = GridOut(self.db.fs, 5) + if not _IS_SYNC: + two.open() + self.assertEqual("my_file", two.name) self.assertEqual("my_file", two.filename) self.assertEqual(5, two._id) self.assertEqual(11, two.length) self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5) - - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) def test_grid_out_file_document(self): one = GridIn(self.db.fs) - one.write(b("foo bar")) + one.write(b"foo bar") one.close() two = GridOut(self.db.fs, file_document=self.db.fs.files.find_one()) - self.assertEqual(b("foo bar"), two.read()) + self.assertEqual(b"foo bar", two.read()) three = GridOut(self.db.fs, 5, file_document=self.db.fs.files.find_one()) - self.assertEqual(b("foo bar"), three.read()) + self.assertEqual(b"foo bar", three.read()) - self.assertRaises(NoFile, GridOut, self.db.fs, file_document={}) + four = GridOut(self.db.fs, file_document={}) + with self.assertRaises(NoFile): + if not _IS_SYNC: + four.open() + four.name def test_write_file_like(self): one = GridIn(self.db.fs) - one.write(b("hello world")) + one.write(b"hello world") one.close() two = GridOut(self.db.fs, one._id) @@ 
-276,39 +356,55 @@ def test_write_file_like(self): three.close() four = GridOut(self.db.fs, three._id) - self.assertEqual(b("hello world"), four.read()) + self.assertEqual(b"hello world", four.read()) five = GridIn(self.db.fs, chunk_size=2) - five.write(b("hello")) - buffer = StringIO(b(" world")) + five.write(b"hello") + buffer = BytesIO(b" world") five.write(buffer) - five.write(b(" and mongodb")) + five.write(b" and mongodb") five.close() - self.assertEqual(b("hello world and mongodb"), - GridOut(self.db.fs, five._id).read()) + self.assertEqual(b"hello world and mongodb", GridOut(self.db.fs, five._id).read()) def test_write_lines(self): a = GridIn(self.db.fs) - a.writelines([b("hello "), b("world")]) + a.writelines([b"hello ", b"world"]) a.close() - self.assertEqual(b("hello world"), GridOut(self.db.fs, a._id).read()) + self.assertEqual(b"hello world", GridOut(self.db.fs, a._id).read()) def test_close(self): f = GridIn(self.db.fs) f.close() - self.assertRaises(ValueError, f.write, "test") + with self.assertRaises(ValueError): + f.write("test") f.close() + def test_closed(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write(b"Hello world.\nHow are you?") + f.close() + + g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() + self.assertFalse(g.closed) + g.read(1) + self.assertFalse(g.closed) + g.read(100) + self.assertFalse(g.closed) + g.close() + self.assertTrue(g.closed) + def test_multi_chunk_file(self): - random_string = b('a') * (DEFAULT_CHUNK_SIZE + 1000) + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) f = GridIn(self.db.fs) f.write(random_string) f.close() - self.assertEqual(1, self.db.fs.files.find().count()) - self.assertEqual(2, self.db.fs.chunks.find().count()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(2, self.db.fs.chunks.count_documents({})) g = GridOut(self.db.fs, f._id) self.assertEqual(random_string, g.read()) @@ -325,8 +421,8 @@ def helper(data): self.files += 1 self.chunks += len(data) - self.assertEqual(self.files, self.db.fs.files.find().count()) - self.assertEqual(self.chunks, self.db.fs.chunks.find().count()) + self.assertEqual(self.files, self.db.fs.files.count_documents({})) + self.assertEqual(self.chunks, self.db.fs.chunks.count_documents({})) g = GridOut(self.db.fs, f._id) self.assertEqual(data, g.read()) @@ -335,36 +431,38 @@ def helper(data): self.assertEqual(data, g.read(10) + g.read(10)) return True - qcheck.check_unittest(self, helper, - qcheck.gen_string(qcheck.gen_range(0, 20))) + qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) def test_seek(self): f = GridIn(self.db.fs, chunkSize=3) - f.write(b("hello world")) + f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(b("hello world"), g.read()) + self.assertEqual(b"hello world", g.read()) g.seek(0) - self.assertEqual(b("hello world"), g.read()) + self.assertEqual(b"hello world", g.read()) g.seek(1) - self.assertEqual(b("ello world"), g.read()) - self.assertRaises(IOError, g.seek, -1) + self.assertEqual(b"ello world", g.read()) + with self.assertRaises(IOError): + g.seek(-1) g.seek(-3, _SEEK_END) - self.assertEqual(b("rld"), g.read()) + self.assertEqual(b"rld", g.read()) g.seek(0, _SEEK_END) - self.assertEqual(b(""), g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_END) + self.assertEqual(b"", g.read()) + with self.assertRaises(IOError): + g.seek(-100, _SEEK_END) g.seek(3) g.seek(3, _SEEK_CUR) - self.assertEqual(b("world"), g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_CUR) 
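+        # A quick sanity sketch of the position bookkeeping here: seek(3)
+        # moves to absolute offset 3, seek(3, _SEEK_CUR) advances to 6, so
+        # the remainder of b"hello world" is b"world".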
+ self.assertEqual(b"world", g.read()) + with self.assertRaises(IOError): + g.seek(-100, _SEEK_CUR) def test_tell(self): f = GridIn(self.db.fs, chunkSize=3) - f.write(b("hello world")) + f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) @@ -380,81 +478,158 @@ def test_tell(self): def test_multiple_reads(self): f = GridIn(self.db.fs, chunkSize=3) - f.write(b("hello world")) + f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(b("he"), g.read(2)) - self.assertEqual(b("ll"), g.read(2)) - self.assertEqual(b("o "), g.read(2)) - self.assertEqual(b("wo"), g.read(2)) - self.assertEqual(b("rl"), g.read(2)) - self.assertEqual(b("d"), g.read(2)) - self.assertEqual(b(""), g.read(2)) + self.assertEqual(b"he", g.read(2)) + self.assertEqual(b"ll", g.read(2)) + self.assertEqual(b"o ", g.read(2)) + self.assertEqual(b"wo", g.read(2)) + self.assertEqual(b"rl", g.read(2)) + self.assertEqual(b"d", g.read(2)) + self.assertEqual(b"", g.read(2)) def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) - f.write(b("""Hello world, + f.write( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) f.close() # Try read(), then readline(). g = GridOut(self.db.fs, f._id) - self.assertEqual(b("H"), g.read(1)) - self.assertEqual(b("ello world,\n"), g.readline()) - self.assertEqual(b("How a"), g.readline(5)) - self.assertEqual(b(""), g.readline(0)) - self.assertEqual(b("re you?\n"), g.readline()) - self.assertEqual(b("Hope all is well.\n"), g.readline(1000)) - self.assertEqual(b("Bye"), g.readline()) - self.assertEqual(b(""), g.readline()) + self.assertEqual(b"H", g.read(1)) + self.assertEqual(b"ello world,\n", g.readline()) + self.assertEqual(b"How a", g.readline(5)) + self.assertEqual(b"", g.readline(0)) + self.assertEqual(b"re you?\n", g.readline()) + self.assertEqual(b"Hope all is well.\n", g.readline(1000)) + self.assertEqual(b"Bye", g.readline()) + self.assertEqual(b"", g.readline()) # Try readline() first, then read(). g = GridOut(self.db.fs, f._id) - self.assertEqual(b("He"), g.readline(2)) - self.assertEqual(b("l"), g.read(1)) - self.assertEqual(b("lo"), g.readline(2)) - self.assertEqual(b(" world,\n"), g.readline()) + self.assertEqual(b"He", g.readline(2)) + self.assertEqual(b"l", g.read(1)) + self.assertEqual(b"lo", g.readline(2)) + self.assertEqual(b" world,\n", g.readline()) # Only readline(). g = GridOut(self.db.fs, f._id) - self.assertEqual(b("H"), g.readline(1)) - self.assertEqual(b("e"), g.readline(1)) - self.assertEqual(b("llo world,\n"), g.readline()) + self.assertEqual(b"H", g.readline(1)) + self.assertEqual(b"e", g.readline(1)) + self.assertEqual(b"llo world,\n", g.readline()) + + def test_readlines(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + f.close() + + # Try read(), then readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"He", g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines()) + self.assertEqual([], g.readlines()) + + # Try readline(), then readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], g.readlines(13)) + self.assertEqual(b"Bye", g.readline()) + self.assertEqual([], g.readlines()) + + # Only readlines(). 
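+        # As with io.IOBase.readlines(hint), a positive hint stops collecting
+        # lines once the running byte total exceeds it; a hint of 0 (or no
+        # hint at all) reads every remaining line.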
+ g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(), + ) + + g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(0), + ) + + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual([b"How are you?\n"], g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines(18)) + + # Try readlines() first, then read(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"H", g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], g.readlines(29)) + self.assertEqual([b"Bye"], g.readlines(1)) + + # Try readlines() first, then readline(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"How are you?\n", g.readline()) + self.assertEqual([b"Hope all is well.\n"], g.readlines(17)) + self.assertEqual(b"Bye", g.readline()) def test_iterator(self): f = GridIn(self.db.fs) f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([], list(g)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) f = GridIn(self.db.fs) - f.write(b("hello world")) + f.write(b"hello world\nhere are\nsome lines.") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b("hello world")], list(g)) - self.assertEqual(b("hello"), g.read(5)) - self.assertEqual([b("hello world")], list(g)) - self.assertEqual(b(" worl"), g.read(5)) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], g.to_list()) + + self.assertEqual(b"", g.read(5)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) + + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", next(iter(g))) + self.assertEqual(b"here", g.read(4)) + self.assertEqual(b" are\n", next(iter(g))) + self.assertEqual(b"some lines", g.read(10)) + self.assertEqual(b".", next(iter(g))) + with self.assertRaises(StopIteration): + iter(g).__next__() f = GridIn(self.db.fs, chunk_size=2) - f.write(b("hello world")) + f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b("he"), b("ll"), b("o "), - b("wo"), b("rl"), b("d")], list(g)) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], g.to_list()) def test_read_unaligned_buffer_size(self): - in_data = b("This is a text that doesn't " - "quite fit in a single 16-byte chunk.") + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." 
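+        # A read size of 13 never aligns with the 16-byte chunks, so most
+        # reads straddle a chunk boundary and exercise the buffering path.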
f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() g = GridOut(self.db.fs, f._id) - out_data = b('') + out_data = b"" while 1: s = g.read(13) if not s: @@ -464,7 +639,7 @@ def test_read_unaligned_buffer_size(self): self.assertEqual(in_data, out_data) def test_readchunk(self): - in_data = b('a') * 10 + in_data = b"a" * 10 f = GridIn(self.db.fs, chunkSize=3) f.write(in_data) f.close() @@ -483,21 +658,22 @@ def test_readchunk(self): def test_write_unicode(self): f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, u"foo") + with self.assertRaises(TypeError): + f.write("foo") f = GridIn(self.db.fs, encoding="utf-8") - f.write(u"foo") + f.write("foo") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(b("foo"), g.read()) + self.assertEqual(b"foo", g.read()) f = GridIn(self.db.fs, encoding="iso-8859-1") - f.write(u"aé") + f.write("aé") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(u"aé".encode("iso-8859-1"), g.read()) + self.assertEqual("aé".encode("iso-8859-1"), g.read()) def test_set_after_close(self): f = GridIn(self.db.fs, _id="foo", bar="baz") @@ -508,8 +684,12 @@ def test_set_after_close(self): self.assertRaises(AttributeError, getattr, f, "uploadDate") self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "foo" - f.baz = 5 + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + f.set("bar", "foo") + f.set("baz", 5) self.assertEqual("foo", f._id) self.assertEqual("foo", f.bar) @@ -524,38 +704,54 @@ def test_set_after_close(self): self.assertTrue(f.uploadDate) self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "a" - f.baz = "b" + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + f.set("bar", "a") + f.set("baz", "b") self.assertRaises(AttributeError, setattr, f, "upload_date", 5) g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() self.assertEqual("a", g.bar) self.assertEqual("b", g.baz) # Versions 2.0.1 and older saved a _closed field for some reason. self.assertRaises(AttributeError, getattr, g, "_closed") def test_context_manager(self): - if sys.version_info < (2, 6): - raise SkipTest("With statement requires Python >= 2.6") + contents = b"Imagine this is some important data..." - contents = b("Imagine this is some important data...") - # Hack around python2.4 an 2.5 not supporting 'with' syntax - exec """ -with GridIn(self.db.fs, filename="important") as infile: - infile.write(contents) + with GridIn(self.db.fs, filename="important") as infile: + infile.write(contents) -with GridOut(self.db.fs, infile._id) as outfile: - self.assertEqual(contents, outfile.read()) -""" + with GridOut(self.db.fs, infile._id) as outfile: + self.assertEqual(contents, outfile.read()) - def test_prechunked_string(self): + def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + with GridIn(self.db.fs, filename="important") as infile: + infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. 
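+        # In other words, the already-flushed chunks survive as orphans, but
+        # the files document is never committed, so readers never observe a
+        # half-written file.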
+ self.assertEqual( + self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + def test_prechunked_string(self): def write_me(s, chunk_size): - buf = StringIO(s) + buf = BytesIO(s) infile = GridIn(self.db.fs) while True: to_write = buf.read(chunk_size) - if to_write == b(''): + if to_write == b"": break infile.write(to_write) infile.close() @@ -565,7 +761,7 @@ def write_me(s, chunk_size): data = outfile.read() self.assertEqual(s, data) - s = b('x' * DEFAULT_CHUNK_SIZE * 4) + s = b"x" * DEFAULT_CHUNK_SIZE * 4 # Test with default chunk size write_me(s, DEFAULT_CHUNK_SIZE) # Multiple @@ -575,44 +771,100 @@ def write_me(s, chunk_size): def test_grid_out_lazy_connect(self): fs = self.db.fs - outfile = GridOut(fs, file_id=-1, _connect=False) - self.assertRaises(NoFile, outfile.read) - self.assertRaises(NoFile, getattr, outfile, 'filename') + outfile = GridOut(fs, file_id=-1) + with self.assertRaises(NoFile): + outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + outfile.open() + outfile.filename infile = GridIn(fs, filename=1) infile.close() - outfile = GridOut(fs, infile._id, _connect=False) + outfile = GridOut(fs, infile._id) outfile.read() outfile.filename + outfile = GridOut(fs, infile._id) + outfile.readchunk() + def test_grid_in_lazy_connect(self): - client = MongoClient('badhost', _connect=False) + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) - self.assertRaises(ConnectionFailure, infile.write, b('data goes here')) - self.assertRaises(ConnectionFailure, infile.close) + with self.assertRaises(ServerSelectionTimeoutError): + infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + infile.close() - def test_grid_out_cursor_options(self): - self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {}, - tailable=True) - self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {}, - fields={"filename":1}) + def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + GridIn((self.rs_or_single_client(w=0)).pymongo_test.fs) + + def test_survive_cursor_not_found(self): + # By default the find command returns 101 documents in the first batch. + # Use 102 batches to cause a single getMore. + chunk_size = 1024 + data = b"d" * (102 * chunk_size) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + db = client.pymongo_test + with GridIn(db.fs, chunk_size=chunk_size) as infile: + infile.write(data) + + with GridOut(db.fs, infile._id) as outfile: + self.assertEqual(len(outfile.readchunk()), chunk_size) + + # Kill the cursor to simulate the cursor timing out on the server + # when an application spends a long time between two calls to + # readchunk(). + assert client.address is not None + client._close_cursor_now( + outfile._chunk_iter._cursor.cursor_id, + _CursorAddress(client.address, db.fs.chunks.full_name), # type: ignore[arg-type] + ) + + # Read the rest of the file without error. + self.assertEqual(len(outfile.read()), len(data) - chunk_size) + + # Paranoid, ensure that a getMore was actually sent. 
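+        # Without a getMore the whole file would have fit in the first find
+        # batch and the cursor kill above would have proven nothing.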
+ self.assertIn("getMore", listener.started_command_names()) + + @client_context.require_sync + def test_zip(self): + zf = BytesIO() + z = zipfile.ZipFile(zf, "w") + z.writestr("test.txt", b"hello world") + z.close() + zf.seek(0) + + f = GridIn(self.db.fs, filename="test.zip") + f.write(zf) + f.close() + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) - cursor = GridOutCursor(self.db.fs, {}) - min_ms = self.db.fs.files.secondary_acceptable_latency_ms - new_ms = cursor._Cursor__secondary_acceptable_latency_ms - self.assertEqual(min_ms, new_ms) - cursor = GridOutCursor(self.db.fs, {}, - secondary_acceptable_latency_ms=100) - min_ms = self.db.fs.files.secondary_acceptable_latency_ms - new_ms = cursor._Cursor__secondary_acceptable_latency_ms - self.assertNotEqual(min_ms, new_ms) - cursor_clone = cursor.clone() - self.assertEqual(cursor_clone.__dict__, cursor.__dict__) + g = GridOut(self.db.fs, f._id) + z = zipfile.ZipFile(g) + self.assertSequenceEqual(z.namelist(), ["test.txt"]) + self.assertEqual(z.read("test.txt"), b"hello world") - self.assertRaises(NotImplementedError, cursor.add_option, 0) - self.assertRaises(NotImplementedError, cursor.remove_option, 0) + def test_grid_out_unsupported_operations(self): + f = GridIn(self.db.fs, chunkSize=3) + f.write(b"hello world") + f.close() + + g = GridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) if __name__ == "__main__": diff --git a/test/test_gridfs.py b/test/test_gridfs.py index d8834c7ef8..8bda041447 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- # -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,98 +13,127 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. 
-""" -import sys -sys.path[0:0] = [""] - -from pymongo.mongo_client import MongoClient -from pymongo.errors import ConnectionFailure -from pymongo.read_preferences import ReadPreference -from test.test_replica_set_client import TestReplicaSetClientBase +"""Tests for the gridfs package.""" +from __future__ import annotations +import asyncio import datetime -import unittest +import sys import threading import time -import gridfs +from io import BytesIO +from test.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] -from bson.py3compat import b, StringIO -from gridfs.errors import (FileExists, - NoFile) -from test.test_client import get_client +from test import IntegrationTest, client_context, unittest from test.utils import joinall +from test.utils_shared import one +import gridfs +from bson.binary import Binary +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.synchronous.grid_file import DEFAULT_CHUNK_SIZE, GridOutCursor +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient + +_IS_SYNC = True -class JustWrite(threading.Thread): +class JustWrite(ConcurrentRunner): def __init__(self, fs, n): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): file = self.fs.new_file(filename="test") - file.write(b("hello")) + file.write(b"hello") file.close() -class JustRead(threading.Thread): - +class JustRead(ConcurrentRunner): def __init__(self, fs, n, results): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): file = self.fs.get("test") data = file.read() self.results.append(data) - assert data == b("hello") + assert data == b"hello" -class TestGridfs(unittest.TestCase): +class TestGridfsNoConnect(unittest.TestCase): + db: Database def setUp(self): - self.db = get_client().pymongo_test - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("alt.files") - self.db.drop_collection("alt.chunks") - self.fs = gridfs.GridFS(self.db) - self.alt = gridfs.GridFS(self.db, "alt") - - def tearDown(self): - self.db = self.fs = self.alt = None + super().setUp() + self.db = MongoClient(connect=False).pymongo_test def test_gridfs(self): self.assertRaises(TypeError, gridfs.GridFS, "foo") self.assertRaises(TypeError, gridfs.GridFS, self.db, 5) + +class TestGridfs(IntegrationTest): + fs: gridfs.GridFS + alt: gridfs.GridFS + + def setUp(self): + super().setUp() + self.fs = gridfs.GridFS(self.db) + self.alt = gridfs.GridFS(self.db, "alt") + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + def test_basic(self): - oid = self.fs.put(b("hello world")) - self.assertEqual(b("hello world"), self.fs.get(oid).read()) - self.assertEqual(1, self.db.fs.files.count()) - self.assertEqual(1, self.db.fs.chunks.count()) + oid = self.fs.put(b"hello world") + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) self.fs.delete(oid) - self.assertRaises(NoFile, self.fs.get, oid) - self.assertEqual(0, 
self.db.fs.files.count()) - self.assertEqual(0, self.db.fs.chunks.count()) - - self.assertRaises(NoFile, self.fs.get, "foo") - oid = self.fs.put(b("hello world"), _id="foo") + with self.assertRaises(NoFile): + self.fs.get(oid) + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + + with self.assertRaises(NoFile): + self.fs.get("foo") + oid = self.fs.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b("hello world"), self.fs.get("foo").read()) + self.assertEqual(b"hello world", (self.fs.get("foo")).read()) + + def test_multi_chunk_delete(self): + self.db.fs.drop() + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + gfs = gridfs.GridFS(self.db) + oid = gfs.put(b"hello", chunkSize=1) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(5, self.db.fs.chunks.count_documents({})) + gfs.delete(oid) + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) def test_list(self): self.assertEqual([], self.fs.list()) - self.fs.put(b("hello world")) + self.fs.put(b"hello world") self.assertEqual([], self.fs.list()) # PYTHON-598: in server versions before 2.5.x, creating an index on @@ -113,186 +141,248 @@ def test_list(self): self.fs.get_last_version() self.assertEqual([], self.fs.list()) - self.fs.put(b(""), filename="mike") - self.fs.put(b("foo"), filename="test") - self.fs.put(b(""), filename="hello world") + self.fs.put(b"", filename="mike") + self.fs.put(b"foo", filename="test") + self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.fs.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.fs.list())) def test_empty_file(self): - oid = self.fs.put(b("")) - self.assertEqual(b(""), self.fs.get(oid).read()) - self.assertEqual(1, self.db.fs.files.count()) - self.assertEqual(0, self.db.fs.chunks.count()) + oid = self.fs.put(b"") + self.assertEqual(b"", (self.fs.get(oid)).read()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], basestring)) + self.assertNotIn("md5", raw) + + def test_corrupt_chunk(self): + files_id = self.fs.put(b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) + try: + out = self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + out.read() + + out = self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + out.readline() + finally: + self.fs.delete(files_id) + + def test_put_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. 
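+        # put() must therefore recreate both required GridFS indexes from
+        # scratch on the first write.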
+ chunks.drop() + files.drop() + self.fs.put(b"junk") + + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in (chunks.index_information()).values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (files.index_information()).values() + ) + ) def test_alt_collection(self): - oid = self.alt.put(b("hello world")) - self.assertEqual(b("hello world"), self.alt.get(oid).read()) - self.assertEqual(1, self.db.alt.files.count()) - self.assertEqual(1, self.db.alt.chunks.count()) + oid = self.alt.put(b"hello world") + self.assertEqual(b"hello world", (self.alt.get(oid)).read()) + self.assertEqual(1, self.db.alt.files.count_documents({})) + self.assertEqual(1, self.db.alt.chunks.count_documents({})) self.alt.delete(oid) - self.assertRaises(NoFile, self.alt.get, oid) - self.assertEqual(0, self.db.alt.files.count()) - self.assertEqual(0, self.db.alt.chunks.count()) - - self.assertRaises(NoFile, self.alt.get, "foo") - oid = self.alt.put(b("hello world"), _id="foo") + with self.assertRaises(NoFile): + self.alt.get(oid) + self.assertEqual(0, self.db.alt.files.count_documents({})) + self.assertEqual(0, self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + self.alt.get("foo") + oid = self.alt.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b("hello world"), self.alt.get("foo").read()) + self.assertEqual(b"hello world", (self.alt.get("foo")).read()) - self.alt.put(b(""), filename="mike") - self.alt.put(b("foo"), filename="test") - self.alt.put(b(""), filename="hello world") + self.alt.put(b"", filename="mike") + self.alt.put(b"foo", filename="test") + self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.alt.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.alt.list())) def test_threaded_reads(self): - self.fs.put(b("hello"), _id="test") + self.fs.put(b"hello", _id="test") - threads = [] - results = [] + tasks = [] + results: list = [] for i in range(10): - threads.append(JustRead(self.fs, 10, results)) - threads[i].start() + tasks.append(JustRead(self.fs, 10, results)) + tasks[i].start() - joinall(threads) + joinall(tasks) - self.assertEqual( - 100 * [b('hello')], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): - threads = [] + tasks = [] for i in range(10): - threads.append(JustWrite(self.fs, 10)) - threads[i].start() + tasks.append(JustWrite(self.fs, 10)) + tasks[i].start() - joinall(threads) + joinall(tasks) f = self.fs.get_last_version("test") - self.assertEqual(f.read(), b("hello")) + self.assertEqual(f.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.find({'filename':'test'}).count() - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): - one = self.fs.put(b("foo"), filename="test") + one = self.fs.put(b"foo", filename="test") time.sleep(0.01) two = self.fs.new_file(filename="test") - two.write(b("bar")) + two.write(b"bar") two.close() time.sleep(0.01) two = two._id - three = self.fs.put(b("baz"), filename="test") + three = self.fs.put(b"baz", filename="test") - self.assertEqual(b("baz"), self.fs.get_last_version("test").read()) + self.assertEqual(b"baz", (self.fs.get_last_version("test")).read()) self.fs.delete(three) - self.assertEqual(b("bar"), self.fs.get_last_version("test").read()) + self.assertEqual(b"bar", 
(self.fs.get_last_version("test")).read()) self.fs.delete(two) - self.assertEqual(b("foo"), self.fs.get_last_version("test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version("test")).read()) self.fs.delete(one) - self.assertRaises(NoFile, self.fs.get_last_version, "test") + with self.assertRaises(NoFile): + self.fs.get_last_version("test") def test_get_last_version_with_metadata(self): - one = self.fs.put(b("foo"), filename="test", author="author") + one = self.fs.put(b"foo", filename="test", author="author") time.sleep(0.01) - two = self.fs.put(b("bar"), filename="test", author="author") + two = self.fs.put(b"bar", filename="test", author="author") - self.assertEqual(b("bar"), self.fs.get_last_version(author="author").read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author")).read()) self.fs.delete(two) - self.assertEqual(b("foo"), self.fs.get_last_version(author="author").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author")).read()) self.fs.delete(one) - one = self.fs.put(b("foo"), filename="test", author="author1") + one = self.fs.put(b"foo", filename="test", author="author1") time.sleep(0.01) - two = self.fs.put(b("bar"), filename="test", author="author2") + two = self.fs.put(b"bar", filename="test", author="author2") - self.assertEqual(b("foo"), self.fs.get_last_version(author="author1").read()) - self.assertEqual(b("bar"), self.fs.get_last_version(author="author2").read()) - self.assertEqual(b("bar"), self.fs.get_last_version(filename="test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(filename="test")).read()) - self.assertRaises(NoFile, self.fs.get_last_version, author="author3") - self.assertRaises(NoFile, self.fs.get_last_version, filename="nottest", author="author1") + with self.assertRaises(NoFile): + self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + self.fs.get_last_version(filename="nottest", author="author1") self.fs.delete(one) self.fs.delete(two) def test_get_version(self): - self.fs.put(b("foo"), filename="test") + self.fs.put(b"foo", filename="test") time.sleep(0.01) - self.fs.put(b("bar"), filename="test") + self.fs.put(b"bar", filename="test") time.sleep(0.01) - self.fs.put(b("baz"), filename="test") + self.fs.put(b"baz", filename="test") time.sleep(0.01) - self.assertEqual(b("foo"), self.fs.get_version("test", 0).read()) - self.assertEqual(b("bar"), self.fs.get_version("test", 1).read()) - self.assertEqual(b("baz"), self.fs.get_version("test", 2).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", 2)).read()) - self.assertEqual(b("baz"), self.fs.get_version("test", -1).read()) - self.assertEqual(b("bar"), self.fs.get_version("test", -2).read()) - self.assertEqual(b("foo"), self.fs.get_version("test", -3).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", -3)).read()) - self.assertRaises(NoFile, self.fs.get_version, "test", 3) - self.assertRaises(NoFile, self.fs.get_version, "test", -4) + with self.assertRaises(NoFile): + self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + self.fs.get_version("test", 
-4) def test_get_version_with_metadata(self): - one = self.fs.put(b("foo"), filename="test", author="author1") + one = self.fs.put(b"foo", filename="test", author="author1") time.sleep(0.01) - two = self.fs.put(b("bar"), filename="test", author="author1") + two = self.fs.put(b"bar", filename="test", author="author1") time.sleep(0.01) - three = self.fs.put(b("baz"), filename="test", author="author2") + three = self.fs.put(b"baz", filename="test", author="author2") - self.assertEqual(b("foo"), self.fs.get_version(filename="test", author="author1", version=-2).read()) - self.assertEqual(b("bar"), self.fs.get_version(filename="test", author="author1", version=-1).read()) - self.assertEqual(b("foo"), self.fs.get_version(filename="test", author="author1", version=0).read()) - self.assertEqual(b("bar"), self.fs.get_version(filename="test", author="author1", version=1).read()) - self.assertEqual(b("baz"), self.fs.get_version(filename="test", author="author2", version=0).read()) - self.assertEqual(b("baz"), self.fs.get_version(filename="test", version=-1).read()) - self.assertEqual(b("baz"), self.fs.get_version(filename="test", version=2).read()) + self.assertEqual( + b"foo", + (self.fs.get_version(filename="test", author="author1", version=-2)).read(), + ) + self.assertEqual( + b"bar", + (self.fs.get_version(filename="test", author="author1", version=-1)).read(), + ) + self.assertEqual( + b"foo", + (self.fs.get_version(filename="test", author="author1", version=0)).read(), + ) + self.assertEqual( + b"bar", + (self.fs.get_version(filename="test", author="author1", version=1)).read(), + ) + self.assertEqual( + b"baz", + (self.fs.get_version(filename="test", author="author2", version=0)).read(), + ) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=-1)).read()) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=2)).read()) - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) self.fs.delete(three) def test_put_filelike(self): - oid = self.fs.put(StringIO(b("hello world")), chunk_size=1) - self.assertEqual(11, self.db.fs.chunks.count()) - self.assertEqual(b("hello world"), self.fs.get(oid).read()) + oid = self.fs.put(BytesIO(b"hello world"), chunk_size=1) + self.assertEqual(11, self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) def test_file_exists(self): - db = get_client(w=1).pymongo_test - fs = gridfs.GridFS(db) + oid = self.fs.put(b"hello") + with self.assertRaises(FileExists): + self.fs.put(b"world", _id=oid) - oid = fs.put(b("hello")) - self.assertRaises(FileExists, fs.put, b("world"), _id=oid) - - one = fs.new_file(_id=123) - one.write(b("some content")) + one = self.fs.new_file(_id=123) + one.write(b"some content") one.close() - two = fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b('x' * 262146)) + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + with self.assertRaises(FileExists): + two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). 
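+        # It is the duplicate (files_id, n) key in fs.chunks that surfaces
+        # here as FileExists, before any replacement data is committed.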
+ self.assertEqual((self.fs.get(123)).read(), b"some content") + + two = self.fs.new_file(_id=123) + two.write(b"some content") + with self.assertRaises(FileExists): + two.close() + # Original file is still readable. + self.assertEqual((self.fs.get(123)).read(), b"some content") def test_exists(self): - oid = self.fs.put(b("hello")) + oid = self.fs.put(b"hello") self.assertTrue(self.fs.exists(oid)) self.assertTrue(self.fs.exists({"_id": oid})) self.assertTrue(self.fs.exists(_id=oid)) @@ -300,7 +390,7 @@ def test_exists(self): self.assertFalse(self.fs.exists(filename="mike")) self.assertFalse(self.fs.exists("mike")) - oid = self.fs.put(b("hello"), filename="mike", foo=12) + oid = self.fs.put(b"hello", filename="mike", foo=12) self.assertTrue(self.fs.exists(oid)) self.assertTrue(self.fs.exists({"_id": oid})) self.assertTrue(self.fs.exists(_id=oid)) @@ -317,141 +407,186 @@ def test_exists(self): self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, u"hello") + with self.assertRaises(TypeError): + self.fs.put("hello") - oid = self.fs.put(u"hello", encoding="utf-8") - self.assertEqual(b("hello"), self.fs.get(oid).read()) - self.assertEqual("utf-8", self.fs.get(oid).encoding) + oid = self.fs.put("hello", encoding="utf-8") + self.assertEqual(b"hello", (self.fs.get(oid)).read()) + self.assertEqual("utf-8", (self.fs.get(oid)).encoding) - oid = self.fs.put(u"aé", encoding="iso-8859-1") - self.assertEqual(u"aé".encode("iso-8859-1"), self.fs.get(oid).read()) - self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) + oid = self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), (self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (self.fs.get(oid)).encoding) def test_missing_length_iter(self): # Test fix that guards against PHP-237 - self.fs.put(b(""), filename="empty") + self.fs.put(b"", filename="empty") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") - self.db.fs.files.save(doc) + self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) f = self.fs.get_last_version(filename="empty") def iterate_file(grid_file): - for chunk in grid_file: + for _chunk in grid_file: pass return True self.assertTrue(iterate_file(f)) - def test_request(self): - c = self.db.connection - c.start_request() - n = 5 - for i in range(n): - file = self.fs.new_file(filename="test") - file.write(b("hello")) - file.close() - - c.end_request() - - self.assertEqual( - n, - self.db.fs.files.find({'filename':'test'}).count() - ) - - def test_gridfs_request(self): - self.assertFalse(self.db.connection.in_request()) - self.fs.put(b("hello world")) - # Request started and ended by put(), we're back to original state - self.assertFalse(self.db.connection.in_request()) - def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', _connect=False) + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db - self.assertRaises(ConnectionFailure, gridfs.GridFS, db) + gfs = gridfs.GridFS(db) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.list() - fs = gridfs.GridFS(db, _connect=False) - f = fs.new_file() # Still no connection. 
- self.assertRaises(ConnectionFailure, f.close) + fs = gridfs.GridFS(db) + f = fs.new_file() + with self.assertRaises(ServerSelectionTimeoutError): + f.close() def test_gridfs_find(self): - self.fs.put(b("test2"), filename="two") + self.fs.put(b"test2", filename="two") time.sleep(0.01) - self.fs.put(b("test2+"), filename="two") + self.fs.put(b"test2+", filename="two") time.sleep(0.01) - self.fs.put(b("test1"), filename="one") + self.fs.put(b"test1", filename="one") time.sleep(0.01) - self.fs.put(b("test2++"), filename="two") - self.assertEqual(3, self.fs.find({"filename":"two"}).count()) - self.assertEqual(4, self.fs.find().count()) - cursor = self.fs.find(timeout=False).sort("uploadDate", -1).skip(1).limit(2) - # 2to3 hint... + self.fs.put(b"test2++", filename="two") + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) gout = cursor.next() - self.assertEqual(b("test1"), gout.read()) + self.assertEqual(b"test1", gout.read()) cursor.rewind() gout = cursor.next() - self.assertEqual(b("test1"), gout.read()) + self.assertEqual(b"test1", gout.read()) gout = cursor.next() - self.assertEqual(b("test2+"), gout.read()) - self.assertRaises(StopIteration, cursor.next) + self.assertEqual(b"test2+", gout.read()) + with self.assertRaises(StopIteration): + cursor.__next__() + cursor.rewind() + items = cursor.to_list() + self.assertEqual(len(items), 2) + cursor.rewind() + items = cursor.to_list(1) + self.assertEqual(len(items), 1) cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. + cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + cursor.__del__() # no error + + def test_gridfs_find_one(self): + self.assertEqual(None, self.fs.find_one()) + + id1 = self.fs.put(b"test1", filename="file1") + res = self.fs.find_one() + assert res is not None + self.assertEqual(b"test1", res.read()) + + id2 = self.fs.put(b"test2", filename="file2", meta="data") + res1 = self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b"test1", res1.read()) + res2 = self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b"test2", res2.read()) + + res3 = self.fs.find_one({"filename": "file1"}) + assert res3 is not None + self.assertEqual(b"test1", res3.read()) + + res4 = self.fs.find_one(id2) + assert res4 is not None + self.assertEqual("data", res4.meta) + + def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy GridFS clients, store size as a float. + data = b"data" + self.fs.put(data, filename="f") + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, (self.fs.get_version("f")).read()) + + def test_unacknowledged(self): + # w=0 is prohibited. 
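+        # GridFS needs acknowledged writes to detect duplicate files and
+        # failed chunk inserts, so a w=0 client is rejected up front.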
+ with self.assertRaises(ConfigurationError): + gridfs.GridFS((self.rs_or_single_client(w=0)).pymongo_test) + + def test_md5(self): + gin = self.fs.new_file() + gin.write(b"no md5 sum") + gin.close() + self.assertIsNone(gin.md5) + + gout = self.fs.get(gin._id) + self.assertIsNone(gout.md5) + + _id = self.fs.put(b"still no md5 sum") + gout = self.fs.get(_id) + self.assertIsNone(gout.md5) + + +class TestGridfsReplicaSet(IntegrationTest): + @client_context.require_secondaries_count(1) + def setUp(self): + super().setUp() + + @classmethod + @client_context.require_connection + def tearDownClass(cls): + client_context.client.drop_database("gfsreplica") -class TestGridfsReplicaSet(TestReplicaSetClientBase): def test_gridfs_replica_set(self): - rsc = self._get_client( - w=self.w, wtimeout=5000, - read_preference=ReadPreference.SECONDARY) + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - try: - fs = gridfs.GridFS(rsc.pymongo_test) - oid = fs.put(b('foo')) - content = fs.get(oid).read() - self.assertEqual(b('foo'), content) - finally: - rsc.close() + fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") - def test_gridfs_secondary(self): - primary_host, primary_port = self.primary - primary_connection = MongoClient(primary_host, primary_port) + gin = fs.new_file() + self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) + + oid = fs.put(b"foo") + content = (fs.get(oid)).read() + self.assertEqual(b"foo", content) - secondary_host, secondary_port = self.secondaries[0] - for secondary_connection in [ - MongoClient(secondary_host, secondary_port, slave_okay=True), - MongoClient(secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY), - ]: - primary_connection.pymongo_test.drop_collection("fs.files") - primary_connection.pymongo_test.drop_collection("fs.chunks") + def test_gridfs_secondary(self): + secondary_host, secondary_port = one(self.client.secondaries) + secondary_connection = self.single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) - # Should detect it's connected to secondary and not attempt to - # create index - fs = gridfs.GridFS(secondary_connection.pymongo_test) + # Should detect it's connected to secondary and not attempt to + # create index + fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") - # This won't detect secondary, raises error - self.assertRaises(ConnectionFailure, fs.put, b('foo')) + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + fs.put(b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. - secondary_host, secondary_port = self.secondaries[0] - client = MongoClient( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY, - _connect=False) + secondary_host, secondary_port = one(self.client.secondaries) + client = self.single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - fs = gridfs.GridFS(client.test_gridfs_secondary_lazy, _connect=False) + fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. 
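+        # The reads below are served by the secondary; only the put() needs
+        # a primary, failing with NotPrimaryError.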
- self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(ConnectionFailure, fs.put, 'data') - - def tearDown(self): - rsc = self._get_client() - rsc.pymongo_test.drop_collection('fs.files') - rsc.pymongo_test.drop_collection('fs.chunks') - rsc.close() + with self.assertRaises(NoFile): + fs.get_last_version() + with self.assertRaises(NotPrimaryError): + fs.put("data", encoding="utf-8") if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py new file mode 100644 index 0000000000..9dbb082ee9 --- /dev/null +++ b/test/test_gridfs_bucket.py @@ -0,0 +1,563 @@ +# +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import itertools +import sys +import threading +import time +from io import BytesIO +from test.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils import joinall +from test.utils_shared import one + +import gridfs +from bson.binary import Binary +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.mongo_client import MongoClient + +_IS_SYNC = True + + +class JustWrite(ConcurrentRunner): + def __init__(self, gfs, num): + super().__init__() + self.gfs = gfs + self.num = num + self.daemon = True + + def run(self): + for _ in range(self.num): + file = self.gfs.open_upload_stream("test") + file.write(b"hello") + file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, gfs, num, results): + super().__init__() + self.gfs = gfs + self.num = num + self.results = results + self.daemon = True + + def run(self): + for _ in range(self.num): + file = self.gfs.open_download_stream_by_name("test") + data = file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfs(IntegrationTest): + fs: gridfs.GridFSBucket + alt: gridfs.GridFSBucket + + def setUp(self): + super().setUp() + self.fs = gridfs.GridFSBucket(self.db) + self.alt = gridfs.GridFSBucket(self.db, bucket_name="alt") + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + def test_basic(self): + oid = self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) + + self.fs.delete(oid) + with self.assertRaises(NoFile): + self.fs.open_download_stream(oid) + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, 
self.db.fs.chunks.count_documents({})) + + def test_multi_chunk_delete(self): + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + gfs = gridfs.GridFSBucket(self.db) + oid = gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(5, self.db.fs.chunks.count_documents({})) + gfs.delete(oid) + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + + def test_delete_by_name(self): + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + gfs = gridfs.GridFSBucket(self.db) + gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(5, self.db.fs.chunks.count_documents({})) + gfs.delete_by_name("test_filename") + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + + def test_empty_file(self): + oid = self.fs.upload_from_stream("test_filename", b"") + self.assertEqual(b"", (self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + + raw = self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + def test_corrupt_chunk(self): + files_id = self.fs.upload_from_stream("test_filename", b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) + try: + out = self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + out.read() + + out = self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + out.readline() + finally: + self.fs.delete(files_id) + + def test_upload_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. + chunks.drop() + files.drop() + self.fs.upload_from_stream("filename", b"junk") + + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", + ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + + def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [("filename", i), ("uploadDate", j)] + self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) + + # No error. 
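+            # upload_from_stream treats the numerically equivalent spec as
+            # the existing index and does not try to create a duplicate.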
+ self.fs.upload_from_stream("filename", b"data") + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + files.drop() + + def test_alt_collection(self): + oid = self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", (self.alt.open_download_stream(oid)).read()) + self.assertEqual(1, self.db.alt.files.count_documents({})) + self.assertEqual(1, self.db.alt.chunks.count_documents({})) + + self.alt.delete(oid) + with self.assertRaises(NoFile): + self.alt.open_download_stream(oid) + self.assertEqual(0, self.db.alt.files.count_documents({})) + self.assertEqual(0, self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + self.alt.open_download_stream("foo") + self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual(b"hello world", (self.alt.open_download_stream_by_name("foo")).read()) + + self.alt.upload_from_stream("mike", b"") + self.alt.upload_from_stream("test", b"foo") + self.alt.upload_from_stream("hello world", b"") + + self.assertEqual( + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in self.db.alt.files.find().to_list()}, + ) + + def test_threaded_reads(self): + self.fs.upload_from_stream("test", b"hello") + + threads = [] + results: list = [] + for i in range(10): + threads.append(JustRead(self.fs, 10, results)) + threads[i].start() + + joinall(threads) + + self.assertEqual(100 * [b"hello"], results) + + def test_threaded_writes(self): + threads = [] + for i in range(10): + threads.append(JustWrite(self.fs, 10)) + threads[i].start() + + joinall(threads) + + fstr = self.fs.open_download_stream_by_name("test") + self.assertEqual(fstr.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) + + def test_get_last_version(self): + one = self.fs.upload_from_stream("test", b"foo") + time.sleep(0.01) + two = self.fs.open_upload_stream("test") + two.write(b"bar") + two.close() + time.sleep(0.01) + two = two._id + three = self.fs.upload_from_stream("test", b"baz") + + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test")).read()) + self.fs.delete(three) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test")).read()) + self.fs.delete(two) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test")).read()) + self.fs.delete(one) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test") + + def test_get_version(self): + self.fs.upload_from_stream("test", b"foo") + time.sleep(0.01) + self.fs.upload_from_stream("test", b"bar") + time.sleep(0.01) + self.fs.upload_from_stream("test", b"baz") + time.sleep(0.01) + + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=0)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=1)).read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=2)).read()) + + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=-1)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=-2)).read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=-3)).read()) + + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): 
+ self.fs.open_download_stream_by_name("test", revision=-4) + + def test_upload_from_stream(self): + oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) + self.assertEqual(11, self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) + + def test_upload_from_stream_with_id(self): + oid = ObjectId() + self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", (self.fs.open_download_stream(oid)).read()) + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @client_context.require_failCommand_fail_point + def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + gin.write(b"hello world") + # 3 chunks were uploaded. + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.abort() + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + def test_upload_batching(self): + with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. + self.assertEqual(10, self.db.fs.chunks.count_documents({"files_id": gin._id})) + + def test_open_upload_stream(self): + gin = self.fs.open_upload_stream("from_stream") + gin.write(b"from stream") + gin.close() + self.assertEqual(b"from stream", (self.fs.open_download_stream(gin._id)).read()) + + def test_open_upload_stream_with_id(self): + oid = ObjectId() + gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") + gin.write(b"from stream with custom id") + gin.close() + self.assertEqual(b"from stream with custom id", (self.fs.open_download_stream(oid)).read()) + + def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + self.fs.upload_from_stream("empty", b"") + doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + fstr = self.fs.open_download_stream_by_name("empty") + + def iterate_file(grid_file): + for _ in grid_file: + pass + return True + + self.assertTrue(iterate_file(fstr)) + + def test_gridfs_lazy_connect(self): + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=0) + cdb = client.db + gfs = gridfs.GridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.delete(0) + + gfs = gridfs.GridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.upload_from_stream("test", b"") # Still no connection. 
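+
+    # The round-trip that the following tests keep exercising, as a minimal
+    # sketch (assuming a connected Database handle named `db`; the filename
+    # is illustrative):
+    #
+    #     bucket = gridfs.GridFSBucket(db)
+    #     file_id = bucket.upload_from_stream("example.txt", b"payload")
+    #     assert (bucket.open_download_stream(file_id)).read() == b"payload"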
+ + def test_gridfs_find(self): + self.fs.upload_from_stream("two", b"test2") + time.sleep(0.01) + self.fs.upload_from_stream("two", b"test2+") + time.sleep(0.01) + self.fs.upload_from_stream("one", b"test1") + time.sleep(0.01) + self.fs.upload_from_stream("two", b"test2++") + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) + cursor = self.fs.find( + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) + gout = cursor.next() + self.assertEqual(b"test1", gout.read()) + cursor.rewind() + gout = cursor.next() + self.assertEqual(b"test1", gout.read()) + gout = cursor.next() + self.assertEqual(b"test2+", gout.read()) + with self.assertRaises(StopIteration): + cursor.next() + cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy GridFS clients, store size as a float. + data = b"data" + self.fs.upload_from_stream("f", data) + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, (self.fs.open_download_stream_by_name("f")).read()) + + def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + gridfs.GridFSBucket((self.rs_or_single_client(w=0)).pymongo_test) + + def test_rename(self): + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) + + self.fs.rename(_id, "second_name") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + + def test_rename_by_name(self): + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) + + self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) + def test_abort(self): + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) + gin.write(b"test1") + gin.write(b"test2") + gin.write(b"test3") + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.abort() + self.assertTrue(gin.closed) + with self.assertRaises(ValueError): + gin.write(b"test4") + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) + + def test_download_to_stream(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + oid = self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. 
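+        # (chunk_size_bytes=1 below stores one byte per chunk, so the 11-byte
+        # payload must produce exactly 11 chunk documents.)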
+ self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") + file1.seek(0) + oid = self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + def test_download_to_stream_by_name(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + _ = self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + self.fs.download_to_stream_by_name("one_chunk", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. + self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") + file1.seek(0) + self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, self.db.fs.chunks.count_documents({})) + + file2 = BytesIO() + self.fs.download_to_stream_by_name("many_chunks", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + def test_md5(self): + gin = self.fs.open_upload_stream("no md5") + gin.write(b"no md5 sum") + gin.close() + self.assertIsNone(gin.md5) + + gout = self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") + gin.write(b"also no md5 sum") + gin.close() + self.assertIsNone(gin.md5) + + gout = self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + +class TestGridfsBucketReplicaSet(IntegrationTest): + @client_context.require_secondaries_count(1) + def setUp(self): + super().setUp() + + @classmethod + @client_context.require_connection + def tearDownClass(cls): + client_context.client.drop_database("gfsbucketreplica") + + def test_gridfs_replica_set(self): + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) + + gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = gfs.upload_from_stream("test_filename", b"foo") + content = (gfs.open_download_stream(oid)).read() + self.assertEqual(b"foo", content) + + def test_gridfs_secondary(self): + secondary_host, secondary_port = one(self.client.secondaries) + secondary_connection = self.single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"foo") + + def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not attempt to + # create index. + secondary_host, secondary_port = one(self.client.secondaries) + client = self.single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") + + # Connects, doesn't create index. 
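+        # (A read is legal against a secondary; only a write would need to create
+        # the GridFS indexes, which is what the NotPrimaryError case below checks.)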
+ with self.assertRaises(NoFile): + gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"data") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py new file mode 100644 index 0000000000..e84e19725e --- /dev/null +++ b/test/test_gridfs_spec.py @@ -0,0 +1,39 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the GridFS unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py new file mode 100644 index 0000000000..7864caf6e1 --- /dev/null +++ b/test/test_heartbeat_monitoring.py @@ -0,0 +1,96 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitoring of the server heartbeats.""" +from __future__ import annotations + +import sys +from test.utils import MockPool + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_knobs, unittest +from test.utils_shared import HeartbeatEventListener, wait_until + +from pymongo.errors import ConnectionFailure +from pymongo.hello import Hello, HelloCompat +from pymongo.synchronous.monitor import Monitor + +_IS_SYNC = True + + +class TestHeartbeatMonitoring(IntegrationTest): + def create_mock_monitor(self, responses, uri, expected_results): + listener = HeartbeatEventListener() + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + + class MockMonitor(Monitor): + def _check_with_socket(self, *args, **kwargs): + if isinstance(responses[1], Exception): + raise responses[1] + return Hello(responses[1]), 99 + + _ = self.single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=MockPool, + connect=True, + ) + + expected_len = len(expected_results) + # Wait for *at least* expected_len number of results. 
The
+        # monitor thread may run multiple times during the execution
+        # of this test.
+        wait_until(lambda: len(listener.events) >= expected_len, "publish all events")
+
+        # zip gives us len(expected_results) pairs.
+        for expected, actual in zip(expected_results, listener.events):
+            self.assertEqual(expected, actual.__class__.__name__)
+            self.assertEqual(actual.connection_id, responses[0])
+            if expected != "ServerHeartbeatStartedEvent":
+                if isinstance(actual.reply, Hello):
+                    self.assertEqual(actual.duration, 99)
+                    self.assertEqual(actual.reply._doc, responses[1])
+                else:
+                    self.assertEqual(actual.reply, responses[1])
+
+    def test_standalone(self):
+        responses = (
+            ("a", 27017),
+            {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1},
+        )
+        uri = "mongodb://a:27017"
+        expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"]
+
+        self.create_mock_monitor(responses, uri, expected_results)
+
+    def test_standalone_error(self):
+        responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE"))
+        uri = "mongodb://a:27017"
+        # _check_with_socket failing results in a second attempt.
+        expected_results = [
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+        ]
+
+        self.create_mock_monitor(responses, uri, expected_results)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_index_management.py b/test/test_index_management.py
new file mode 100644
index 0000000000..dea8c0e2be
--- /dev/null
+++ b/test/test_index_management.py
@@ -0,0 +1,379 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the index management spec tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import pathlib
+import sys
+import time
+import uuid
+from typing import Any, Mapping
+
+import pytest
+
+sys.path[0:0] = [""]
+
+from test import IntegrationTest, PyMongoTestCase, unittest
+from test.unified_format import generate_test_classes
+from test.utils_shared import AllowListEventListener, OvertCommandListener
+
+from pymongo.errors import OperationFailure
+from pymongo.operations import SearchIndexModel
+from pymongo.read_concern import ReadConcern
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = True
+
+pytestmark = pytest.mark.search_index
+
+# Location of JSON test specifications.
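+# (_IS_SYNC is rewritten by the async test generator; the asynchronous variant of
+# this module resolves the same spec directory from one level higher.)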
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management") + +_NAME = "test-search-index" + + +class TestCreateSearchIndex(IntegrationTest): + def test_inputs(self): + listener = AllowListEventListener("createSearchIndexes") + client = self.simple_client(event_listeners=[listener]) + coll = client.test.test + coll.drop() + definition = dict(mappings=dict(dynamic=True)) + model_kwarg_list: list[Mapping[str, Any]] = [ + dict(definition=definition, name=None), + dict(definition=definition, name="test"), + ] + for model_kwargs in model_kwarg_list: + model = SearchIndexModel(**model_kwargs) + with self.assertRaises(OperationFailure): + coll.create_search_index(model) + with self.assertRaises(OperationFailure): + coll.create_search_index(model_kwargs) + + listener.reset() + with self.assertRaises(OperationFailure): + coll.create_search_index({"definition": definition, "arbitraryOption": 1}) + self.assertEqual( + {"definition": definition, "arbitraryOption": 1}, + listener.events[0].command["indexes"][0], + ) + + listener.reset() + with self.assertRaises(OperationFailure): + coll.create_search_index({"definition": definition, "type": "search"}) + self.assertEqual( + {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0] + ) + + +class SearchIndexIntegrationBase(PyMongoTestCase): + db_name = "test_search_index_base" + + @classmethod + def setUpClass(cls) -> None: + cls.url = os.environ.get("MONGODB_URI") + cls.username = os.environ["DB_USER"] + cls.password = os.environ["DB_PASSWORD"] + cls.listener = OvertCommandListener() + + def setUp(self) -> None: + self.client = self.simple_client( + self.url, + username=self.username, + password=self.password, + event_listeners=[self.listener], + ) + self.client.drop_database(_NAME) + self.db = self.client[self.db_name] + + def tearDown(self): + self.client.drop_database(_NAME) + + def wait_for_ready(self, coll, name=_NAME, predicate=None): + """Wait for a search index to be ready.""" + indices: list[Mapping[str, Any]] = [] + if predicate is None: + predicate = lambda index: index.get("queryable") is True + + while True: + indices = (coll.list_search_indexes(name)).to_list() + if len(indices) and predicate(indices[0]): + return indices[0] + time.sleep(5) + + +class TestSearchIndexIntegration(SearchIndexIntegrationBase): + db_name = "test_search_index" + + def test_comment_field(self): + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Create a new search index on ``coll0`` that implicitly passes its type. + search_definition = {"mappings": {"dynamic": False}} + self.listener.reset() + implicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo" + ) + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + # Get the index definition. 
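+        # (list_search_indexes is implemented as an aggregation under the hood, so
+        # the comment is expected to propagate to that command as well.)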
+        self.listener.reset()
+        (coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next()
+        event = self.listener.events[0]
+        self.assertEqual(event.command["comment"], "foo")
+
+
+class TestSearchIndexProse(SearchIndexIntegrationBase):
+    db_name = "test_search_index_prose"
+
+    def test_case_1(self):
+        """Driver can successfully create and list search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition:
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        coll0.insert_one({})
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = self.wait_for_ready(coll0)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    def test_case_2(self):
+        """Driver can successfully create multiple indexes in batch."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper.
+        name1 = "test-search-index-1"
+        name2 = "test-search-index-2"
+        definition = {"mappings": {"dynamic": False}}
+        index_definitions: list[dict[str, Any]] = [
+            {"name": name1, "definition": definition},
+            {"name": name2, "definition": definition},
+        ]
+        coll0.create_search_indexes(
+            [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions]
+        )
+
+        # Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``.
+        indices = (coll0.list_search_indexes()).to_list()
+        names = [i["name"] for i in indices]
+        self.assertIn(name1, names)
+        self.assertIn(name2, names)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index-1`` is present and the index has a field ``queryable`` with the value of ``true``. Store the result in ``index1``.
+        # An index with the ``name`` of ``test-search-index-2`` is present and the index has a field ``queryable`` with the value of ``true``. Store the result in ``index2``.
+        index1 = self.wait_for_ready(coll0, name1)
+        index2 = self.wait_for_ready(coll0, name2)
+
+        # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }``.
+        for index in [index1, index2]:
+            self.assertIn("latestDefinition", index)
+            self.assertEqual(index["latestDefinition"], definition)
+
+    def test_case_3(self):
+        """Driver can successfully drop search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
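+        # (A fresh random name keeps this case isolated from indexes created by the
+        # other prose tests.)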
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, "test-search-index")
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with the value of ``true``.
+        self.wait_for_ready(coll0)
+
+        # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name.
+        coll0.drop_search_index(_NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array.
+        t0 = time.time()
+        while True:
+            indices = (coll0.list_search_indexes()).to_list()
+            if not indices:
+                break
+            if (time.time() - t0) / 60 > 5:
+                raise TimeoutError("Timed out waiting for index deletion")
+            time.sleep(5)
+
+    def test_case_4(self):
+        """Driver can update a search index."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with the value of ``true``.
+        self.wait_for_ready(coll0)
+
+        # Run an ``updateSearchIndex`` on ``coll0``.
+        # Assert that the command does not error and the server responds with a success.
+        model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}}
+        coll0.update_search_index(_NAME, model2["definition"])
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``.
+        # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``.
+        predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY"
+        self.wait_for_ready(coll0, predicate=predicate)
+
+        # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``.
+        index = ((coll0.list_search_indexes(_NAME)).to_list())[0]
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model2["definition"])

+    def test_case_5(self):
+        """``dropSearchIndex`` suppresses namespace not found errors."""
+        # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server.
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Run a ``dropSearchIndex`` command and assert that no error is thrown.
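+        # (The server replies with a namespace-not-found error here; the helper is
+        # expected to swallow it, per the prose test description above.)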
+        coll0.drop_search_index("foo")
+
+    def test_case_6(self):
+        """Driver can successfully create and list search indexes with non-default readConcern and writeConcern."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Apply a write concern ``WriteConcern(w=1)`` and a read concern ``ReadConcern(level="majority")`` to ``coll0``.
+        coll0 = coll0.with_options(
+            write_concern=WriteConcern(w=1), read_concern=ReadConcern(level="majority")
+        )
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper.
+        name = "test-search-index-case6"
+        model = {"name": name, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index-case6"``.
+        self.assertEqual(resp, name)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = self.wait_for_ready(coll0, name)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    def test_case_7(self):
+        """Driver handles index types."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Use these search and vector search definitions for indexes.
+        search_definition = {"mappings": {"dynamic": False}}
+        vector_search_definition = {
+            "fields": [
+                {
+                    "type": "vector",
+                    "path": "plot_embedding",
+                    "numDimensions": 1536,
+                    "similarity": "euclidean",
+                },
+            ]
+        }
+
+        # Create a new search index on ``coll0`` that implicitly passes its type.
+        implicit_search_resp = coll0.create_search_index(
+            model={"name": _NAME + "-implicit", "definition": search_definition}
+        )
+
+        # Get the index definition.
+        resp = (coll0.list_search_indexes(name=implicit_search_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"search"``.
+        self.assertEqual(resp["type"], "search")
+
+        # Create a new search index on ``coll0`` that explicitly passes its type.
+        explicit_search_resp = coll0.create_search_index(
+            model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition}
+        )
+
+        # Get the index definition.
+        resp = (coll0.list_search_indexes(name=explicit_search_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"search"``.
+        self.assertEqual(resp["type"], "search")
+
+        # Create a new vector search index on ``coll0`` that explicitly passes its type.
+        explicit_vector_resp = coll0.create_search_index(
+            model={
+                "name": _NAME + "-vector",
+                "type": "vectorSearch",
+                "definition": vector_search_definition,
+            }
+        )
+
+        # Get the index definition.
+        resp = (coll0.list_search_indexes(name=explicit_vector_resp)).next()
+
+        # Assert that the index model contains the correct index type: ``"vectorSearch"``.
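+        # (A vector search definition created without an explicit type defaults to
+        # "search" and is rejected by the server, as the error case below shows.)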
+ self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_json_util.py b/test/test_json_util.py index 82ce286b56..cf2c0efb93 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,56 +13,98 @@ # limitations under the License. """Test some utilities for working with JSON and PyMongo.""" +from __future__ import annotations -import unittest import datetime +import json import re import sys +import uuid +from collections import OrderedDict +from typing import Any, Tuple, Type -from nose.plugins.skip import SkipTest +from bson.codec_options import CodecOptions, DatetimeConversion sys.path[0:0] = [""] -import bson -from bson.py3compat import b -from bson import json_util -from bson.binary import Binary, MD5_SUBTYPE, USER_DEFINED_SUBTYPE +from test import unittest + +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + MD5_SUBTYPE, + STANDARD, + USER_DEFINED_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code +from bson.datetime_ms import _MAX_UTC_MS from bson.dbref import DBRef +from bson.decimal128 import Decimal128 +from bson.int64 import Int64 +from bson.json_util import ( + CANONICAL_JSON_OPTIONS, + LEGACY_JSON_OPTIONS, + RELAXED_JSON_OPTIONS, + DatetimeRepresentation, + JSONMode, + JSONOptions, +) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex -from bson.son import RE_TYPE from bson.timestamp import Timestamp -from bson.tz_util import utc +from bson.tz_util import FixedOffset, utc -from test.test_client import get_client - -PY3 = sys.version_info[0] == 3 -PY24 = sys.version_info[:2] == (2, 4) +STRICT_JSON_OPTIONS = JSONOptions( + strict_number_long=True, + datetime_representation=DatetimeRepresentation.ISO8601, + strict_uuid=True, + json_mode=JSONMode.LEGACY, +) class TestJsonUtil(unittest.TestCase): + def round_tripped(self, doc, **kwargs): + return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs) - def setUp(self): - if not json_util.json_lib: - raise SkipTest("No json or simplejson module") - - self.db = get_client().pymongo_test - - def tearDown(self): - self.db = None - - def round_tripped(self, doc): - return json_util.loads(json_util.dumps(doc)) - - def round_trip(self, doc): - self.assertEqual(doc, self.round_tripped(doc)) + def round_trip(self, doc, **kwargs): + self.assertEqual(doc, self.round_tripped(doc, **kwargs)) def test_basic(self): self.round_trip({"hello": "world"}) + def test_loads_bytes(self): + string = b'{"hello": "world"}' + self.assertEqual(json_util.loads(bytes(string)), {"hello": "world"}) + self.assertEqual(json_util.loads(bytearray(string)), {"hello": "world"}) + + def test_json_options_with_options(self): + opts = JSONOptions( + 
datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) + opts2 = opts.with_options( + datetime_representation=DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts2.datetime_representation, DatetimeRepresentation.ISO8601) + + opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(opts.strict_number_long, True) + opts2 = opts.with_options(strict_number_long=False) + self.assertEqual(opts2.strict_number_long, False) + + opts = json_util.CANONICAL_JSON_OPTIONS + self.assertNotEqual(opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) + opts2 = opts.with_options(uuid_representation=UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.document_class, dict) + opts3 = opts2.with_options(document_class=SON) + self.assertEqual(opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts3.document_class, SON) + def test_objectid(self): self.round_trip({"id": ObjectId()}) @@ -71,76 +113,284 @@ def test_dbref(self): self.round_trip({"ref": DBRef("foo", 5, "db")}) self.round_trip({"ref": DBRef("foo", ObjectId())}) - if not PY24: - # Check order. - self.assertEqual( - '{"$ref": "collection", "$id": 1, "$db": "db"}', - json_util.dumps(DBRef('collection', 1, 'db'))) + # Check order. + self.assertEqual( + '{"$ref": "collection", "$id": 1, "$db": "db"}', + json_util.dumps(DBRef("collection", 1, "db")), + ) def test_datetime(self): + tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options(tz_aware=True) # only millis, not micros - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000, utc)}) + self.round_trip( + {"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000, utc)}, + json_options=tz_aware_opts, + ) + self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000)}) + + for jsn in [ + '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}', + ]: + self.assertEqual(EPOCH_AWARE, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) + self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"]) + + dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc) + jsn = '{"dt": {"$date": -62135593139000}}' + self.assertEqual(dtm, json_util.loads(jsn, 
json_options=tz_aware_opts)["dt"]) + jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}' + self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) + + # Test dumps format + pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} + post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)} + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch) + ) + self.assertEqual( + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch) + ) + self.assertEqual( + '{"dt": {"$date": -62135593138990}}', + json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS), + ) + self.assertEqual( + '{"dt": {"$date": 63075661010}}', + json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS), + ) + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', + json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS), + ) + self.assertEqual( + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', + json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS), + ) - def test_regex_object_hook(self): - # simplejson or the builtin json module. - from bson.json_util import json + number_long_options = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "63075661010"}}}', + json_util.dumps(post_epoch, json_options=number_long_options), + ) + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', + json_util.dumps(pre_epoch, json_options=number_long_options), + ) + + # ISO8601 mode assumes naive datetimes are UTC + pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)} + post_epoch_naive = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} + self.assertEqual( + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', + json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) + self.assertEqual( + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', + json_util.dumps(post_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) + # Test tz_aware and tzinfo options + self.assertEqual( + datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), + json_util.loads( + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', json_options=tz_aware_opts + )["dt"], + ) + self.assertEqual( + datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), + json_util.loads( + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', + json_options=JSONOptions(tz_aware=True, tzinfo=utc), + )["dt"], + ) + self.assertEqual( + datetime.datetime(1972, 1, 1, 1, 1, 1, 10000), + json_util.loads( + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', + json_options=JSONOptions(tz_aware=False), + )["dt"], + ) + self.round_trip(pre_epoch_naive, json_options=JSONOptions(tz_aware=False)) + + # Test a non-utc timezone + pacific = FixedOffset(-8 * 60, "US/Pacific") + aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, pacific)} + self.assertEqual( + '{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}', + json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions(json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY, + tz_aware=True, + tzinfo=pacific, + ), + ) + + def test_datetime_ms(self): + # Test ISO8601 in-range + dat_min: 
dict[str, Any] = {"x": DatetimeMS(0)} + dat_max: dict[str, Any] = {"x": DatetimeMS(_MAX_UTC_MS)} + opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) + + self.assertEqual( + dat_min["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_min))["x"], + ) + self.assertEqual( + dat_max["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_max))["x"], + ) + + # Test ISO8601 out-of-range + dat_min = {"x": DatetimeMS(-1)} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS + 1)} + + self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max), + ) + # Test legacy. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY + ) + self.assertEqual('{"x": {"$date": -1}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual( + '{"x": {"$date": ' + str(int(dat_max["x"])) + "}}", + json_util.dumps(dat_max, json_options=opts), + ) + + # Test regular. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min, json_options=opts) + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test decode from datetime.datetime to DatetimeMS + dat_min = {"x": datetime.datetime.min} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS).as_datetime(CodecOptions(tz_aware=False))} + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + datetime_conversion=DatetimeConversion.DATETIME_MS, + ) + + self.assertEqual( + DatetimeMS(dat_min["x"]), + json_util.loads(json_util.dumps(dat_min), json_options=opts)["x"], + ) + self.assertEqual( + DatetimeMS(dat_max["x"]), + json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], + ) + + def test_parse_invalid_date(self): + # These cases should raise ValueError, not IndexError. + for invalid in [ + '{"dt": { "$date" : "1970-01-01T00:00:"}}', + '{"dt": { "$date" : "1970-01-01T01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:"}}', + '{"dt": { "$date" : "1970-01-01T01"}}', + '{"dt": { "$date" : "1970-01-01T"}}', + '{"dt": { "$date" : "1970-01-01"}}', + '{"dt": { "$date" : "1970-01-"}}', + '{"dt": { "$date" : "1970-01"}}', + '{"dt": { "$date" : "1970-"}}', + '{"dt": { "$date" : "1970"}}', + '{"dt": { "$date" : "1"}}', + '{"dt": { "$date" : ""}}', + ]: + with self.assertRaisesRegex(ValueError, "does not match"): + json_util.loads(invalid) + + def test_regex_object_hook(self): # Extended JSON format regular expression. 
- pat = 'a*b' + pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) - self.assertTrue(isinstance(loaded, RE_TYPE)) - self.assertEqual(pat, loaded.pattern) - self.assertEqual(re.U, loaded.flags) - - loaded = json_util.object_hook(json.loads(json_re), compile_re=False) - self.assertTrue(isinstance(loaded, Regex)) + self.assertIsInstance(loaded, Regex) self.assertEqual(pat, loaded.pattern) self.assertEqual(re.U, loaded.flags) def test_regex(self): - for regex_instance in ( - re.compile("a*b", re.IGNORECASE), - Regex("a*b", re.IGNORECASE)): + for regex_instance in (re.compile("a*b", re.IGNORECASE), Regex("a*b", re.IGNORECASE)): res = self.round_tripped({"r": regex_instance})["r"] self.assertEqual("a*b", res.pattern) res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"] self.assertEqual("a*b", res.pattern) - if PY3: - # re.UNICODE is a default in python 3. - self.assertEqual(re.IGNORECASE | re.UNICODE, res.flags) - else: - self.assertEqual(re.IGNORECASE, res.flags) - - all_options = re.I|re.L|re.M|re.S|re.U|re.X - regex = re.compile("a*b", all_options) + self.assertEqual(re.IGNORECASE, res.flags) + + unicode_options = re.I | re.M | re.S | re.U | re.X + regex = re.compile("a*b", unicode_options) res = self.round_tripped({"r": regex})["r"] - self.assertEqual(all_options, res.flags) + self.assertEqual(unicode_options, res.flags) # Some tools may not add $options if no flags are set. - res = json_util.loads('{"r": {"$regex": "a*b"}}')['r'] - expected_flags = 0 - if PY3: - expected_flags = re.U - self.assertEqual(expected_flags, res.flags) + res = json_util.loads('{"r": {"$regex": "a*b"}}')["r"] + self.assertEqual(0, res.flags) self.assertEqual( - Regex('.*', 'ilm'), - json_util.loads( - '{"r": {"$regex": ".*", "$options": "ilm"}}', - compile_re=False)['r']) + Regex(".*", "ilm"), json_util.loads('{"r": {"$regex": ".*", "$options": "ilm"}}')["r"] + ) - if not PY24: - # Check order. - self.assertEqual( - '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(Regex('.*', re.M | re.X))) + # Check order. + self.assertEqual( + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', + json_util.dumps(Regex(".*", re.M | re.X)), + ) - self.assertEqual( - '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(re.compile(b('.*'), re.M | re.X))) + self.assertEqual( + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', + json_util.dumps(re.compile(b".*", re.M | re.X)), + ) + + self.assertEqual( + '{"$regex": ".*", "$options": "mx"}', + json_util.dumps(Regex(".*", re.M | re.X), json_options=LEGACY_JSON_OPTIONS), + ) + + def test_regex_validation(self): + non_str_types = [10, {}, []] + docs = [{"$regex": i} for i in non_str_types] + for doc in docs: + self.assertEqual(doc, json_util.loads(json.dumps(doc))) + + doc = {"$regex": ""} + self.assertIsInstance(json_util.loads(json.dumps(doc)), Regex) def test_minkey(self): self.round_trip({"m": MinKey()}) @@ -149,65 +399,151 @@ def test_maxkey(self): self.round_trip({"m": MaxKey()}) def test_timestamp(self): - res = json_util.dumps({"ts": Timestamp(4, 13)}, default=json_util.default) - if not PY24: - # Check order. 
- self.assertEqual('{"ts": {"t": 4, "i": 13}}', res) - - dct = json_util.loads(res) - self.assertEqual(dct['ts']['t'], 4) - self.assertEqual(dct['ts']['i'], 13) + dct = {"ts": Timestamp(4, 13)} + res = json_util.dumps(dct, default=json_util.default) + rtdct = json_util.loads(res) + self.assertEqual(dct, rtdct) + self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res) + + def test_uuid_default(self): + # Cannot directly encode native UUIDs with the default + # uuid_representation. + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + json_util.dumps(doc) + legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}' + expected = {"uuid": Binary(b"\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y", 4)} + self.assertEqual(json_util.loads(legacy_jsn), expected) def test_uuid(self): - if not bson.has_uuid(): - raise SkipTest("No uuid module") - self.round_trip( - {'uuid': bson.uuid.UUID( - 'f47ac10b-58cc-4372-a567-0e02b2c3d479')}) + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ) + self.round_trip(doc, json_options=uuid_legacy_opts) + self.assertEqual( + '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', + json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), + ) + self.assertEqual( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_util.dumps( + doc, + json_options=STRICT_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ), + ), + ) + self.assertEqual( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + json_util.dumps( + doc, + json_options=JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=STANDARD + ), + ), + ) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_options=uuid_legacy_opts, + ), + ) + for uuid_representation in set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}: + options = JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=uuid_representation + ) + self.round_trip(doc, json_options=options) + # Ignore UUID representation when decoding BSON binary subtype 4. + self.assertEqual( + doc, + json_util.loads( + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + json_options=options, + ), + ) + + def test_uuid_uuid_rep_unspecified(self): + _uuid = uuid.uuid4() + options = JSONOptions( + strict_uuid=True, + json_mode=JSONMode.LEGACY, + uuid_representation=UuidRepresentation.UNSPECIFIED, + ) + + # Cannot directly encode native UUIDs with UNSPECIFIED. + doc: dict[str, Any] = {"uuid": _uuid} + with self.assertRaises(ValueError): + json_util.dumps(doc, json_options=options) + + # All UUID subtypes are decoded as Binary with UNSPECIFIED. 
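+        # (In other words, round-tripping preserves the original Binary value rather
+        # than inflating it to a native uuid.UUID.)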
+ # subtype 3 + doc = {"uuid": Binary(_uuid.bytes, subtype=3)} + ext_json_str = json_util.dumps(doc) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) + # subtype 4 + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps(doc) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) + # $uuid-encoded fields + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps({"uuid": _uuid}, json_options=LEGACY_JSON_OPTIONS) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): - bin_type_dict = {"bin": Binary(b("\x00\x01\x02\x03\x04"))} + bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} md5_type_dict = { - "md5": Binary(b(' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac'), - MD5_SUBTYPE)} - custom_type_dict = {"custom": Binary(b("hello"), USER_DEFINED_SUBTYPE)} + "md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac", MD5_SUBTYPE) + } + custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)} self.round_trip(bin_type_dict) self.round_trip(md5_type_dict) self.round_trip(custom_type_dict) + # Binary with subtype 0 is decoded into bytes in Python 3. + bin = json_util.loads('{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')["bin"] + self.assertEqual(type(bin), bytes) + # PYTHON-443 ensure old type formats are supported - json_bin_dump = json_util.dumps(bin_type_dict) - self.assertTrue('"$type": "00"' in json_bin_dump) - self.assertEqual(bin_type_dict, - json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')) - - json_bin_dump = json_util.dumps(md5_type_dict) - if not PY24: - # Check order. - self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' - + ' "$type": "05"}}', - json_bin_dump) + json_bin_dump = json_util.dumps(bin_type_dict, json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "00"', json_bin_dump) + self.assertEqual( + bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}') + ) + json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) + # Check order. 
+ self.assertEqual( + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==", "$type": "05"}}', json_bin_dump + ) - self.assertEqual(md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' - ' "IG43GK8JL9HRL4DK53HMrA=="}}')) + self.assertEqual( + md5_type_dict, + json_util.loads('{"md5": {"$type": 5, "$binary": "IG43GK8JL9HRL4DK53HMrA=="}}'), + ) - json_bin_dump = json_util.dumps(custom_type_dict) - self.assertTrue('"$type": "80"' in json_bin_dump) - self.assertEqual(custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' - ' "aGVsbG8="}}')) + json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "80"', json_bin_dump) + self.assertEqual( + custom_type_dict, + json_util.loads('{"custom": {"$type": 128, "$binary": "aGVsbG8="}}'), + ) # Handle mongoexport where subtype >= 128 - self.assertEqual(128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 128, + json_util.loads('{"custom": {"$type": "ffffff80", "$binary": "aGVsbG8="}}')[ + "custom" + ].subtype, + ) - self.assertEqual(255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 255, + json_util.loads('{"custom": {"$type": "ffffffff", "$binary": "aGVsbG8="}}')[ + "custom" + ].subtype, + ) def test_code(self): self.round_trip({"code": Code("function x() { return 1; }")}) @@ -216,27 +552,89 @@ def test_code(self): res = json_util.dumps(code) self.assertEqual(code, json_util.loads(res)) - if not PY24: - # Check order. - self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res) - - def test_cursor(self): - db = self.db - - db.drop_collection("test") - docs = [ - {'foo': [1, 2]}, - {'bar': {'hello': 'world'}}, - {'code': Code("function x() { return 1; }")}, - {'bin': Binary(b("\x00\x01\x02\x03\x04"))}, - {'dbref': {'_ref': DBRef('simple', - ObjectId('509b8db456c02c5ab7e63c34'))}} + # Check order. + self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res) + + no_scope = Code("function() {}") + self.assertEqual('{"$code": "function() {}"}', json_util.dumps(no_scope)) + + def test_undefined(self): + jsn = '{"name": {"$undefined": true}}' + self.assertIsNone(json_util.loads(jsn)["name"]) + + def test_numberlong(self): + jsn = '{"weight": {"$numberLong": "65535"}}' + self.assertEqual(json_util.loads(jsn)["weight"], Int64(65535)) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') + json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) + # Ensure json_util.default converts Int64 to int in non-strict mode. 
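+        # (Int64 subclasses int, so an equality check alone would pass either way;
+        # the assertNotIsInstance below pins down the concrete type.)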
+ converted = json_util.default(Int64(65535)) + self.assertEqual(converted, 65535) + self.assertNotIsInstance(converted, Int64) + self.assertEqual( + json_util.default(Int64(65535), json_options=json_options), {"$numberLong": "65535"} + ) + + def test_loads_document_class(self): + json_doc = '{"foo": "bar", "b": 1, "d": {"a": 1}}' + expected_doc = {"foo": "bar", "b": 1, "d": {"a": 1}} + for cls in (dict, SON, OrderedDict): + doc = json_util.loads(json_doc, json_options=JSONOptions(document_class=cls)) + self.assertEqual(doc, expected_doc) + self.assertIsInstance(doc, cls) + self.assertIsInstance(doc["d"], cls) + + def test_encode_subclass(self): + cases: list[Tuple[Type, Any]] = [ + (int, (1,)), + (int, (2 << 60,)), + (float, (1.1,)), + (Int64, (64,)), + (Int64, (2 << 60,)), + (str, ("str",)), + (bytes, (b"bytes",)), + (datetime.datetime, (2024, 1, 16)), + (DatetimeMS, (1,)), + (uuid.UUID, ("f47ac10b-58cc-4372-a567-0e02b2c3d479",)), + (Binary, (b"1", USER_DEFINED_SUBTYPE)), + (Code, ("code",)), + (DBRef, ("coll", ObjectId())), + (ObjectId, ("65a6dab5f98bc03906ee3597",)), + (MaxKey, ()), + (MinKey, ()), + (Regex, ("pat",)), + (Timestamp, (1, 1)), + (Decimal128, ("0.5",)), + ] + allopts = [ + CANONICAL_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + RELAXED_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + LEGACY_JSON_OPTIONS.with_options(uuid_representation=STANDARD), ] + for cls, args in cases: + basic_obj = cls(*args) + my_cls = type(f"My{cls.__name__}", (cls,), {}) + my_obj = my_cls(*args) + for opts in allopts: + expected_json = json_util.dumps(basic_obj, json_options=opts) + self.assertEqual(json_util.dumps(my_obj, json_options=opts), expected_json) + + def test_encode_type_marker(self): + # Assert that a custom subclass can be JSON encoded based on the _type_marker attribute. 
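+        # (127 is the BSON type marker for MaxKey, mirroring bson.max_key.MaxKey.)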
+ class MyMaxKey: + _type_marker = 127 + + expected_json = json_util.dumps(MaxKey()) + self.assertEqual(json_util.dumps(MyMaxKey()), expected_json) + + # Test a class that inherits from two built in types + class MyBinary(Binary): + pass + + expected_json = json_util.dumps(Binary(b"bin", USER_DEFINED_SUBTYPE)) + self.assertEqual(json_util.dumps(MyBinary(b"bin", USER_DEFINED_SUBTYPE)), expected_json) - db.test.insert(docs) - reloaded_docs = json_util.loads(json_util.dumps(db.test.find())) - for doc in docs: - self.assertTrue(doc in reloaded_docs) if __name__ == "__main__": unittest.main() diff --git a/test/test_json_util_integration.py b/test/test_json_util_integration.py new file mode 100644 index 0000000000..4ef5f10fe2 --- /dev/null +++ b/test/test_json_util_integration.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from test import IntegrationTest +from typing import Any, List, MutableMapping + +from bson import Binary, Code, DBRef, ObjectId, json_util +from bson.binary import USER_DEFINED_SUBTYPE + +_IS_SYNC = True + + +class TestJsonUtilRoundtrip(IntegrationTest): + def test_cursor(self): + db = self.db + + db.drop_collection("test") + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + ] + + db.test.insert_many(docs) + reloaded_docs = json_util.loads(json_util.dumps((db.test.find()).to_list())) + for doc in docs: + self.assertIn(doc, reloaded_docs) diff --git a/test/test_legacy_connections.py b/test/test_legacy_connections.py deleted file mode 100644 index 83b475dc0d..0000000000 --- a/test/test_legacy_connections.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2013-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test deprecated client classes Connection and ReplicaSetConnection.""" - - -import sys -import unittest - -sys.path[0:0] = [""] - -from bson import ObjectId - -import pymongo -from pymongo.connection import Connection -from pymongo.replica_set_connection import ReplicaSetConnection -from pymongo.errors import ConfigurationError -from test import host, port, pair -from test.test_replica_set_client import TestReplicaSetClientBase -from test.utils import get_pool - - -class TestConnection(unittest.TestCase): - def test_connection(self): - c = Connection(host, port) - self.assertTrue(c.auto_start_request) - self.assertEqual(None, c.max_pool_size) - self.assertFalse(c.slave_okay) - self.assertFalse(c.safe) - self.assertEqual({}, c.get_lasterror_options()) - - # Connection's writes are unacknowledged by default - doc = {"_id": ObjectId()} - coll = c.pymongo_test.write_concern_test - coll.drop() - coll.insert(doc) - coll.insert(doc) - - c = Connection("mongodb://%s:%s/?safe=true" % (host, port)) - self.assertTrue(c.safe) - - # To preserve legacy Connection's behavior, max_size should be None. 
- # Pool should handle this without error. - self.assertEqual(None, get_pool(c).max_size) - c.end_request() - - # Connection's network_timeout argument is translated into - # socketTimeoutMS - self.assertEqual(123, Connection( - host, port, network_timeout=123)._MongoClient__net_timeout) - - for network_timeout in 'foo', 0, -1: - self.assertRaises( - ConfigurationError, - Connection, host, port, network_timeout=network_timeout) - - def test_connection_alias(self): - # Testing that pymongo module imports connection.Connection - self.assertEqual(Connection, pymongo.Connection) - - -class TestReplicaSetConnection(TestReplicaSetClientBase): - def test_replica_set_connection(self): - c = ReplicaSetConnection(pair, replicaSet=self.name) - self.assertTrue(c.auto_start_request) - self.assertEqual(None, c.max_pool_size) - self.assertFalse(c.slave_okay) - self.assertFalse(c.safe) - self.assertEqual({}, c.get_lasterror_options()) - - # ReplicaSetConnection's writes are unacknowledged by default - doc = {"_id": ObjectId()} - coll = c.pymongo_test.write_concern_test - coll.drop() - coll.insert(doc) - coll.insert(doc) - - c = ReplicaSetConnection("mongodb://%s:%s/?replicaSet=%s&safe=true" % ( - host, port, self.name)) - - self.assertTrue(c.safe) - - # To preserve legacy ReplicaSetConnection's behavior, max_size should - # be None. Pool should handle this without error. - pool = get_pool(c) - self.assertEqual(None, pool.max_size) - c.end_request() - - # ReplicaSetConnection's network_timeout argument is translated into - # socketTimeoutMS - self.assertEqual(123, ReplicaSetConnection( - pair, replicaSet=self.name, network_timeout=123 - )._MongoReplicaSetClient__net_timeout) - - for network_timeout in 'foo', 0, -1: - self.assertRaises( - ConfigurationError, - ReplicaSetConnection, pair, replicaSet=self.name, - network_timeout=network_timeout) - - def test_replica_set_connection_alias(self): - # Testing that pymongo module imports ReplicaSetConnection - self.assertEqual(ReplicaSetConnection, pymongo.ReplicaSetConnection) - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py new file mode 100644 index 0000000000..472ef51da3 --- /dev/null +++ b/test/test_load_balancer.py @@ -0,0 +1,194 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" +from __future__ import annotations + +import asyncio +import gc +import os +import pathlib +import sys +import threading +from asyncio import Event +from test.helpers import ConcurrentRunner, ExceptionCatchingTask +from test.utils import get_pool + +import pytest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils_shared import ( + create_event, + wait_until, +) + +_IS_SYNC = True + +pytestmark = pytest.mark.load_balancer + +# Location of JSON test specifications. 
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +class TestLB(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + + def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") + pool = get_pool(self.client) + n_conns = len(pool.conns) + self.db.test.find_one({}) + self.assertEqual(len(pool.conns), n_conns) + (self.db.test.aggregate([{"$limit": 1}])).to_list() + self.assertEqual(len(pool.conns), n_conns) + + @client_context.require_load_balancer + def test_unpin_committed_transaction(self): + client = self.rs_client() + pool = get_pool(client) + coll = client[self.db.name].test + with client.start_session() as session: + with session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + coll.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + @client_context.require_failCommand_fail_point + def test_cursor_gc(self): + def create_resource(coll): + cursor = coll.find({}, batch_size=3) + next(cursor) + return cursor + + self._test_no_gc_deadlock(create_resource) + + @client_context.require_failCommand_fail_point + def test_command_cursor_gc(self): + def create_resource(coll): + cursor = coll.aggregate([], batchSize=3) + next(cursor) + return cursor + + self._test_no_gc_deadlock(create_resource) + + def _test_no_gc_deadlock(self, create_resource): + client = self.rs_client() + pool = get_pool(client) + coll = client[self.db.name].test + coll.insert_many([{} for _ in range(10)]) + self.assertEqual(pool.active_sockets, 0) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, + } + with self.fail_point(args): + resource = create_resource(coll) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + coll.delete_many({}) + + @client_context.require_transactions + def test_session_gc(self): + client = self.rs_client() + pool = get_pool(client) + session = client.start_session() + session.start_transaction() + client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. 
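+        # (The cleanup triggered by collecting the session must not attempt
+        # to take the pool lock, which PoolLocker is holding here; if it did,
+        # this gc would deadlock.)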
+ del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + client[self.db.name].test.delete_many({}) + + +class PoolLocker(ExceptionCatchingTask): + def __init__(self, pool): + super().__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = create_event() + self.unlock = create_event() + + def lock_pool(self): + with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = self.wait(self.unlock, 10) + if not unlock_pool: + raise Exception("timed out waiting for unlock signal: deadlock?") + + def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_logger.py b/test/test_logger.py new file mode 100644 index 0000000000..a7d97927fa --- /dev/null +++ b/test/test_logger.py @@ -0,0 +1,144 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
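+
+# NOTE: the environment variable really is spelled
+# MONGOB_LOG_MAX_DOCUMENT_LENGTH (no "D") throughout these tests; it matches
+# the lookup key used by pymongo.logger.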
+from __future__ import annotations
+
+import os
+from test import IntegrationTest, client_context, unittest
+from unittest.mock import patch
+
+from bson import json_util
+from pymongo.errors import OperationFailure
+from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH
+
+_IS_SYNC = True
+
+
+# https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests
+class TestLogger(IntegrationTest):
+    def test_default_truncation_limit(self):
+        docs = [{"x": "y"} for _ in range(100)]
+        db = self.db
+
+        with patch.dict("os.environ"):
+            os.environ.pop("MONGOB_LOG_MAX_DOCUMENT_LENGTH", None)
+            with self.assertLogs("pymongo.command", level="DEBUG") as cm:
+                db.test.insert_many(docs)
+
+            # Truncated payloads get "..." appended, hence the extra three
+            # characters beyond the configured maximum.
+            cmd_started_log = json_util.loads(cm.records[0].getMessage())
+            self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3)
+
+            cmd_succeeded_log = json_util.loads(cm.records[1].getMessage())
+            self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3)
+
+            with self.assertLogs("pymongo.command", level="DEBUG") as cm:
+                db.test.find({}).to_list()
+            cmd_succeeded_log = json_util.loads(cm.records[1].getMessage())
+            self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3)
+
+    def test_configured_truncation_limit(self):
+        cmd = {"hello": True}
+        db = self.db
+        with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": "5"}):
+            with self.assertLogs("pymongo.command", level="DEBUG") as cm:
+                db.command(cmd)
+
+                cmd_started_log = json_util.loads(cm.records[0].getMessage())
+                self.assertEqual(len(cmd_started_log["command"]), 5 + 3)
+
+                cmd_succeeded_log = json_util.loads(cm.records[1].getMessage())
+                self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3)
+                with self.assertRaises(OperationFailure):
+                    db.command({"notARealCommand": True})
+                cmd_failed_log = json_util.loads(cm.records[-1].getMessage())
+                self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3)
+
+    def test_truncation_multi_byte_codepoints(self):
+        # Three lengths so that the raw truncation point lands at each
+        # possible byte offset within a 3-byte codepoint.
+        document_lengths = ["20000", "20001", "20002"]
+        multi_byte_char_str_len = 50_000
+        str_to_repeat = "界"  # Encodes to 3 bytes in UTF-8.
+
+        multi_byte_char_str = str_to_repeat * multi_byte_char_str_len
+
+        for length in document_lengths:
+            with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}):
+                with self.assertLogs("pymongo.command", level="DEBUG") as cm:
+                    self.db.test.insert_one({"x": multi_byte_char_str})
+                cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"]
+
+                # Strip the appended "..." and check that truncation did not
+                # split a codepoint: the last three bytes must decode back to
+                # the repeated character.
+                cmd_started_log = cmd_started_log[:-3]
+                last_3_bytes = cmd_started_log.encode()[-3:].decode()
+
+                self.assertEqual(last_3_bytes, str_to_repeat)
+
+    def test_logging_without_listeners(self):
+        c = self.single_client()
+        self.assertEqual(len(c._event_listeners.event_listeners()), 0)
+        with self.assertLogs("pymongo.connection", level="DEBUG") as cm:
+            c.db.test.insert_one({"x": "1"})
+        self.assertGreater(len(cm.records), 0)
+        with self.assertLogs("pymongo.command", level="DEBUG") as cm:
+            c.db.test.insert_one({"x": "1"})
+        self.assertGreater(len(cm.records), 0)
+        with self.assertLogs("pymongo.serverSelection", level="DEBUG") as cm:
+            c.db.test.insert_one({"x": "1"})
+        self.assertGreater(len(cm.records), 0)
+
+    @client_context.require_failCommand_fail_point
+    def test_logging_retry_read_attempts(self):
+        self.db.test.insert_one({"x": "1"})
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {
+                    "failCommands": ["find"],
+                    "errorCode": 10107,
+                    "errorLabels":
["RetryableWriteError"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + @client_context.require_failCommand_fail_point + @client_context.require_retryable_writes + def test_logging_retry_write_attempts(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_master_slave_connection.py b/test/test_master_slave_connection.py deleted file mode 100644 index 5af88a3c20..0000000000 --- a/test/test_master_slave_connection.py +++ /dev/null @@ -1,522 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test for master slave connections.""" - -import datetime -import os -import sys -import threading -import time -import unittest -sys.path[0:0] = [""] - -from nose.plugins.skip import SkipTest - -from bson.son import SON -from bson.tz_util import utc -from pymongo import ReadPreference, thread_util -from pymongo.errors import ConnectionFailure, InvalidName -from pymongo.errors import CollectionInvalid, OperationFailure -from pymongo.errors import AutoReconnect -from pymongo.database import Database -from pymongo.mongo_client import MongoClient -from pymongo.collection import Collection -from pymongo.master_slave_connection import MasterSlaveConnection -from test import host, port, host2, port2, host3, port3 -from test.utils import TestRequestMixin, get_pool - - -class TestMasterSlaveConnection(unittest.TestCase, TestRequestMixin): - - def setUp(self): - self.master = MongoClient(host, port) - - self.slaves = [] - try: - self.slaves.append(MongoClient( - host2, port2, read_preference=ReadPreference.SECONDARY)) - except ConnectionFailure: - pass - - try: - self.slaves.append(MongoClient( - host3, port3, read_preference=ReadPreference.SECONDARY)) - except ConnectionFailure: - pass - - if not self.slaves: - raise SkipTest("Not connected to master-slave set") - - self.client = MasterSlaveConnection(self.master, self.slaves) - self.db = self.client.pymongo_test - - def tearDown(self): - try: - self.db.test.drop_indexes() - except Exception: - # Tests like test_disconnect can monkey with the client in ways - # that make this fail - pass - - self.master = self.slaves = self.db = self.client = None - super(TestMasterSlaveConnection, self).tearDown() - - def test_types(self): - self.assertRaises(TypeError, MasterSlaveConnection, 1) - self.assertRaises(TypeError, MasterSlaveConnection, self.master, 1) - 
self.assertRaises(TypeError, MasterSlaveConnection, self.master, [1]) - - def test_use_greenlets(self): - self.assertFalse(self.client.use_greenlets) - - if thread_util.have_gevent: - master = MongoClient(host, port, use_greenlets=True) - slaves = [ - MongoClient(slave.host, slave.port, use_greenlets=True) - for slave in self.slaves] - - self.assertTrue( - MasterSlaveConnection(master, slaves).use_greenlets) - - def test_repr(self): - self.assertEqual(repr(self.client), - "MasterSlaveConnection(%r, %r)" % - (self.master, self.slaves)) - - def test_disconnect(self): - class MongoClient(object): - def __init__(self): - self._disconnects = 0 - - def disconnect(self): - self._disconnects += 1 - - self.client._MasterSlaveConnection__master = MongoClient() - self.client._MasterSlaveConnection__slaves = [MongoClient(), - MongoClient()] - - self.client.disconnect() - self.assertEqual(1, - self.client._MasterSlaveConnection__master._disconnects) - self.assertEqual(1, - self.client._MasterSlaveConnection__slaves[0]._disconnects) - self.assertEqual(1, - self.client._MasterSlaveConnection__slaves[1]._disconnects) - - def test_continue_until_slave_works(self): - class Slave(object): - calls = 0 - - def __init__(self, fail): - self._fail = fail - - def _send_message_with_response(self, *args, **kwargs): - Slave.calls += 1 - if self._fail: - raise AutoReconnect() - return (None, 'sent') - - class NotRandomList(object): - last_idx = -1 - - def __init__(self): - self._items = [Slave(True), Slave(True), - Slave(False), Slave(True)] - - def __len__(self): - return len(self._items) - - def __getitem__(self, idx): - NotRandomList.last_idx = idx - return self._items.pop(0) - - self.client._MasterSlaveConnection__slaves = NotRandomList() - - response = self.client._send_message_with_response('message') - self.assertEqual((NotRandomList.last_idx, 'sent'), response) - self.assertNotEqual(-1, NotRandomList.last_idx) - self.assertEqual(3, Slave.calls) - - def test_raise_autoreconnect_if_all_slaves_fail(self): - class Slave(object): - calls = 0 - - def __init__(self, fail): - self._fail = fail - - def _send_message_with_response(self, *args, **kwargs): - Slave.calls += 1 - if self._fail: - raise AutoReconnect() - return 'sent' - - class NotRandomList(object): - def __init__(self): - self._items = [Slave(True), Slave(True), - Slave(True), Slave(True)] - - def __len__(self): - return len(self._items) - - def __getitem__(self, idx): - return self._items.pop(0) - - self.client._MasterSlaveConnection__slaves = NotRandomList() - - self.assertRaises(AutoReconnect, - self.client._send_message_with_response, 'message') - self.assertEqual(4, Slave.calls) - - def test_get_db(self): - - def make_db(base, name): - return base[name] - - self.assertRaises(InvalidName, make_db, self.client, "") - self.assertRaises(InvalidName, make_db, self.client, "te$t") - self.assertRaises(InvalidName, make_db, self.client, "te.t") - self.assertRaises(InvalidName, make_db, self.client, "te\\t") - self.assertRaises(InvalidName, make_db, self.client, "te/t") - self.assertRaises(InvalidName, make_db, self.client, "te st") - - self.assertTrue(isinstance(self.client.test, Database)) - self.assertEqual(self.client.test, self.client["test"]) - self.assertEqual(self.client.test, Database(self.client, - "test")) - - def test_database_names(self): - self.client.pymongo_test.test.save({"dummy": u"object"}) - self.client.pymongo_test_mike.test.save({"dummy": u"object"}) - - dbs = self.client.database_names() - self.assertTrue("pymongo_test" in dbs) - 
self.assertTrue("pymongo_test_mike" in dbs) - - def test_drop_database(self): - self.assertRaises(TypeError, self.client.drop_database, 5) - self.assertRaises(TypeError, self.client.drop_database, None) - - raise SkipTest("This test often fails due to SERVER-2329") - - self.client.pymongo_test.test.save({"dummy": u"object"}) - dbs = self.client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.client.drop_database("pymongo_test") - dbs = self.client.database_names() - self.assertTrue("pymongo_test" not in dbs) - - self.client.pymongo_test.test.save({"dummy": u"object"}) - dbs = self.client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.client.drop_database(self.client.pymongo_test) - dbs = self.client.database_names() - self.assertTrue("pymongo_test" not in dbs) - - def test_iteration(self): - - def iterate(): - [a for a in self.client] - - self.assertRaises(TypeError, iterate) - - def test_insert_find_one_in_request(self): - count = 0 - for i in range(100): - self.client.start_request() - self.db.test.remove({}) - self.db.test.insert({"x": i}) - try: - if i != self.db.test.find_one()["x"]: - count += 1 - except: - count += 1 - self.client.end_request() - self.assertFalse(count) - - def test_nested_request(self): - client = self.client - - def assertRequest(in_request): - self.assertEqual(in_request, client.in_request()) - self.assertEqual(in_request, client.master.in_request()) - - # MasterSlaveConnection is special, alas - it has no auto_start_request - # and it begins *not* in a request. When it's in a request, it sends - # all queries to primary. - self.assertFalse(client.in_request()) - self.assertFalse(client.master.in_request()) - - # Start and end request - client.start_request() - assertRequest(True) - client.end_request() - assertRequest(False) - - # Double-nesting - client.start_request() - client.start_request() - client.end_request() - assertRequest(True) - client.end_request() - assertRequest(False) - - def test_request_threads(self): - client = self.client - - # In a request, all ops go through master - pool = get_pool(client.master) - client.master.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - - started_request, ended_request = threading.Event(), threading.Event() - checked_request = threading.Event() - thread_done = [False] - - # Starting a request in one thread doesn't put the other thread in a - # request - def f(): - self.assertNotInRequestAndDifferentSock(client, pool) - client.start_request() - self.assertInRequestAndSameSock(client, pool) - started_request.set() - checked_request.wait() - checked_request.clear() - self.assertInRequestAndSameSock(client, pool) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pool) - ended_request.set() - checked_request.wait() - thread_done[0] = True - - t = threading.Thread(target=f) - t.setDaemon(True) - t.start() - started_request.wait() - self.assertNotInRequestAndDifferentSock(client, pool) - checked_request.set() - ended_request.wait() - self.assertNotInRequestAndDifferentSock(client, pool) - checked_request.set() - t.join() - self.assertNotInRequestAndDifferentSock(client, pool) - self.assertTrue(thread_done[0], "Thread didn't complete") - - # This was failing because commands were being sent to the slaves - def test_create_collection(self): - self.client.pymongo_test.test.drop() - - collection = self.db.create_collection('test') - self.assertTrue(isinstance(collection, Collection)) - - self.assertRaises(CollectionInvalid, 
self.db.create_collection, 'test') - - # Believe this was failing for the same reason... - def test_unique_index(self): - self.client.pymongo_test.test.drop() - self.db.test.create_index('username', unique=True) - - self.db.test.save({'username': 'mike'}) - self.assertRaises(OperationFailure, - self.db.test.save, {'username': 'mike'}) - - # NOTE this test is non-deterministic, but I expect - # some failures unless the db is pulling instantaneously... - def test_insert_find_one_with_slaves(self): - count = 0 - for i in range(100): - self.db.test.remove({}) - self.db.test.insert({"x": i}) - try: - if i != self.db.test.find_one()["x"]: - count += 1 - except: - count += 1 - self.assertTrue(count) - - # NOTE this test is non-deterministic, but hopefully we pause long enough - # for the slaves to pull... - def test_insert_find_one_with_pause(self): - count = 0 - - self.db.test.remove({}) - self.db.test.insert({"x": 5586}) - time.sleep(11) - for _ in range(10): - try: - if 5586 != self.db.test.find_one()["x"]: - count += 1 - except: - count += 1 - self.assertFalse(count) - - def test_kill_cursor_explicit(self): - c = self.client - c.slave_okay = True - db = c.pymongo_test - - test = db.master_slave_test_kill_cursor_explicit - test.drop() - - for i in range(20): - test.insert({"i": i}, w=1 + len(self.slaves)) - - st = time.time() - while time.time() - st < 120: - # Wait for replication -- the 'w' parameter should obviate this - # loop but it's not working reliably in Jenkins right now - if list(test.find({"i": 19})): - break - time.sleep(0.5) - else: - self.fail("Replication timeout, test coll has %s records" % ( - len(list(test.find())) - )) - - # Partially evaluate cursor so it's left alive, then kill it - cursor = test.find().batch_size(10) - self.assertNotEqual( - cursor._Cursor__connection_id, - -1, - "Expected cursor connected to a slave, not master") - - self.assertTrue(cursor.next()) - self.assertNotEqual(0, cursor.cursor_id) - - cursor_id = cursor.cursor_id - - # Cursor dead on server - trigger a getMore on the same cursor_id and - # check that the server returns an error. - cursor2 = cursor.clone() - cursor2._Cursor__id = cursor_id - - if (sys.platform.startswith('java') or - 'PyPy' in sys.version): - # Explicitly kill cursor. - cursor.close() - else: - # Implicitly kill it in CPython. 
- del cursor - - self.assertRaises(OperationFailure, lambda: list(cursor2)) - - def test_base_object(self): - c = self.client - self.assertFalse(c.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(c.safe) - self.assertEqual({}, c.get_lasterror_options()) - db = c.pymongo_test - self.assertFalse(db.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(db.safe) - self.assertEqual({}, db.get_lasterror_options()) - coll = db.test - coll.drop() - self.assertFalse(coll.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(coll.safe) - self.assertEqual({}, coll.get_lasterror_options()) - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - self.assertTrue(bool(cursor._Cursor__read_preference)) - - w = 1 + len(self.slaves) - wtimeout=10000 # Wait 10 seconds for replication to complete - c.set_lasterror_options(w=w, wtimeout=wtimeout) - self.assertFalse(c.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(c.safe) - self.assertEqual({'w': w, 'wtimeout': wtimeout}, c.get_lasterror_options()) - db = c.pymongo_test - self.assertFalse(db.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(db.safe) - self.assertEqual({'w': w, 'wtimeout': wtimeout}, db.get_lasterror_options()) - coll = db.test - self.assertFalse(coll.slave_okay) - self.assertTrue(bool(c.read_preference)) - self.assertTrue(coll.safe) - self.assertEqual({'w': w, 'wtimeout': wtimeout}, - coll.get_lasterror_options()) - cursor = coll.find() - self.assertFalse(cursor._Cursor__slave_okay) - self.assertTrue(bool(cursor._Cursor__read_preference)) - - coll.insert({'foo': 'bar'}) - self.assertEqual(1, coll.find({'foo': 'bar'}).count()) - self.assertTrue(coll.find({'foo': 'bar'})) - coll.remove({'foo': 'bar'}) - self.assertEqual(0, coll.find({'foo': 'bar'}).count()) - - c.safe = False - c.unset_lasterror_options() - self.assertFalse(self.client.slave_okay) - self.assertTrue(bool(self.client.read_preference)) - self.assertFalse(self.client.safe) - self.assertEqual({}, self.client.get_lasterror_options()) - - def test_document_class(self): - c = MasterSlaveConnection(self.master, self.slaves) - db = c.pymongo_test - w = 1 + len(self.slaves) - db.test.insert({"x": 1}, w=w) - - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) - - c.document_class = SON - - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) - - c = MasterSlaveConnection(self.master, self.slaves, document_class=SON) - db = c.pymongo_test - - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) - - c.document_class = dict - - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) - - def test_tz_aware(self): - dt = datetime.datetime.utcnow() - client = MasterSlaveConnection(self.master, self.slaves) - self.assertEqual(False, client.tz_aware) - db = client.pymongo_test - w = 1 + len(self.slaves) - db.tztest.insert({'dt': dt}, w=w) - self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo) - - client = MasterSlaveConnection(self.master, self.slaves, tz_aware=True) - self.assertEqual(True, client.tz_aware) - db = client.pymongo_test - 
db.tztest.insert({'dt': dt}, w=w) - self.assertEqual(utc, db.tztest.find_one()['dt'].tzinfo) - - client = MasterSlaveConnection(self.master, self.slaves, tz_aware=False) - self.assertEqual(False, client.tz_aware) - db = client.pymongo_test - db.tztest.insert({'dt': dt}, w=w) - self.assertEqual(None, db.tztest.find_one()['dt'].tzinfo) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py new file mode 100644 index 0000000000..56e047fd4b --- /dev/null +++ b/test/test_max_staleness.py @@ -0,0 +1,149 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test maxStalenessSeconds support.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +import warnings +from pathlib import Path + +from pymongo import MongoClient +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test import PyMongoTestCase, client_context, unittest +from test.utils_selection_tests import create_selection_tests + +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestMaxStaleness(PyMongoTestCase): + def test_max_staleness(self): + client = self.simple_client() + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary") + self.assertEqual(-1, client.read_preference.max_staleness) + + # These tests are specified in max-staleness-tests.rst. + with self.assertRaises(ConfigurationError): + # Default read pref "primary" can't be used with max staleness. + self.simple_client("mongodb://a/?maxStalenessSeconds=120") + + with self.assertRaises(ConfigurationError): + # Read pref "primary" can't be used with max staleness. 
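+            # (maxStalenessSeconds only applies when reads can go to
+            # secondaries, so a primary read preference is rejected)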
+            self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120")
+
+        client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client(
+            "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120"
+        )
+        self.assertEqual(120, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1")
+        self.assertEqual(1, client.read_preference.max_staleness)
+
+        client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest")
+        self.assertEqual(-1, client.read_preference.max_staleness)
+
+        with self.assertRaises(TypeError):
+            # Prohibit None.
+            self.simple_client(maxStalenessSeconds=None, readPreference="nearest")
+
+    def test_max_staleness_float(self):
+        with self.assertRaises(TypeError) as ctx:
+            self.rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest")
+
+        self.assertIn("must be an integer", str(ctx.exception))
+
+        with warnings.catch_warnings(record=True) as ctx:
+            warnings.simplefilter("always")
+            client = self.simple_client(
+                "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest"
+            )
+
+            # Option was ignored.
+            self.assertEqual(-1, client.read_preference.max_staleness)
+            self.assertIn("must be an integer", str(ctx[0]))
+
+    def test_max_staleness_zero(self):
+        # Zero is too small.
+        with self.assertRaises(ValueError) as ctx:
+            self.rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest")
+
+        self.assertIn("must be a positive integer", str(ctx.exception))
+
+        with warnings.catch_warnings(record=True) as ctx:
+            warnings.simplefilter("always")
+            client = self.simple_client(
+                "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest"
+            )
+
+            # Option was ignored.
+            self.assertEqual(-1, client.read_preference.max_staleness)
+            self.assertIn("must be a positive integer", str(ctx[0]))
+
+    @client_context.require_replica_set
+    def test_last_write_date(self):
+        # From max-staleness-tests.rst, "Parse lastWriteDate".
+        client = self.rs_or_single_client(heartbeatFrequencyMS=500)
+        client.pymongo_test.test.insert_one({})
+        # Wait for the server description to be updated.
+        time.sleep(1)
+        server = client._topology.select_server(writable_server_selector, _Op.TEST)
+        first = server.description.last_write_date
+        self.assertTrue(first)
+        # The first last_write_date may correspond to an internal server
+        # write; sleep so that the next write does not occur within the same
+        # second.
+        time.sleep(1)
+        client.pymongo_test.test.insert_one({})
+        # Wait for the server description to be updated.
+        time.sleep(1)
+        server = client._topology.select_server(writable_server_selector, _Op.TEST)
+        second = server.description.last_write_date
+        assert first is not None
+        assert second is not None
+        self.assertGreater(second, first)
+        self.assertLess(second, first + 10)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_mongos_ha.py b/test/test_mongos_ha.py
deleted file mode 100644
index 3032f5a375..0000000000
--- a/test/test_mongos_ha.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2013-2014 MongoDB, Inc.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test MongoClient's mongos high-availability features using a mock.""" - -import sys -import threading -import unittest - -sys.path[0:0] = [""] - -from pymongo.errors import AutoReconnect -from test.pymongo_mocks import MockClient - - -class FindOne(threading.Thread): - def __init__(self, client): - super(FindOne, self).__init__() - self.client = client - self.passed = False - - def run(self): - self.client.db.collection.find_one() - self.passed = True # No exception raised. - - -def do_find_one(client, nthreads): - threads = [FindOne(client) for _ in range(nthreads)] - for t in threads: - t.start() - - for t in threads: - t.join() - - for t in threads: - assert t.passed - - -class TestMongosHA(unittest.TestCase): - def mock_client(self, connect): - return MockClient( - standalones=[], - members=[], - mongoses=['a:1', 'b:2', 'c:3'], - host='a:1,b:2,c:3', - _connect=connect) - - def test_lazy_connect(self): - nthreads = 10 - client = self.mock_client(False) - self.assertEqual(0, len(client.nodes)) - - # Trigger initial connection. - do_find_one(client, nthreads) - self.assertEqual(3, len(client.nodes)) - - def test_reconnect(self): - nthreads = 10 - client = self.mock_client(True) - self.assertEqual(3, len(client.nodes)) - - # Trigger reconnect. - client.disconnect() - do_find_one(client, nthreads) - self.assertEqual(3, len(client.nodes)) - - def test_failover(self): - nthreads = 1 - - # ['1:1', '2:2', '3:3', ...] - mock_hosts = ['%d:%d' % (i, i) for i in range(50)] - client = MockClient( - standalones=[], - members=[], - mongoses=mock_hosts, - host=','.join(mock_hosts)) - - self.assertEqual(len(mock_hosts), len(client.nodes)) - - # Our chosen mongos goes down. - client.kill_host('%s:%s' % (client.host, client.port)) - - # Trigger failover. AutoReconnect should be raised exactly once. - errors = [] - passed = [] - - def f(): - try: - client.db.collection.find_one() - except AutoReconnect: - errors.append(True) - - # Second attempt succeeds. - client.db.collection.find_one() - - passed.append(True) - - threads = [threading.Thread(target=f) for _ in range(nthreads)] - for t in threads: - t.start() - - for t in threads: - t.join() - - self.assertEqual(1, len(errors)) - self.assertEqual(nthreads, len(passed)) - - # Down host is still in list. - self.assertEqual(len(mock_hosts), len(client.nodes)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py new file mode 100644 index 0000000000..8c31854343 --- /dev/null +++ b/test/test_mongos_load_balancing.py @@ -0,0 +1,199 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MongoClient's mongos load balancing using a mock.""" +from __future__ import annotations + +import asyncio +import sys +import threading +from test.helpers import ConcurrentRunner + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test import MockClientTest, client_context, connected, unittest +from test.pymongo_mocks import MockClient +from test.utils_shared import wait_until + +from pymongo.errors import AutoReconnect, InvalidOperation +from pymongo.server_selectors import writable_server_selector +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = True + + +class SimpleOp(ConcurrentRunner): + def __init__(self, client): + super().__init__() + self.client = client + self.passed = False + + def run(self): + self.client.db.command("ping") + self.passed = True # No exception raised. + + +def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: + t.start() + + for t in tasks: + t.join() + + for t in tasks: + assert t.passed + + +def writable_addresses(topology): + return { + server.description.address + for server in topology.select_servers(writable_server_selector, _Op.TEST) + } + + +class TestMongosLoadBalancing(MockClientTest): + @client_context.require_connection + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + + def mock_client(self, **kwargs): + mock_client = MockClient( + standalones=[], + members=[], + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", + connect=False, + **kwargs, + ) + self.addCleanup(mock_client.close) + + # Latencies in seconds. + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 + return mock_client + + def test_lazy_connect(self): + # While connected() ensures we can trigger connection from the main + # thread and wait for the monitors, this test triggers connection from + # several threads at once to check for data races. + nthreads = 10 + client = self.mock_client() + self.assertEqual(0, len(client.nodes)) + + # Trigger initial connection. + do_simple_op(client, nthreads) + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + def test_failover(self): + ntasks = 10 + client = connected(self.mock_client(localThresholdMS=0.001)) + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Our chosen mongos goes down. + client.kill_host("a:1") + + # Trigger failover to higher-latency nodes. AutoReconnect should be + # raised at most once in each thread. + passed = [] + + def f(): + try: + client.db.command("ping") + except AutoReconnect: + # Second attempt succeeds. + client.db.command("ping") + + passed.append(True) + + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: + t.start() + + for t in tasks: + t.join() + + self.assertEqual(ntasks, len(passed)) + + # Down host removed from list. 
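+        # (contrast with the removed mock mongos-HA test, where the down host
+        # stayed in the list: SDAM marks the unreachable mongos Unknown and
+        # drops it from the known nodes until it is rediscovered)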
+ self.assertEqual(2, len(client.nodes)) + + def test_local_threshold(self): + client = connected(self.mock_client(localThresholdMS=30)) + self.assertEqual(30, client.options.local_threshold_ms) + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + topology = client._topology + + # All are within a 30-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, writable_addresses(topology)) + + # No error + client.admin.command("ping") + + client = connected(self.mock_client(localThresholdMS=0)) + self.assertEqual(0, client.options.local_threshold_ms) + # No error + client.db.command("ping") + # Our chosen mongos goes down. + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) + try: + client.db.command("ping") + except: + pass + + # We eventually connect to a new mongos. + def connect_to_new_mongos(): + try: + return client.db.command("ping") + except AutoReconnect: + pass + + wait_until(connect_to_new_mongos, "connect to a new mongos") + + def test_load_balancing(self): + # Although the server selection JSON tests already prove that + # select_servers works for sharded topologies, here we do an end-to-end + # test of discovering servers' round trip times and configuring + # localThresholdMS. + client = connected(self.mock_client()) + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Prohibited for topology type Sharded. + with self.assertRaises(InvalidOperation): + client.address + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) + + # a and b are within the 15-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2)}, writable_addresses(topology)) + + client.mock_rtts["a:1"] = 0.045 + + # Discover only b is within latency window. + def predicate(): + return {("b", 2)} == writable_addresses(topology) + + wait_until( + predicate, + 'discover server "a" is too far', + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_monitor.py b/test/test_monitor.py new file mode 100644 index 0000000000..c10662c893 --- /dev/null +++ b/test/test_monitor.py @@ -0,0 +1,121 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the monitor module.""" +from __future__ import annotations + +import asyncio +import gc +import subprocess +import sys +import warnings +from functools import partial + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, connected, unittest +from test.utils import ( + wait_until, +) +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched + +from pymongo.periodic_executor import _EXECUTORS + +_IS_SYNC = True + + +def unregistered(ref): + gc.collect() + return ref not in _EXECUTORS + + +def get_executors(client): + executors = [] + for server in client._topology._servers.values(): + executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) + executors.append(client._kill_cursors_executor) + executors.append(client._topology._Topology__events_executor) + return [e for e in executors if e is not None] + + +class TestMonitor(IntegrationTest): + def create_client(self): + listener = ServerAndTopologyEventListener() + client = self.unmanaged_single_client(event_listeners=[listener]) + connected(client) + return client + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) + def test_cleanup_executors_on_client_del(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + client = self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + + del executors + del client + + for ref, name in executor_refs: + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) + + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call MongoClient.close() to safely shut down your client and free up resources." + in str(warning.message) + ): + return True + return False + + wait_until(resource_warning_caught, "catch resource warning") + + def test_cleanup_executors_on_client_close(self): + client = self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + client.close() + + for executor in executors: + wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) + + @client_context.require_sync + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the MongoClient spawns a new thread + on process shutdown.""" + command = [ + sys.executable, + "-c", + "from pymongo import MongoClient; c = MongoClient()", + ] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py new file mode 100644 index 0000000000..f5a18af9ed --- /dev/null +++ b/test/test_monitoring.py @@ -0,0 +1,1268 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import asyncio +import copy +import datetime +import sys +import time +from typing import Any + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + wait_until, +) + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestCommandMonitoring(IntegrationTest): + listener: EventListener + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + + @client_context.require_connection + def setUp(self) -> None: + super().setUp() + self.listener.reset() + self.client = self.rs_or_single_client(event_listeners=[self.listener], retryWrites=False) + + def test_started_simple(self): + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + def test_succeeded_simple(self): + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual("ping", succeeded.command_name) + self.assertEqual(self.client.address, succeeded.connection_id) + self.assertEqual(1, succeeded.reply.get("ok")) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) + + def test_failed_simple(self): + try: + self.client.pymongo_test.command("oops!") + except OperationFailure: + pass + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("oops!", failed.command_name) + self.assertEqual(self.client.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + + def test_find_one(self): + 
self.client.pymongo_test.test.find_one() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + def test_find_and_get_more(self): + self.client.pymongo_test.test.drop() + self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) + for _ in range(4): + next(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) + + self.listener.reset() + # Next batch. Exhausting the cursor could cause a getMore + # that returns id of 0 and no results. + next(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) + finally: + # Exhaust the cursor to avoid kill cursors. 
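+            # (an unexhausted cursor would later be garbage collected and
+            # send a killCursors command, adding unrelated events to the
+            # listener)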
+ tuple(cursor.to_list()) + + def test_find_with_explain(self): + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) + self.client.pymongo_test.test.drop() + self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + res = coll.find().explain() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(cmd, started.command) + self.assertEqual("explain", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("explain", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(self.client.address, succeeded.connection_id) + self.assertEqual(res, succeeded.reply) + + def _test_find_options(self, query, expected_cmd): + coll = self.client.pymongo_test.test + coll.drop() + coll.create_index("x") + coll.insert_many([{"x": i} for i in range(5)]) + + # Test that we publish the unwrapped command. + self.listener.reset() + if self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + + cursor = coll.find(**query) + + next(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(expected_cmd, started.command) + self.assertEqual("find", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(self.client.address, succeeded.connection_id) + finally: + # Exhaust the cursor to avoid kill cursors. 
+ tuple(cursor.to_list()) + + def test_find_options(self): + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } + + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } + + if client_context.version < (4, 1, 0, -1): + query["max_scan"] = 10 + cmd["maxScan"] = 10 + + self._test_find_options(query, cmd) + + @client_context.require_version_max(3, 7, 2) + def test_find_snapshot(self): + # Test "snapshot" parameter separately, can't combine with "sort". + query = {"filter": {}, "snapshot": True} + + cmd = {"find": "test", "filter": {}, "snapshot": True} + + self._test_find_options(query, cmd) + + def test_command_and_get_more(self): + self.client.pymongo_test.test.drop() + self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) + for _ in range(4): + next(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("aggregate", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) + + self.listener.reset() + next(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + 
self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } + self.assertEqualReply(expected_result, succeeded.reply) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(cursor.to_list()) + + def test_get_more_failure(self): + address = self.client.address + coll = self.client.pymongo_test.test + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} + cursor = CommandCursor(coll, cursor_doc, address) + try: + next(cursor) + except Exception: + pass + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual("getMore", failed.command_name) + self.assertIsInstance(failed.request_id, int) + self.assertEqual(cursor.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + + @client_context.require_replica_set + @client_context.require_secondaries_count(1) + def test_not_primary_error(self): + address = next(iter(client_context.client.secondaries)) + client = self.single_client(*address, event_listeners=[self.listener]) + # Clear authentication command results from the listener. 
+ client.admin.command("ping") + self.listener.reset() + error = None + try: + client.pymongo_test.test.find_one_and_delete({}) + except NotPrimaryError as exc: + error = exc.errors + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("findAndModify", failed.command_name) + self.assertEqual(address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual(error, failed.failure) + + @client_context.require_no_mongos + def test_exhaust(self): + self.client.pymongo_test.test.drop() + self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find( + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) + next(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } + self.assertEqualReply(expected_result, succeeded.reply) + + self.listener.reset() + tuple(cursor.to_list()) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertIsInstance(event, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual("pymongo_test", event.database_name) + self.assertIsInstance(event.request_id, int) + for event in self.listener.succeeded_events: + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) + self.assertEqual("getMore", event.command_name) + self.assertIsInstance(event.request_id, int) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. 
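+ # The server signals exhaustion by returning a "cursor.id" of 0, which
+ # is what tells the driver to stop issuing further getMore commands.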
+ self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) + + def test_kill_cursors(self): + with client_knobs(kill_cursor_frequency=0.01): + self.client.pymongo_test.test.drop() + self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + cursor = self.client.pymongo_test.test.find().batch_size(5) + next(cursor) + cursor_id = cursor.cursor_id + self.listener.reset() + cursor.close() + time.sleep(2) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) + self.assertIs(type(started.connection_id), tuple) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("killCursors", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertIs(type(succeeded.connection_id), tuple) + self.assertEqual(cursor.address, succeeded.connection_id) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] + ) + + def test_non_bulk_writes(self): + coll = self.client.pymongo_test.test + coll.drop() + self.listener.reset() + + # Implied write concern insert_one + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # Unacknowledged insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=0)) + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + 
self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqualReply(succeeded.reply, {"ok": 1}) + + # Explicit write concern insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=1)) + res = coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_many + self.listener.reset() + res = coll.delete_many({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) + + # replace_one + self.listener.reset() + oid = ObjectId() + res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", 
started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) + + # update_one + self.listener.reset() + res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # update_many + self.listener.reset() + res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_one + self.listener.reset() + _ = coll.delete_one({"x": 3}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", 
{"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + self.assertEqual(0, coll.count_documents({})) + + # write errors + coll.insert_one({"_id": 1}) + try: + self.listener.reset() + coll.insert_one({"_id": 1}) + except OperationFailure: + pass + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") + self.assertIsInstance(errors, list) + error = errors[0] + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) + + def test_insert_many(self): + # This always uses the bulk API. 
+ coll = self.client.pymongo_test.test + coll.drop() + self.listener.reset() + + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] + coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + count = 0 + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + reply = succeed.reply + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) + self.assertEqual(documents, docs) + self.assertEqual(6, count) + + def test_insert_many_unacknowledged(self): + coll = self.client.pymongo_test.test + coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.reset() + + # Force two batches on legacy servers. 
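+ # Six ~12 MiB documents total roughly 72 MiB, more than the server's
+ # 48 MB maxMessageSizeBytes, so even this w=0 insert must be split into
+ # at least two insert commands.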
+ big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] + unack_coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get("ok")) + self.assertEqual(documents, docs) + + def check(): + return coll.count_documents({}) == 6 + + wait_until(check, "insert documents with w=0") + + def test_bulk_write(self): + coll = self.client.pymongo_test.test + coll.drop() + self.listener.reset() + + coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + self.assertEqual(3, len(pairs)) + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) + self.assertEqualCommand(expected, started[0].command) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) + self.assertEqualCommand(expected, started[1].command) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) + self.assertEqualCommand(expected, started[2].command) + + @client_context.require_failCommand_fail_point + def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_network_error = { + "configureFailPoint": "failCommand", + 
"mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + }, + } + with self.fail_point(insert_network_error): + with self.assertRaises(AutoReconnect): + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["errtype"], "AutoReconnect") + self.assertTrue(event.failure["errmsg"]) + + @client_context.require_failCommand_fail_point + def test_bulk_write_command_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_command_error = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": False, + "errorCode": 10107, # Not primary + }, + } + with self.fail_point(insert_command_error): + with self.assertRaises(NotPrimaryError): + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["code"], 10107) + self.assertTrue(event.failure["errmsg"]) + + def test_write_errors(self): + coll = self.client.pymongo_test.test + coll.drop() + self.listener.reset() + + try: + coll.bulk_write( + [ + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + DeleteOne({"_id": 1}), + ], + ordered=False, + ) + except OperationFailure: + pass + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + errors = [] + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + if "writeErrors" in succeed.reply: + errors.extend(succeed.reply["writeErrors"]) + + self.assertEqual(2, len(errors)) + fields = {"index", "code", "errmsg"} + for error in errors: + self.assertLessEqual(fields, set(error)) + + def test_first_batch_helper(self): + # Regardless of server version and use of helpers._first_batch + # this test should still pass. 
+ self.listener.reset() + tuple((self.client.pymongo_test.test.list_indexes()).to_list()) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON([("listIndexes", "test"), ("cursor", {})]) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) + + self.listener.reset() + + @client_context.require_version_max(6, 1, 99) + def test_sensitive_commands(self): + listener = EventListener() + client = self.rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners + + listener.reset() + cmd = SON([("getnonce", 1)]) + listeners.publish_command_start(cmd, "pymongo_test", 12345, client.address, None) # type: ignore[arg-type] + delta = datetime.timedelta(milliseconds=100) + listeners.publish_command_success( + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + self.client.address, # type: ignore[arg-type] + None, + database_name="pymongo_test", + ) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqual({}, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual(succeeded.duration_micros, 100000) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqual({}, succeeded.reply) + + +class TestGlobalListener(IntegrationTest): + listener: EventListener + saved_listeners: Any + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + # We plan to call register(), which internally modifies _LISTENERS. + cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) + monitoring.register(cls.listener) + + @client_context.require_connection + def setUp(self): + super().setUp() + self.listener.reset() + self.client = self.single_client() + # Get one (authenticated) socket in the pool. 
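+ # Establishing one authenticated connection up front means later test
+ # commands reuse it instead of adding handshake traffic to the global
+ # listener's event stream.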
+ self.client.pymongo_test.command("ping") + + @classmethod + def tearDownClass(cls): + monitoring._LISTENERS = cls.saved_listeners + + def test_simple(self): + self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + +class TestEventClasses(unittest.TestCase): + def test_command_event_repr(self): + request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin" + event = monitoring.CommandStartedEvent( + {"ping": 1}, db_name, request_id, connection_id, operation_id + ) + self.assertEqual( + repr(event), + "", + ) + delta = datetime.timedelta(milliseconds=100) + event = monitoring.CommandSucceededEvent( + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.CommandFailedEvent( + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_heartbeat_event_repr(self): + connection_id = ("localhost", 27017) + event = monitoring.ServerHeartbeatStartedEvent(connection_id) + self.assertEqual( + repr(event), "" + ) + delta = 0.1 + event = monitoring.ServerHeartbeatSucceededEvent( + delta, + {"ok": 1}, # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerHeartbeatFailedEvent( + delta, + "ERROR", # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_event_repr(self): + server_address = ("localhost", 27017) + topology_id = ObjectId("000000000000000000000001") + event = monitoring.ServerOpeningEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + server_address, + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerClosedEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + + def test_topology_event_repr(self): + topology_id = ObjectId("000000000000000000000001") + event = monitoring.TopologyOpenedEvent(topology_id) + self.assertEqual(repr(event), "") + event = monitoring.TopologyDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.TopologyClosedEvent(topology_id) + self.assertEqual(repr(event), "") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_objectid.py b/test/test_objectid.py index dc33366b22..dbc61951d1 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,23 +13,21 @@ # limitations under the License. 
"""Tests for the objectid module.""" +from __future__ import annotations import datetime import pickle -import unittest +import struct import sys -import time + sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest +from test import SkipTest, unittest +from test.utils_shared import oid_generated_on_process from bson.errors import InvalidId -from bson.objectid import ObjectId -from bson.py3compat import b, binary_type -from bson.tz_util import (FixedOffset, - utc) - -PY3 = sys.version_info[0] == 3 +from bson.objectid import _MAX_COUNTER_VALUE, ObjectId +from bson.tz_util import FixedOffset, utc def oid(x): @@ -46,95 +44,67 @@ def test_creation(self): self.assertRaises(InvalidId, ObjectId, "12345678901") self.assertRaises(InvalidId, ObjectId, "1234567890123") self.assertTrue(ObjectId()) - self.assertTrue(ObjectId(b("123456789012"))) + self.assertTrue(ObjectId(b"123456789012")) a = ObjectId() self.assertTrue(ObjectId(a)) def test_unicode(self): a = ObjectId() - self.assertEqual(a, ObjectId(unicode(a))) - self.assertEqual(ObjectId("123456789012123456789012"), - ObjectId(u"123456789012123456789012")) - self.assertRaises(InvalidId, ObjectId, u"hello") + self.assertEqual(a, ObjectId(a)) + self.assertRaises(InvalidId, ObjectId, "hello") def test_from_hex(self): ObjectId("123456789012123456789012") self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12") - self.assertRaises(InvalidId, ObjectId, u"123456789012123456789G12") def test_repr_str(self): - self.assertEqual(repr(ObjectId("1234567890abcdef12345678")), - "ObjectId('1234567890abcdef12345678')") - self.assertEqual(str(ObjectId("1234567890abcdef12345678")), - "1234567890abcdef12345678") - self.assertEqual(str(ObjectId(b("123456789012"))), - "313233343536373839303132") - self.assertEqual(ObjectId("1234567890abcdef12345678").binary, - b('\x124Vx\x90\xab\xcd\xef\x124Vx')) - self.assertEqual(str(ObjectId(b('\x124Vx\x90\xab\xcd\xef\x124Vx'))), - "1234567890abcdef12345678") + self.assertEqual( + repr(ObjectId("1234567890abcdef12345678")), "ObjectId('1234567890abcdef12345678')" + ) + self.assertEqual(str(ObjectId("1234567890abcdef12345678")), "1234567890abcdef12345678") + self.assertEqual(str(ObjectId(b"123456789012")), "313233343536373839303132") + self.assertEqual( + ObjectId("1234567890abcdef12345678").binary, b"\x124Vx\x90\xab\xcd\xef\x124Vx" + ) + self.assertEqual( + str(ObjectId(b"\x124Vx\x90\xab\xcd\xef\x124Vx")), "1234567890abcdef12345678" + ) def test_equality(self): a = ObjectId() self.assertEqual(a, ObjectId(a)) - self.assertEqual(ObjectId(b("123456789012")), - ObjectId(b("123456789012"))) + self.assertEqual(ObjectId(b"123456789012"), ObjectId(b"123456789012")) self.assertNotEqual(ObjectId(), ObjectId()) - self.assertNotEqual(ObjectId(b("123456789012")), b("123456789012")) + self.assertNotEqual(ObjectId(b"123456789012"), b"123456789012") # Explicitly test inequality self.assertFalse(a != ObjectId(a)) - self.assertFalse(ObjectId(b("123456789012")) != - ObjectId(b("123456789012"))) + self.assertFalse(ObjectId(b"123456789012") != ObjectId(b"123456789012")) def test_binary_str_equivalence(self): a = ObjectId() self.assertEqual(a, ObjectId(a.binary)) self.assertEqual(a, ObjectId(str(a))) - def test_multiprocessing(self): - # multiprocessing on windows is weird and I don't feel like figuring it - # out right now. this should fix buildbot. 
- if sys.platform == "win32": - raise SkipTest("Can't fork on Windows") - - try: - import multiprocessing - except ImportError: - raise SkipTest("No multiprocessing module") - - pool = multiprocessing.Pool(2) - ids = pool.map(oid, range(20)) - pool.close() - pool.join() - - map = {} - - for id in ids: - self.assertTrue(id not in map) - map[id] = True - def test_generation_time(self): - d1 = datetime.datetime.utcnow() + d1 = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d2 = ObjectId().generation_time self.assertEqual(utc, d2.tzinfo) d2 = d2.replace(tzinfo=None) - self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) + self.assertLess(d2 - d1, datetime.timedelta(seconds=2)) def test_from_datetime(self): - if 'PyPy 1.8.0' in sys.version: - # See https://bugs.pypy.org/issue1092 - raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") - d = datetime.datetime.utcnow() + d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) self.assertEqual("0" * 16, str(oid)[8:]) - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) + offset = aware.utcoffset() + assert offset is not None + as_utc = (aware - offset).replace(tzinfo=utc) oid = ObjectId.from_datetime(aware) self.assertEqual(as_utc, oid.generation_time) @@ -145,42 +115,38 @@ def test_pickling(self): self.assertEqual(orig, pickle.loads(pkl)) def test_pickle_backwards_compatability(self): - # For a full discussion see http://bugs.python.org/issue6137 - if sys.version.startswith('3.0'): - raise SkipTest("Python 3.0.x can't unpickle " - "objects pickled in Python 2.x.") - # This string was generated by pickling an ObjectId in pymongo # version 1.9 - pickled_with_1_9 = b( - "ccopy_reg\n_reconstructor\np0\n" - "(cbson.objectid\nObjectId\np1\nc__builtin__\n" - "object\np2\nNtp3\nRp4\n" - "(dp5\nS'_ObjectId__id'\np6\n" - "S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.") + pickled_with_1_9 = ( + b"ccopy_reg\n_reconstructor\np0\n" + b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" + b"object\np2\nNtp3\nRp4\n" + b"(dp5\nS'_ObjectId__id'\np6\n" + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb." + ) # We also test against a hardcoded "New" pickle format so that we # make sure we're backward compatible with the current version in # the future as well. - pickled_with_1_10 = b( - "ccopy_reg\n_reconstructor\np0\n" - "(cbson.objectid\nObjectId\np1\nc__builtin__\n" - "object\np2\nNtp3\nRp4\n" - "S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." - ) - - if PY3: - # Have to load using 'latin-1' since these were pickled in python2.x. - oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') - oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') - else: - oid_1_9 = pickle.loads(pickled_with_1_9) - oid_1_10 = pickle.loads(pickled_with_1_10) + pickled_with_1_10 = ( + b"ccopy_reg\n_reconstructor\np0\n" + b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" + b"object\np2\nNtp3\nRp4\n" + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." + ) + + # Have to load using 'latin-1' since these were pickled in python2.x. 
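+ # latin-1 maps bytes 0-255 one-to-one onto code points 0-255, so it
+ # round-trips the raw Python 2 str bytes embedded in these pickles,
+ # e.g. bytes(range(256)).decode("latin-1").encode("latin-1") gives the
+ # original bytes back.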
+ oid_1_9 = pickle.loads(pickled_with_1_9, encoding="latin-1") + oid_1_10 = pickle.loads(pickled_with_1_10, encoding="latin-1") self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) self.assertEqual(oid_1_9, oid_1_10) + def test_random_bytes(self): + self.assertTrue(oid_generated_on_process(ObjectId())) + def test_is_valid(self): + self.assertFalse(ObjectId.is_valid(None)) self.assertFalse(ObjectId.is_valid(4)) self.assertFalse(ObjectId.is_valid(175.0)) self.assertFalse(ObjectId.is_valid({"test": 4})) @@ -189,8 +155,47 @@ def test_is_valid(self): self.assertFalse(ObjectId.is_valid("12345678901")) self.assertFalse(ObjectId.is_valid("1234567890123")) - self.assertTrue(ObjectId.is_valid(b("123456789012"))) + self.assertTrue(ObjectId.is_valid(b"123456789012")) self.assertTrue(ObjectId.is_valid("123456789012123456789012")) + def test_counter_overflow(self): + # Spec-test to check counter overflows from max value to 0. + ObjectId._inc = _MAX_COUNTER_VALUE + ObjectId() + self.assertEqual(ObjectId._inc, 0) + + def test_timestamp_values(self): + # Spec-test to check timestamp field is interpreted correctly. + TEST_DATA = { + 0x00000000: (1970, 1, 1, 0, 0, 0), + 0x7FFFFFFF: (2038, 1, 19, 3, 14, 7), + 0x80000000: (2038, 1, 19, 3, 14, 8), + 0xFFFFFFFF: (2106, 2, 7, 6, 28, 15), + } + + def generate_objectid_with_timestamp(timestamp): + oid = ObjectId() + _, trailing_bytes = struct.unpack(">IQ", oid.binary) + new_oid = struct.pack(">IQ", timestamp, trailing_bytes) + return ObjectId(new_oid) + + for tstamp, exp_datetime_args in TEST_DATA.items(): + oid = generate_objectid_with_timestamp(tstamp) + # 32-bit platforms may overflow in datetime.fromtimestamp. + if tstamp > 0x7FFFFFFF and sys.maxsize < 2**32: + try: + oid.generation_time + except (OverflowError, ValueError): + continue + self.assertEqual(oid.generation_time, datetime.datetime(*exp_datetime_args, tzinfo=utc)) + + def test_random_regenerated_on_pid_change(self): + # Test that change of pid triggers new random number generation. + random_original = ObjectId._random() + ObjectId._pid += 1 + random_new = ObjectId._random() + self.assertNotEqual(random_original, random_new) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py new file mode 100644 index 0000000000..1cc025ccb2 --- /dev/null +++ b/test/test_ocsp_cache.py @@ -0,0 +1,138 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the pymongo ocsp_support module.""" +from __future__ import annotations + +import random +import sys +from collections import namedtuple +from datetime import datetime, timedelta, timezone +from os import urandom +from time import sleep +from typing import Any + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.ocsp_cache import _OCSPCache + + +class TestOcspCache(unittest.TestCase): + MockHashAlgorithm: Any + MockOcspRequest: Any + MockOcspResponse: Any + + @classmethod + def setUpClass(cls): + cls.MockHashAlgorithm = namedtuple("MockHashAlgorithm", ["name"]) # type: ignore + cls.MockOcspRequest = namedtuple( # type: ignore + "MockOcspRequest", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) + cls.MockOcspResponse = namedtuple( # type: ignore + "MockOcspResponse", ["this_update", "next_update"] + ) + + def setUp(self): + self.cache = _OCSPCache() + + def _create_mock_request(self): + hash_algorithm = self.MockHashAlgorithm(random.choice(["sha1", "md5", "sha256"])) + issuer_name_hash = urandom(8) + issuer_key_hash = urandom(8) + serial_number = random.randint(0, 10**10) + return self.MockOcspRequest( + hash_algorithm=hash_algorithm, + issuer_name_hash=issuer_name_hash, + issuer_key_hash=issuer_key_hash, + serial_number=serial_number, + ) + + def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): + now = datetime.now(tz=timezone.utc).replace(tzinfo=None) + this_update = now + timedelta(seconds=this_update_delta_seconds) + if next_update_delta_seconds is not None: + next_update = now + timedelta(seconds=next_update_delta_seconds) + else: + next_update = None + return self.MockOcspResponse(this_update=this_update, next_update=next_update) + + def _add_mock_cache_entry(self, mock_request, mock_response): + key = self.cache._get_cache_key(mock_request) + self.cache._data[key] = mock_response + + def test_simple(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +3600) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. + self.assertEqual(self.cache[request], response) + + # Valid entries with an earlier next_update have no effect. + response_1 = self._create_mock_response(-20, +1800) + self.cache[request] = response_1 + self.assertEqual(self.cache[request], response) + + # Invalid entries with a later this_update have no effect. + response_2 = self._create_mock_response(+20, +1800) + self.cache[request] = response_2 + self.assertEqual(self.cache[request], response) + + # Invalid entries with passed next_update have no effect. + response_3 = self._create_mock_response(-10, -5) + self.cache[request] = response_3 + self.assertEqual(self.cache[request], response) + + # Valid entries with a later next_update update the cache. + response_new = self._create_mock_response(-5, +7200) + self.cache[request] = response_new + self.assertEqual(self.cache[request], response_new) + + # Entries with an unset next_update purge the cache. + response_notset = self._create_mock_response(-5, None) + self.cache[request] = response_notset + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_invalidate(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +0.25) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. 
+ self.assertEqual(self.cache[request], response) + + # Wait for entry to become invalid and ensure KeyError is raised. + sleep(0.5) + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_non_existent(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +10) + self._add_mock_cache_entry(request, response) + + # Attempt to retrieve non-existent entry must raise KeyError. + with self.assertRaises(KeyError): + _ = self.cache[self._create_mock_request()] + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py new file mode 100644 index 0000000000..648e46815a --- /dev/null +++ b/test/test_on_demand_csfle.py @@ -0,0 +1,115 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +import pytest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context + +from bson.codec_options import CodecOptions +from pymongo.synchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + ClientEncryption, + EncryptionError, +) + +_IS_SYNC = True + +pytestmark = pytest.mark.kms + + +class TestonDemandGCPCredentials(IntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUp(self): + super().setUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(IntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUp(self): + super().setUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), 
"Not testing FLE Azure auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/test/test_operations.py b/test/test_operations.py new file mode 100644 index 0000000000..3ee6677735 --- /dev/null +++ b/test/test_operations.py @@ -0,0 +1,80 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the operations module.""" +from __future__ import annotations + +from test import UnitTest, unittest + +from pymongo import ASCENDING, DESCENDING +from pymongo.collation import Collation +from pymongo.errors import OperationFailure +from pymongo.operations import IndexModel, SearchIndexModel + + +class TestOperationsBase(UnitTest): + """Base class for testing operations module.""" + + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + +class TestIndexModel(TestOperationsBase): + """Test IndexModel features.""" + + def test_repr(self): + # Based on examples in test_collection.py + self.assertRepr(IndexModel("hello")) + self.assertRepr(IndexModel([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertRepr( + IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") + ) + # Test all the kwargs + self.assertRepr(IndexModel("name", name="name")) + self.assertRepr(IndexModel("unique", unique=False)) + self.assertRepr(IndexModel("background", background=True)) + self.assertRepr(IndexModel("sparse", sparse=True)) + self.assertRepr(IndexModel("bucketSize", bucketSize=1)) + self.assertRepr(IndexModel("min", min=1)) + self.assertRepr(IndexModel("max", max=1)) + self.assertRepr(IndexModel("expireAfterSeconds", expireAfterSeconds=1)) + self.assertRepr( + IndexModel("partialFilterExpression", partialFilterExpression={"hello": "world"}) + ) + self.assertRepr(IndexModel("collation", collation=Collation(locale="en_US"))) + self.assertRepr(IndexModel("wildcardProjection", wildcardProjection={"$**": 1})) + self.assertRepr(IndexModel("hidden", hidden=False)) + # Test string literal + self.assertEqual(repr(IndexModel("hello")), "IndexModel({'hello': 1}, 
name='hello_1')") + self.assertEqual( + repr(IndexModel({"hello": 1, "world": -1})), + "IndexModel({'hello': 1, 'world': -1}, name='hello_1_world_-1')", + ) + + +class TestSearchIndexModel(TestOperationsBase): + """Test SearchIndexModel features.""" + + def test_repr(self): + self.assertRepr(SearchIndexModel({"hello": "hello"}, key=1)) + self.assertEqual( + repr(SearchIndexModel({"hello": "hello"}, key=1)), + "SearchIndexModel(definition={'hello': 'hello'}, key=1)", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_pooling.py b/test/test_pooling.py index 1aa07d0954..cb5b206996 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,168 +13,599 @@ # limitations under the License. """Test built in connection-pooling with threads.""" +from __future__ import annotations +import asyncio +import gc +import random +import socket import sys -import thread import time -import unittest +from test.utils import flaky, get_pool, joinall + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.son import SON +from pymongo import MongoClient, message, timeout +from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError +from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest - -from test import host, port -from test.test_pooling_base import ( - _TestPooling, _TestMaxPoolSize, _TestMaxOpenSockets, - _TestPoolSocketSharing, _TestWaitQueueMultiple, one) -from test.utils import get_pool - - -class TestPoolingThreads(_TestPooling, unittest.TestCase): - use_greenlets = False - - def test_request_with_fork(self): - if sys.platform == "win32": - raise SkipTest("Can't test forking on Windows") - - try: - from multiprocessing import Process, Pipe - except ImportError: - raise SkipTest("No multiprocessing module") - - coll = self.c.pymongo_test.test - coll.remove() - coll.insert({'_id': 1}) - coll.find_one() - self.assert_pool_size(1) - self.c.start_request() - self.assert_pool_size(1) - coll.find_one() - self.assert_pool_size(0) - self.assert_request_with_socket() - - def f(pipe): - # We can still query server without error - self.assertEqual({'_id':1}, coll.find_one()) - - # Pool has detected that we forked, but resumed the request - self.assert_request_with_socket() - self.assert_pool_size(0) - pipe.send("success") - - parent_conn, child_conn = Pipe() - p = Process(target=f, args=(child_conn,)) - p.start() - p.join(1) - p.terminate() - child_conn.close() - self.assertEqual("success", parent_conn.recv()) - - def test_primitive_thread(self): - p = self.get_pool((host, port), 10, None, None, False) +from test import IntegrationTest, client_context, unittest +from test.helpers import ConcurrentRunner +from test.utils_shared import delay + +from pymongo.socket_checker import SocketChecker +from pymongo.synchronous.pool import Pool, PoolOptions + +_IS_SYNC = True + + +N = 10 +DB = "pymongo-pooling-tests" + + +def gc_collect_until_done(tasks, timeout=60): + start = time.time() + running = list(tasks) + while running: + assert (time.time() - start) < timeout, "Tasks timed out" + for t in running: + t.join(0.1) + if not t.is_alive(): + running.remove(t) + gc.collect() + + +class MongoTask(ConcurrentRunner): + """A thread/Task that uses a 
MongoClient.""" + + def __init__(self, client): + super().__init__() + self.daemon = True # Don't hang whole test if task hangs. + self.client = client + self.db = self.client[DB] + self.passed = False + + def run(self): + self.run_mongo_thread() + self.passed = True - # Test that start/end_request work with a thread begun from thread - # module, rather than threading module - lock = thread.allocate_lock() - lock.acquire() + def run_mongo_thread(self): + raise NotImplementedError - sock_ids = [] - def run_in_request(): - p.start_request() - sock0 = p.get_socket() - sock1 = p.get_socket() - sock_ids.extend([id(sock0), id(sock1)]) - p.maybe_return_socket(sock0) - p.maybe_return_socket(sock1) - p.end_request() - lock.release() +class InsertOneAndFind(MongoTask): + def run_mongo_thread(self): + for _ in range(N): + rand = random.randint(0, N) + _id = (self.db.sf.insert_one({"x": rand})).inserted_id + assert rand == (self.db.sf.find_one(_id))["x"] - thread.start_new_thread(run_in_request, ()) - # Join thread - acquired = False - for i in range(30): - time.sleep(0.5) - acquired = lock.acquire(0) - if acquired: - break +class Unique(MongoTask): + def run_mongo_thread(self): + for _ in range(N): + self.db.unique.insert_one({}) # no error - self.assertTrue(acquired, "Thread is hung") - self.assertEqual(sock_ids[0], sock_ids[1]) - def test_pool_with_fork(self): - # Test that separate MongoClients have separate Pools, and that the - # driver can create a new MongoClient after forking - if sys.platform == "win32": - raise SkipTest("Can't test forking on Windows") +class NonUnique(MongoTask): + def run_mongo_thread(self): + for _ in range(N): + try: + self.db.unique.insert_one({"_id": "jesse"}) + except DuplicateKeyError: + pass + else: + raise AssertionError("Should have raised DuplicateKeyError") - try: - from multiprocessing import Process, Pipe - except ImportError: - raise SkipTest("No multiprocessing module") - a = self.get_client(auto_start_request=False) - a.pymongo_test.test.remove() - a.pymongo_test.test.insert({'_id':1}) - a.pymongo_test.test.find_one() - self.assertEqual(1, len(get_pool(a).sockets)) - a_sock = one(get_pool(a).sockets) +class SocketGetter(MongoTask): + """Utility for TestPooling. - def loop(pipe): - c = self.get_client(auto_start_request=False) - self.assertEqual(1,len(get_pool(c).sockets)) - c.pymongo_test.test.find_one() - self.assertEqual(1,len(get_pool(c).sockets)) - pipe.send(one(get_pool(c).sockets).sock.getsockname()) - - cp1, cc1 = Pipe() - cp2, cc2 = Pipe() - - p1 = Process(target=loop, args=(cc1,)) - p2 = Process(target=loop, args=(cc2,)) - - p1.start() - p2.start() - - p1.join(1) - p2.join(1) - - p1.terminate() - p2.terminate() - - p1.join() - p2.join() - - cc1.close() - cc2.close() + Checks out a socket and holds it forever. Used in + test_no_wait_queue_timeout. + """ - b_sock = cp1.recv() - c_sock = cp2.recv() - self.assertTrue(a_sock.sock.getsockname() != b_sock) - self.assertTrue(a_sock.sock.getsockname() != c_sock) - self.assertTrue(b_sock != c_sock) + def __init__(self, client, pool): + super().__init__(client) + self.state = "init" + self.pool = pool + self.sock = None - # a_sock, created by parent process, is still in the pool - d_sock = get_pool(a).get_socket() - self.assertEqual(a_sock, d_sock) - d_sock.close() + def run_mongo_thread(self): + self.state = "get_socket" + # Call 'pin_cursor' so we can hold the socket. 
+        with self.pool.checkout() as sock:
+            sock.pin_cursor()
+            self.sock = sock
 
-        # a_sock, created by parent process, is still in the pool
-        d_sock = get_pool(a).get_socket()
-        self.assertEqual(a_sock, d_sock)
-        d_sock.close()
-
-
-class TestMaxPoolSizeThreads(_TestMaxPoolSize, unittest.TestCase):
-    use_greenlets = False
 
+        self.state = "connection"
 
+    def release_conn(self):
+        if self.sock:
+            self.sock.unpin()
+            self.sock = None
+            return True
+        return False
 
-class TestPoolSocketSharingThreads(_TestPoolSocketSharing, unittest.TestCase):
-    use_greenlets = False
 
+def run_cases(client, cases):
+    tasks = []
+    n_runs = 5
 
-class TestMaxOpenSocketsThreads(_TestMaxOpenSockets, unittest.TestCase):
-    use_greenlets = False
 
+    for case in cases:
+        for _i in range(n_runs):
+            t = case(client)
+            t.start()
+            tasks.append(t)
+
+    for t in tasks:
+        t.join()
 
-class TestWaitQueueMultipleThreads(_TestWaitQueueMultiple, unittest.TestCase):
-    use_greenlets = False
 
+    for t in tasks:
+        assert t.passed, "%s.run() threw an exception" % repr(t)
+
+
+class _TestPoolingBase(IntegrationTest):
+    """Base class for all connection-pool tests."""
+
+    @client_context.require_connection
+    def setUp(self):
+        super().setUp()
+        self.c = self.rs_or_single_client()
+        db = self.c[DB]
+        db.unique.drop()
+        db.test.drop()
+        db.unique.insert_one({"_id": "jesse"})
+        db.test.insert_many([{} for _ in range(10)])
+
+    def create_pool(self, pair=None, *args, **kwargs):
+        if pair is None:
+            pair = (client_context.host, client_context.port)
+        # Start the pool with the correct ssl options.
+        pool_options = client_context.client._topology_settings.pool_options
+        kwargs["ssl_context"] = pool_options._ssl_context
+        kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames
+        kwargs["server_api"] = pool_options.server_api
+        pool = Pool(pair, PoolOptions(*args, **kwargs))
+        pool.ready()
+        return pool
+
+
+class TestPooling(_TestPoolingBase):
+    def test_max_pool_size_validation(self):
+        host, port = client_context.host, client_context.port
+        self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize=-1)
+
+        self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize="foo")
+
+        c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False)
+        self.assertEqual(c.options.pool_options.max_pool_size, 100)
+
+    def test_no_disconnect(self):
+        run_cases(self.c, [NonUnique, Unique, InsertOneAndFind])
+
+    def test_pool_reuses_open_socket(self):
+        # Test Pool's _check_closed() method doesn't close a healthy socket.
+        cx_pool = self.create_pool(max_pool_size=10)
+        cx_pool._check_interval_seconds = 0  # Always check.
+        with cx_pool.checkout() as conn:
+            pass
+
+        with cx_pool.checkout() as new_connection:
+            self.assertEqual(conn, new_connection)
+
+        self.assertEqual(1, len(cx_pool.conns))
+
+    def test_get_socket_and_exception(self):
+        # checkout() returns the connection after a non-network error.
+        cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
+        with self.assertRaises(ZeroDivisionError):
+            with cx_pool.checkout() as conn:
+                1 / 0
+
+        # Socket was returned, not closed.
+        with cx_pool.checkout() as new_connection:
+            self.assertEqual(conn, new_connection)
+
+        self.assertEqual(1, len(cx_pool.conns))
+
+    def test_pool_removes_closed_socket(self):
+        # Test that Pool removes explicitly closed socket.
+        cx_pool = self.create_pool()
+
+        with cx_pool.checkout() as conn:
+            # Use Connection's API to close the socket.
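+            # close_conn() closes the underlying socket, so the pool is
+            # expected to discard this connection at check-in (asserted below).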
+            conn.close_conn(None)
+
+        self.assertEqual(0, len(cx_pool.conns))
+
+    def test_pool_removes_dead_socket(self):
+        # Test that Pool removes dead socket and the socket doesn't return
+        # itself PYTHON-344
+        cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
+        cx_pool._check_interval_seconds = 0  # Always check.
+
+        with cx_pool.checkout() as conn:
+            # Simulate a closed socket without telling the Connection it's
+            # closed.
+            conn.conn.close()
+            self.assertTrue(conn.conn_closed())
+
+        with cx_pool.checkout() as new_connection:
+            self.assertEqual(0, len(cx_pool.conns))
+            self.assertNotEqual(conn, new_connection)
+
+        self.assertEqual(1, len(cx_pool.conns))
+
+        # Semaphore was released.
+        with cx_pool.checkout():
+            pass
+
+    def test_socket_closed(self):
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect((client_context.host, client_context.port))
+        socket_checker = SocketChecker()
+        self.assertFalse(socket_checker.socket_closed(s))
+        s.close()
+        self.assertTrue(socket_checker.socket_closed(s))
+
+    def test_socket_checker(self):
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect((client_context.host, client_context.port))
+        socket_checker = SocketChecker()
+        # Socket has nothing to read.
+        self.assertFalse(socket_checker.select(s, read=True))
+        self.assertFalse(socket_checker.select(s, read=True, timeout=0))
+        self.assertFalse(socket_checker.select(s, read=True, timeout=0.05))
+        # Socket is writable.
+        self.assertTrue(socket_checker.select(s, write=True, timeout=None))
+        self.assertTrue(socket_checker.select(s, write=True))
+        self.assertTrue(socket_checker.select(s, write=True, timeout=0))
+        self.assertTrue(socket_checker.select(s, write=True, timeout=0.05))
+        # Make the socket readable
+        _, msg, _ = message._query(
+            0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS
+        )
+        s.sendall(msg)
+        # Block until the socket is readable.
+        self.assertTrue(socket_checker.select(s, read=True, timeout=None))
+        self.assertTrue(socket_checker.select(s, read=True))
+        self.assertTrue(socket_checker.select(s, read=True, timeout=0))
+        self.assertTrue(socket_checker.select(s, read=True, timeout=0.05))
+        # Socket is still writable.
+        self.assertTrue(socket_checker.select(s, write=True, timeout=None))
+        self.assertTrue(socket_checker.select(s, write=True))
+        self.assertTrue(socket_checker.select(s, write=True, timeout=0))
+        self.assertTrue(socket_checker.select(s, write=True, timeout=0.05))
+        s.close()
+        self.assertTrue(socket_checker.socket_closed(s))
+
+    def test_return_socket_after_reset(self):
+        pool = self.create_pool()
+        with pool.checkout() as sock:
+            self.assertEqual(pool.active_sockets, 1)
+            self.assertEqual(pool.operation_count, 1)
+            pool.reset()
+
+        self.assertTrue(sock.closed)
+        self.assertEqual(0, len(pool.conns))
+        self.assertEqual(pool.active_sockets, 0)
+        self.assertEqual(pool.operation_count, 0)
+
+    def test_pool_check(self):
+        # Test that Pool recovers from two connection failures in a row.
+        # This exercises code at the end of Pool._check().
+        cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1)
+        cx_pool._check_interval_seconds = 0  # Always check.
+        self.addCleanup(cx_pool.close)
+
+        with cx_pool.checkout() as conn:
+            # Simulate a closed socket without telling the Connection it's
+            # closed.
+            conn.conn.close()
+
+        # Swap pool's address with a bad one.
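+        # The dead connection fails its health check, and the reconnect to the
+        # unreachable address fails as well: two connection failures in a row.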
+        address, cx_pool.address = cx_pool.address, ("foo.com", 1234)
+        with self.assertRaises(AutoReconnect):
+            with cx_pool.checkout():
+                pass
+
+        # Back to normal, semaphore was correctly released.
+        cx_pool.address = address
+        with cx_pool.checkout():
+            pass
+
+    def test_wait_queue_timeout(self):
+        wait_queue_timeout = 2  # Seconds
+        pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout)
+        self.addCleanup(pool.close)
+
+        with pool.checkout():
+            start = time.time()
+            with self.assertRaises(ConnectionFailure):
+                with pool.checkout():
+                    pass
+
+        duration = time.time() - start
+        self.assertLess(
+            abs(wait_queue_timeout - duration),
+            1,
+            f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}",
+        )
+
+    def test_no_wait_queue_timeout(self):
+        # Verify checkout() with no wait_queue_timeout blocks forever.
+        pool = self.create_pool(max_pool_size=1)
+        self.addCleanup(pool.close)
+
+        # Reach max_size.
+        with pool.checkout() as s1:
+            t = SocketGetter(self.c, pool)
+            t.start()
+            while t.state != "get_socket":
+                time.sleep(0.1)
+
+            time.sleep(1)
+            self.assertEqual(t.state, "get_socket")
+
+        while t.state != "connection":
+            time.sleep(0.1)
+
+        self.assertEqual(t.state, "connection")
+        self.assertEqual(t.sock, s1)
+        # Cleanup
+        t.release_conn()
+        t.join()
+        pool.close()
+
+    def test_checkout_more_than_max_pool_size(self):
+        pool = self.create_pool(max_pool_size=2)
+
+        socks = []
+        for _ in range(2):
+            # Call 'pin_cursor' so we can hold the socket.
+            with pool.checkout() as sock:
+                sock.pin_cursor()
+                socks.append(sock)
+
+        tasks = []
+        for _ in range(10):
+            t = SocketGetter(self.c, pool)
+            t.start()
+            tasks.append(t)
+        time.sleep(1)
+        for t in tasks:
+            self.assertEqual(t.state, "get_socket")
+        # Cleanup
+        for socket_info in socks:
+            socket_info.unpin()
+        while tasks:
+            to_remove = []
+            for t in tasks:
+                if t.release_conn():
+                    to_remove.append(t)
+                    t.join()
+            for t in to_remove:
+                tasks.remove(t)
+            time.sleep(0.05)
+        pool.close()
+
+    def test_maxConnecting(self):
+        client = self.rs_or_single_client()
+        self.client.test.test.insert_one({})
+        self.addCleanup(self.client.test.test.delete_many, {})
+        pool = get_pool(client)
+        docs = []
+
+        # Run 50 short-running operations
+        def find_one():
+            docs.append(client.test.test.find_one({}))
+
+        tasks = [ConcurrentRunner(target=find_one) for _ in range(50)]
+        for task in tasks:
+            task.start()
+        for task in tasks:
+            task.join(10)
+
+        self.assertEqual(len(docs), 50)
+        self.assertLessEqual(len(pool.conns), 50)
+        # TLS and auth make connection establishment more expensive than
+        # the query which leads to more threads hitting maxConnecting.
+        # The end result is fewer total connections and better latency.
+        if client_context.tls and client_context.auth_enabled:
+            self.assertLessEqual(len(pool.conns), 30)
+        else:
+            self.assertLessEqual(len(pool.conns), 50)
+        # MongoDB 4.4.1 with auth + ssl:
+        # maxConnecting = 2:         6 connections in ~0.231+ seconds
+        # maxConnecting = unbounded: 50 connections in ~0.642+ seconds
+        #
+        # MongoDB 4.4.1 with no-auth no-ssl Python 3.8:
+        # maxConnecting = 2:         15-22 connections in ~0.108+ seconds
+        # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds
+        print(len(pool.conns))
+
+    @client_context.require_failCommand_appName
+    def test_csot_timeout_message(self):
+        client = self.rs_or_single_client(appName="connectionTimeoutApp")
+        # Mock an operation failing due to pymongo.timeout().
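+        # blockTimeMS (1000ms) is longer than the 500ms timeout() applied
+        # below, so the find is expected to fail with a client-side timeout.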
+        mock_connection_timeout = {
+            "configureFailPoint": "failCommand",
+            "mode": "alwaysOn",
+            "data": {
+                "blockConnection": True,
+                "blockTimeMS": 1000,
+                "failCommands": ["find"],
+                "appName": "connectionTimeoutApp",
+            },
+        }
+
+        client.db.t.insert_one({"x": 1})
+
+        with self.fail_point(mock_connection_timeout):
+            with self.assertRaises(Exception) as error:
+                with timeout(0.5):
+                    client.db.t.find_one({"$where": delay(2)})
+
+        self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception))
+
+    @client_context.require_failCommand_appName
+    def test_socket_timeout_message(self):
+        client = self.rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp")
+        # Mock an operation failing due to socketTimeoutMS.
+        mock_connection_timeout = {
+            "configureFailPoint": "failCommand",
+            "mode": "alwaysOn",
+            "data": {
+                "blockConnection": True,
+                "blockTimeMS": 1000,
+                "failCommands": ["find"],
+                "appName": "connectionTimeoutApp",
+            },
+        }
+
+        client.db.t.insert_one({"x": 1})
+
+        with self.fail_point(mock_connection_timeout):
+            with self.assertRaises(Exception) as error:
+                client.db.t.find_one({"$where": delay(2)})
+
+        self.assertIn(
+            "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)",
+            str(error.exception),
+        )
+
+    @client_context.require_failCommand_appName
+    def test_connection_timeout_message(self):
+        # Mock a connection creation failing due to timeout.
+        mock_connection_timeout = {
+            "configureFailPoint": "failCommand",
+            "mode": "alwaysOn",
+            "data": {
+                "blockConnection": True,
+                "blockTimeMS": 1000,
+                "failCommands": [HelloCompat.LEGACY_CMD, "hello"],
+                "appName": "connectionTimeoutApp",
+            },
+        }
+
+        client = self.rs_or_single_client(
+            connectTimeoutMS=500,
+            socketTimeoutMS=500,
+            appName="connectionTimeoutApp",
+            heartbeatFrequencyMS=1000000,
+        )
+        client.admin.command("ping")
+        pool = get_pool(client)
+        pool.reset_without_pause()
+        with self.fail_point(mock_connection_timeout):
+            with self.assertRaises(Exception) as error:
+                client.admin.command("ping")
+
+        self.assertIn(
+            "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)",
+            str(error.exception),
+        )
+
+
+class TestPoolMaxSize(_TestPoolingBase):
+    def test_max_pool_size(self):
+        max_pool_size = 4
+        c = self.rs_or_single_client(maxPoolSize=max_pool_size)
+        collection = c[DB].test
+
+        # Need one document.
+        collection.drop()
+        collection.insert_one({})
+
+        # ntasks had better be much larger than max_pool_size to ensure that
+        # max_pool_size connections are actually required at some point in this
+        # test's execution.
+        cx_pool = get_pool(c)
+        ntasks = 10
+        tasks = []
+        lock = _create_lock()
+        self.n_passed = 0
+
+        def f():
+            for _ in range(5):
+                collection.find_one({"$where": delay(0.1)})
+                assert len(cx_pool.conns) <= max_pool_size
+
+            with lock:
+                self.n_passed += 1
+
+        for _i in range(ntasks):
+            t = ConcurrentRunner(target=f)
+            tasks.append(t)
+            t.start()
+
+        joinall(tasks)
+        self.assertEqual(ntasks, self.n_passed)
+        self.assertGreater(len(cx_pool.conns), 1)
+        self.assertEqual(0, cx_pool.requests)
+
+    def test_max_pool_size_none(self):
+        c = self.rs_or_single_client(maxPoolSize=None)
+        collection = c[DB].test
+
+        # Need one document.
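+        # Same setup as test_max_pool_size above: each task runs five delayed
+        # find_one() calls against this single document.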
+        collection.drop()
+        collection.insert_one({})
+
+        cx_pool = get_pool(c)
+        ntasks = 10
+        tasks = []
+        lock = _create_lock()
+        self.n_passed = 0
+
+        def f():
+            for _ in range(5):
+                collection.find_one({"$where": delay(0.1)})
+
+            with lock:
+                self.n_passed += 1
+
+        for _i in range(ntasks):
+            t = ConcurrentRunner(target=f)
+            tasks.append(t)
+            t.start()
+
+        joinall(tasks)
+        self.assertEqual(ntasks, self.n_passed)
+        self.assertGreater(len(cx_pool.conns), 1)
+        self.assertEqual(cx_pool.max_pool_size, float("inf"))
+
+    def test_max_pool_size_zero(self):
+        c = self.rs_or_single_client(maxPoolSize=0)
+        pool = get_pool(c)
+        self.assertEqual(pool.max_pool_size, float("inf"))
+
+    def test_max_pool_size_with_connection_failure(self):
+        # The pool acquires its semaphore before attempting to connect; ensure
+        # it releases the semaphore on connection failure.
+        test_pool = Pool(
+            ("somedomainthatdoesntexist.org", 27017),
+            PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1),
+        )
+        test_pool.ready()
+
+        # First call to checkout() fails; if pool doesn't release its semaphore
+        # then the second call raises "ConnectionFailure: Timed out waiting for
+        # socket from pool" instead of AutoReconnect.
+        for _i in range(2):
+            with self.assertRaises(AutoReconnect) as context:
+                with test_pool.checkout():
+                    pass
+
+            # Testing for AutoReconnect instead of ConnectionFailure, above,
+            # is sufficient right *now* to catch a semaphore leak. But that
+            # seems error-prone, so check the message too.
+            self.assertNotIn("waiting for socket from pool", str(context.exception))
 
 
 if __name__ == "__main__":
diff --git a/test/test_pooling_base.py b/test/test_pooling_base.py
deleted file mode 100644
index 30db9ac428..0000000000
--- a/test/test_pooling_base.py
+++ /dev/null
@@ -1,1239 +0,0 @@
-# Copyright 2012-2014 MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Base classes to test built-in connection-pooling with threads or greenlets.
-""" - -import gc -import random -import socket -import sys -import thread -import threading -import time - -sys.path[0:0] = [""] - -from nose.plugins.skip import SkipTest - -import pymongo.pool -from pymongo.mongo_client import MongoClient -from pymongo.pool import Pool, NO_REQUEST, NO_SOCKET_YET, SocketInfo -from pymongo.errors import ConfigurationError, ConnectionFailure -from pymongo.errors import ExceededMaxWaiters -from test import version, host, port -from test.test_client import get_client -from test.utils import delay, is_mongos, one, get_pool - -N = 10 -DB = "pymongo-pooling-tests" - - -if sys.version_info[0] >= 3: - from imp import reload - - -try: - import gevent - from gevent import Greenlet, monkey, hub - import gevent.coros, gevent.event - has_gevent = True -except ImportError: - has_gevent = False - - -def gc_collect_until_done(threads, timeout=60): - start = time.time() - running = list(threads) - while running: - assert (time.time() - start) < timeout, "Threads timed out" - for t in running: - t.thread.join(0.1) - if not t.alive: - running.remove(t) - gc.collect() - - -class MongoThread(object): - """A thread, or a greenlet, that uses a MongoClient""" - def __init__(self, test_case): - self.use_greenlets = test_case.use_greenlets - self.client = test_case.c - self.db = self.client[DB] - self.ut = test_case - self.passed = False - - def start(self): - if self.use_greenlets: - # A Gevent extended Greenlet - self.thread = Greenlet(self.run) - else: - self.thread = threading.Thread(target=self.run) - self.thread.setDaemon(True) # Don't hang whole test if thread hangs - - self.thread.start() - - @property - def alive(self): - if self.use_greenlets: - return not self.thread.dead - else: - return self.thread.isAlive() - - def join(self): - self.thread.join(20) - if self.use_greenlets: - msg = "Greenlet timeout" - else: - msg = "Thread timeout" - assert not self.alive, msg - self.thread = None - - def run(self): - self.run_mongo_thread() - - # No exceptions thrown - self.passed = True - - def run_mongo_thread(self): - raise NotImplementedError() - - -class SaveAndFind(MongoThread): - - def run_mongo_thread(self): - for _ in xrange(N): - rand = random.randint(0, N) - _id = self.db.sf.save({"x": rand}) - self.ut.assertEqual(rand, self.db.sf.find_one(_id)["x"]) - - -class Unique(MongoThread): - - def run_mongo_thread(self): - for _ in xrange(N): - self.client.start_request() - self.db.unique.insert({}) # no error - self.client.end_request() - - -class NonUnique(MongoThread): - - def run_mongo_thread(self): - for _ in xrange(N): - self.client.start_request() - self.db.unique.insert({"_id": "jesse"}, w=0) - self.ut.assertNotEqual(None, self.db.error()) - self.client.end_request() - - -class Disconnect(MongoThread): - - def run_mongo_thread(self): - for _ in xrange(N): - self.client.disconnect() - - -class NoRequest(MongoThread): - - def run_mongo_thread(self): - self.client.start_request() - errors = 0 - for _ in xrange(N): - self.db.unique.insert({"_id": "jesse"}, w=0) - if not self.db.error(): - errors += 1 - - self.client.end_request() - self.ut.assertEqual(0, errors) - - -def run_cases(ut, cases): - threads = [] - nruns = 10 - if ( - ut.use_greenlets and sys.platform == 'darwin' - and gevent.version_info[0] < 1 - ): - # Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than - # about 35 Greenlets share a MongoClient. Apparently fixed in - # recent Gevent development. 
- nruns = 5 - - for case in cases: - for i in range(nruns): - t = case(ut) - t.start() - threads.append(t) - - for t in threads: - t.join() - - for t in threads: - assert t.passed, "%s.run_mongo_thread() threw an exception" % repr(t) - - -class OneOp(MongoThread): - - def __init__(self, ut): - super(OneOp, self).__init__(ut) - - def run_mongo_thread(self): - pool = get_pool(self.client) - assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % ( - len(pool.sockets) - ) - - sock_info = one(pool.sockets) - - self.client.start_request() - - # start_request() hasn't yet moved the socket from the general pool into - # the request - assert len(pool.sockets) == 1 - assert one(pool.sockets) == sock_info - - self.client[DB].test.find_one() - - # find_one() causes the socket to be used in the request, so now it's - # bound to this thread - assert len(pool.sockets) == 0 - assert pool._get_request_state() == sock_info - self.client.end_request() - - # The socket is back in the pool - assert len(pool.sockets) == 1 - assert one(pool.sockets) == sock_info - - -class CreateAndReleaseSocket(MongoThread): - """A thread or greenlet that acquires a socket, waits for all other threads - to reach rendezvous point, then terminates. - """ - class Rendezvous(object): - def __init__(self, nthreads, use_greenlets): - self.nthreads = nthreads - self.nthreads_run = 0 - self.use_greenlets = use_greenlets - if use_greenlets: - self.lock = gevent.coros.RLock() - else: - self.lock = threading.Lock() - self.reset_ready() - - def reset_ready(self): - if self.use_greenlets: - self.ready = gevent.event.Event() - else: - self.ready = threading.Event() - - def __init__(self, ut, client, start_request, end_request, rendezvous): - super(CreateAndReleaseSocket, self).__init__(ut) - self.client = client - self.start_request = start_request - self.end_request = end_request - self.rendezvous = rendezvous - - def run_mongo_thread(self): - # Do an operation that requires a socket. - # test_max_pool_size uses this to spin up lots of threads requiring - # lots of simultaneous connections, to ensure that Pool obeys its - # max_size configuration and closes extra sockets as they're returned. - for i in range(self.start_request): - self.client.start_request() - - # Use a socket - self.client[DB].test.find_one() - - # Don't finish until all threads reach this point - r = self.rendezvous - r.lock.acquire() - r.nthreads_run += 1 - if r.nthreads_run == r.nthreads: - # Everyone's here, let them finish - r.ready.set() - r.lock.release() - else: - r.lock.release() - r.ready.wait(30) # Wait thirty seconds.... - assert r.ready.isSet(), "Rendezvous timed out" - - for i in range(self.end_request): - self.client.end_request() - - -class CreateAndReleaseSocketNoRendezvous(MongoThread): - """A thread or greenlet that acquires a socket and terminates without - waiting for other threads to reach rendezvous point. - """ - class Rendezvous(object): - def __init__(self, nthreads, use_greenlets): - self.nthreads = nthreads - self.nthreads_run = 0 - if use_greenlets: - self.lock = gevent.coros.RLock() - self.ready = gevent.event.Event() - else: - self.lock = threading.Lock() - self.ready = threading.Event() - - def __init__(self, ut, client, start_request, end_request): - super(CreateAndReleaseSocketNoRendezvous, self).__init__(ut) - self.client = client - self.start_request = start_request - self.end_request = end_request - - def run_mongo_thread(self): - # Do an operation that requires a socket. 
- # test_max_pool_size uses this to spin up lots of threads requiring - # lots of simultaneous connections, to ensure that Pool obeys its - # max_size configuration and closes extra sockets as they're returned. - for i in range(self.start_request): - self.client.start_request() - - # Use a socket - self.client[DB].test.find_one() - for i in range(self.end_request): - self.client.end_request() - - -class _TestPoolingBase(object): - """Base class for all client-pool tests. Doesn't inherit from - unittest.TestCase, and its name is prefixed with "_" to avoid being - run by nose. Real tests double-inherit from this base and from TestCase. - """ - use_greenlets = False - - def setUp(self): - if self.use_greenlets: - if not has_gevent: - raise SkipTest("Gevent not installed") - - # Note we don't do patch_thread() or patch_all() - we're - # testing here that patch_thread() is unnecessary for - # the client pool to work properly. - monkey.patch_socket() - - self.c = self.get_client(auto_start_request=False) - - # reset the db - db = self.c[DB] - db.unique.drop() - db.test.drop() - db.unique.insert({"_id": "jesse"}) - - db.test.insert([{} for i in range(10)]) - - def tearDown(self): - self.c.close() - self.c = None - if self.use_greenlets: - # Undo patch - reload(socket) - - def get_client(self, *args, **kwargs): - opts = kwargs.copy() - opts['use_greenlets'] = self.use_greenlets - return get_client(*args, **opts) - - def get_pool(self, *args, **kwargs): - kwargs['use_greenlets'] = self.use_greenlets - return Pool(*args, **kwargs) - - def sleep(self, seconds): - if self.use_greenlets: - gevent.sleep(seconds) - else: - time.sleep(seconds) - - def assert_no_request(self): - self.assertTrue( - self.c._MongoClient__member is None or - NO_REQUEST == get_pool(self.c)._get_request_state() - ) - - def assert_request_without_socket(self): - self.assertEqual( - NO_SOCKET_YET, get_pool(self.c)._get_request_state() - ) - - def assert_request_with_socket(self): - self.assertTrue(isinstance( - get_pool(self.c)._get_request_state(), SocketInfo - )) - - def assert_pool_size(self, pool_size): - if pool_size == 0: - self.assertTrue( - self.c._MongoClient__member is None - or not get_pool(self.c).sockets - ) - else: - self.assertEqual( - pool_size, len(get_pool(self.c).sockets) - ) - - -class _TestPooling(_TestPoolingBase): - """Basic pool tests, to be run both with threads and with greenlets.""" - def test_max_pool_size_validation(self): - self.assertRaises( - ConfigurationError, MongoClient, host=host, port=port, - max_pool_size=-1 - ) - - self.assertRaises( - ConfigurationError, MongoClient, host=host, port=port, - max_pool_size='foo' - ) - - c = MongoClient(host=host, port=port, max_pool_size=100) - self.assertEqual(c.max_pool_size, 100) - - def test_no_disconnect(self): - run_cases(self, [NoRequest, NonUnique, Unique, SaveAndFind]) - - def test_simple_disconnect(self): - # MongoClient just created, expect 1 free socket - self.assert_pool_size(1) - self.assert_no_request() - - self.c.start_request() - self.assert_request_without_socket() - cursor = self.c[DB].stuff.find() - - # Cursor hasn't actually caused a request yet, so there's still 1 free - # socket. 
- self.assert_pool_size(1) - self.assert_request_without_socket() - - # Actually make a request to server, triggering a socket to be - # allocated to the request - list(cursor) - self.assert_pool_size(0) - self.assert_request_with_socket() - - # Pool returns to its original state - self.c.end_request() - self.assert_no_request() - self.assert_pool_size(1) - - self.c.disconnect() - self.assert_pool_size(0) - self.assert_no_request() - - def test_disconnect(self): - run_cases(self, [SaveAndFind, Disconnect, Unique]) - - def test_independent_pools(self): - # Test for regression of very early PyMongo bug: separate pools shared - # state. - p = self.get_pool((host, port), 10, None, None, False) - self.c.start_request() - self.c.pymongo_test.test.find_one() - self.assertEqual(set(), p.sockets) - self.c.end_request() - self.assert_pool_size(1) - self.assertEqual(set(), p.sockets) - - def test_dependent_pools(self): - self.assert_pool_size(1) - self.c.start_request() - self.assert_request_without_socket() - self.c.pymongo_test.test.find_one() - self.assert_request_with_socket() - self.assert_pool_size(0) - self.c.end_request() - self.assert_pool_size(1) - - t = OneOp(self) - t.start() - t.join() - self.assertTrue(t.passed, "OneOp.run() threw exception") - - self.assert_pool_size(1) - self.c.pymongo_test.test.find_one() - self.assert_pool_size(1) - - def test_multiple_connections(self): - a = self.get_client(auto_start_request=False) - b = self.get_client(auto_start_request=False) - self.assertEqual(1, len(get_pool(a).sockets)) - self.assertEqual(1, len(get_pool(b).sockets)) - - a.start_request() - a.pymongo_test.test.find_one() - self.assertEqual(0, len(get_pool(a).sockets)) - a.end_request() - self.assertEqual(1, len(get_pool(a).sockets)) - self.assertEqual(1, len(get_pool(b).sockets)) - a_sock = one(get_pool(a).sockets) - - b.end_request() - self.assertEqual(1, len(get_pool(a).sockets)) - self.assertEqual(1, len(get_pool(b).sockets)) - - b.start_request() - b.pymongo_test.test.find_one() - self.assertEqual(1, len(get_pool(a).sockets)) - self.assertEqual(0, len(get_pool(b).sockets)) - - b.end_request() - b_sock = one(get_pool(b).sockets) - b.pymongo_test.test.find_one() - a.pymongo_test.test.find_one() - - self.assertEqual(b_sock, - get_pool(b).get_socket()) - self.assertEqual(a_sock, - get_pool(a).get_socket()) - - a_sock.close() - b_sock.close() - - def test_request(self): - # Check that Pool gives two different sockets in two calls to - # get_socket() -- doesn't automatically put us in a request any more - cx_pool = self.get_pool( - pair=(host,port), - max_size=10, - net_timeout=1000, - conn_timeout=1000, - use_ssl=False - ) - - sock0 = cx_pool.get_socket() - sock1 = cx_pool.get_socket() - - self.assertNotEqual(sock0, sock1) - - # Now in a request, we'll get the same socket both times - cx_pool.start_request() - - sock2 = cx_pool.get_socket() - sock3 = cx_pool.get_socket() - self.assertEqual(sock2, sock3) - - # Pool didn't keep reference to sock0 or sock1; sock2 and 3 are new - self.assertNotEqual(sock0, sock2) - self.assertNotEqual(sock1, sock2) - - # Return the request sock to pool - cx_pool.end_request() - - sock4 = cx_pool.get_socket() - sock5 = cx_pool.get_socket() - - # Not in a request any more, we get different sockets - self.assertNotEqual(sock4, sock5) - - # end_request() returned sock2 to pool - self.assertEqual(sock4, sock2) - - for s in [sock0, sock1, sock2, sock3, sock4, sock5]: - s.close() - - def test_reset_and_request(self): - # reset() is called after a fork, or after a 
socket error. Ensure that - # a new request is begun if a request was in progress when the reset() - # occurred, otherwise no request is begun. - p = self.get_pool((host, port), 10, None, None, False) - self.assertFalse(p.in_request()) - p.start_request() - self.assertTrue(p.in_request()) - p.reset() - self.assertTrue(p.in_request()) - p.end_request() - self.assertFalse(p.in_request()) - p.reset() - self.assertFalse(p.in_request()) - - def test_pool_reuses_open_socket(self): - # Test Pool's _check_closed() method doesn't close a healthy socket - cx_pool = self.get_pool((host,port), 10, None, None, False) - cx_pool._check_interval_seconds = 0 # Always check. - sock_info = cx_pool.get_socket() - cx_pool.maybe_return_socket(sock_info) - - new_sock_info = cx_pool.get_socket() - self.assertEqual(sock_info, new_sock_info) - cx_pool.maybe_return_socket(new_sock_info) - self.assertEqual(1, len(cx_pool.sockets)) - - def test_pool_removes_dead_socket(self): - # Test that Pool removes dead socket and the socket doesn't return - # itself PYTHON-344 - cx_pool = self.get_pool((host,port), 10, None, None, False) - cx_pool._check_interval_seconds = 0 # Always check. - sock_info = cx_pool.get_socket() - - # Simulate a closed socket without telling the SocketInfo it's closed - sock_info.sock.close() - self.assertTrue(pymongo.pool._closed(sock_info.sock)) - cx_pool.maybe_return_socket(sock_info) - new_sock_info = cx_pool.get_socket() - self.assertEqual(0, len(cx_pool.sockets)) - self.assertNotEqual(sock_info, new_sock_info) - cx_pool.maybe_return_socket(new_sock_info) - self.assertEqual(1, len(cx_pool.sockets)) - - def test_pool_removes_dead_request_socket_after_check(self): - # Test that Pool keeps request going even if a socket dies in request - cx_pool = self.get_pool((host,port), 10, None, None, False) - cx_pool._check_interval_seconds = 0 # Always check. - cx_pool.start_request() - - # Get the request socket - sock_info = cx_pool.get_socket() - self.assertEqual(0, len(cx_pool.sockets)) - self.assertEqual(sock_info, cx_pool._get_request_state()) - sock_info.sock.close() - cx_pool.maybe_return_socket(sock_info) - - # Although the request socket died, we're still in a request with a - # new socket - new_sock_info = cx_pool.get_socket() - self.assertTrue(cx_pool.in_request()) - self.assertNotEqual(sock_info, new_sock_info) - self.assertEqual(new_sock_info, cx_pool._get_request_state()) - cx_pool.maybe_return_socket(new_sock_info) - self.assertEqual(new_sock_info, cx_pool._get_request_state()) - self.assertEqual(0, len(cx_pool.sockets)) - - cx_pool.end_request() - self.assertEqual(1, len(cx_pool.sockets)) - - def test_pool_removes_dead_request_socket(self): - # Test that Pool keeps request going even if a socket dies in request - cx_pool = self.get_pool((host,port), 10, None, None, False) - cx_pool.start_request() - - # Get the request socket - sock_info = cx_pool.get_socket() - self.assertEqual(0, len(cx_pool.sockets)) - self.assertEqual(sock_info, cx_pool._get_request_state()) - - # Unlike in test_pool_removes_dead_request_socket_after_check, we - # set sock_info.closed and *don't* wait for it to be checked. 
- sock_info.close() - cx_pool.maybe_return_socket(sock_info) - - # Although the request socket died, we're still in a request with a - # new socket - new_sock_info = cx_pool.get_socket() - self.assertTrue(cx_pool.in_request()) - self.assertNotEqual(sock_info, new_sock_info) - self.assertEqual(new_sock_info, cx_pool._get_request_state()) - cx_pool.maybe_return_socket(new_sock_info) - self.assertEqual(new_sock_info, cx_pool._get_request_state()) - self.assertEqual(0, len(cx_pool.sockets)) - - cx_pool.end_request() - self.assertEqual(1, len(cx_pool.sockets)) - - def test_pool_removes_dead_socket_after_request(self): - # Test that Pool handles a socket dying that *used* to be the request - # socket. - cx_pool = self.get_pool((host,port), 10, None, None, False) - cx_pool._check_interval_seconds = 0 # Always check. - cx_pool.start_request() - - # Get the request socket - sock_info = cx_pool.get_socket() - self.assertEqual(sock_info, cx_pool._get_request_state()) - cx_pool.maybe_return_socket(sock_info) - - # End request - cx_pool.end_request() - self.assertEqual(1, len(cx_pool.sockets)) - - # Kill old request socket - sock_info.sock.close() - - # Dead socket detected and removed - new_sock_info = cx_pool.get_socket() - self.assertFalse(cx_pool.in_request()) - self.assertNotEqual(sock_info, new_sock_info) - self.assertEqual(0, len(cx_pool.sockets)) - self.assertFalse(pymongo.pool._closed(new_sock_info.sock)) - cx_pool.maybe_return_socket(new_sock_info) - self.assertEqual(1, len(cx_pool.sockets)) - - def test_dead_request_socket_with_max_size(self): - # When a pool replaces a dead request socket, the semaphore it uses - # to enforce max_size should remain unaffected. - cx_pool = self.get_pool( - (host, port), 1, None, None, False, wait_queue_timeout=1) - - cx_pool._check_interval_seconds = 0 # Always check. - cx_pool.start_request() - - # Get and close the request socket. - request_sock_info = cx_pool.get_socket() - request_sock_info.sock.close() - cx_pool.maybe_return_socket(request_sock_info) - - # Detects closed socket and creates new one, semaphore value still 0. - request_sock_info_2 = cx_pool.get_socket() - self.assertNotEqual(request_sock_info, request_sock_info_2) - cx_pool.maybe_return_socket(request_sock_info_2) - cx_pool.end_request() - - # Semaphore value now 1; we can get a socket. - sock_info = cx_pool.get_socket() - - # Clean up. - cx_pool.maybe_return_socket(sock_info) - - def test_socket_reclamation(self): - if sys.platform.startswith('java'): - raise SkipTest("Jython can't do socket reclamation") - - # Check that if a thread starts a request and dies without ending - # the request, that the socket is reclaimed into the pool. - cx_pool = self.get_pool( - pair=(host,port), - max_size=10, - net_timeout=1000, - conn_timeout=1000, - use_ssl=False, - ) - - self.assertEqual(0, len(cx_pool.sockets)) - - lock = None - the_sock = [None] - - def leak_request(): - self.assertEqual(NO_REQUEST, cx_pool._get_request_state()) - cx_pool.start_request() - self.assertEqual(NO_SOCKET_YET, cx_pool._get_request_state()) - sock_info = cx_pool.get_socket() - self.assertEqual(sock_info, cx_pool._get_request_state()) - the_sock[0] = id(sock_info.sock) - cx_pool.maybe_return_socket(sock_info) - - if not self.use_greenlets: - lock.release() - - if self.use_greenlets: - g = Greenlet(leak_request) - g.start() - g.join(1) - self.assertTrue(g.ready(), "Greenlet is hung") - - # In Gevent after 0.13.8, join() returns before the Greenlet.link - # callback fires. Give it a moment to reclaim the socket. 
- gevent.sleep(0.1) - else: - lock = thread.allocate_lock() - lock.acquire() - - # Start a thread WITHOUT a threading.Thread - important to test that - # Pool can deal with primitive threads. - thread.start_new_thread(leak_request, ()) - - # Join thread - acquired = lock.acquire() - self.assertTrue(acquired, "Thread is hung") - - # Make sure thread is really gone - time.sleep(1) - - if 'PyPy' in sys.version: - gc.collect() - - # Access the thread local from the main thread to trigger the - # ThreadVigil's delete callback, returning the request socket to - # the pool. - # In Python 2.7.0 and lesser, a dead thread's locals are deleted - # and those locals' weakref callbacks are fired only when another - # thread accesses the locals and finds the thread state is stale, - # see http://bugs.python.org/issue1868. Accessing the thread - # local from the main thread is a necessary part of this test, and - # realistic: in a multithreaded web server a new thread will access - # Pool._ident._local soon after an old thread has died. - cx_pool._ident.get() - - # Pool reclaimed the socket - self.assertEqual(1, len(cx_pool.sockets)) - self.assertEqual(the_sock[0], id(one(cx_pool.sockets).sock)) - self.assertEqual(0, len(cx_pool._tid_to_sock)) - - -class _TestMaxPoolSize(_TestPoolingBase): - """Test that connection pool keeps proper number of idle sockets open, - no matter how start/end_request are called. To be run both with threads and - with greenlets. - """ - def _test_max_pool_size( - self, start_request, end_request, max_pool_size=4, nthreads=10): - """Start `nthreads` threads. Each calls start_request `start_request` - times, then find_one and waits at a barrier; once all reach the barrier - each calls end_request `end_request` times. The test asserts that the - pool ends with min(max_pool_size, nthreads) sockets or, if - start_request wasn't called, at least one socket. - - This tests both max_pool_size enforcement and that leaked request - sockets are eventually returned to the pool when their threads end. - - You may need to increase ulimit -n on Mac. - - If you increase nthreads over about 35, note a - Gevent 0.13.6 bug on Mac: Greenlet.join() hangs if more than - about 35 Greenlets share a MongoClient. Apparently fixed in - recent Gevent development. - """ - if start_request: - if max_pool_size is not None and max_pool_size < nthreads: - raise AssertionError("Deadlock") - - c = self.get_client( - max_pool_size=max_pool_size, auto_start_request=False) - - rendezvous = CreateAndReleaseSocket.Rendezvous( - nthreads, self.use_greenlets) - - threads = [] - for i in range(nthreads): - t = CreateAndReleaseSocket( - self, c, start_request, end_request, rendezvous) - threads.append(t) - - for t in threads: - t.start() - - if 'PyPy' in sys.version: - # With PyPy we need to kick off the gc whenever the threads hit the - # rendezvous since nthreads > max_pool_size. - gc_collect_until_done(threads) - else: - for t in threads: - t.join() - - # join() returns before the thread state is cleared; give it time. 
- self.sleep(1) - - for t in threads: - self.assertTrue(t.passed) - - # Socket-reclamation doesn't work in Jython - if not sys.platform.startswith('java'): - cx_pool = get_pool(c) - - # Socket-reclamation depends on timely garbage-collection - if 'PyPy' in sys.version: - gc.collect() - - if self.use_greenlets: - # Wait for Greenlet.link() callbacks to execute - the_hub = hub.get_hub() - if hasattr(the_hub, 'join'): - # Gevent 1.0 - the_hub.join() - else: - # Gevent 0.13 and less - the_hub.shutdown() - - if start_request: - # Trigger final cleanup in Python <= 2.7.0. - cx_pool._ident.get() - expected_idle = min(max_pool_size, nthreads) - message = ( - '%d idle sockets (expected %d) and %d request sockets' - ' (expected 0)' % ( - len(cx_pool.sockets), expected_idle, - len(cx_pool._tid_to_sock))) - - self.assertEqual( - expected_idle, len(cx_pool.sockets), message) - else: - # Without calling start_request(), threads can safely share - # sockets; the number running concurrently, and hence the - # number of sockets needed, is between 1 and 10, depending - # on thread-scheduling. - self.assertTrue(len(cx_pool.sockets) >= 1) - - # thread.join completes slightly *before* thread locals are - # cleaned up, so wait up to 5 seconds for them. - self.sleep(0.1) - cx_pool._ident.get() - start = time.time() - - while ( - not cx_pool.sockets - and cx_pool._socket_semaphore.counter < max_pool_size - and (time.time() - start) < 5 - ): - self.sleep(0.1) - cx_pool._ident.get() - - if max_pool_size is not None: - self.assertEqual( - max_pool_size, - cx_pool._socket_semaphore.counter) - - self.assertEqual(0, len(cx_pool._tid_to_sock)) - - def _test_max_pool_size_no_rendezvous(self, start_request, end_request): - max_pool_size = 5 - c = self.get_client( - max_pool_size=max_pool_size, auto_start_request=False) - - # If you increase nthreads over about 35, note a - # Gevent 0.13.6 bug on Mac, Greenlet.join() hangs if more than - # about 35 Greenlets share a MongoClient. Apparently fixed in - # recent Gevent development. - - # On the other hand, nthreads had better be much larger than - # max_pool_size to ensure that max_pool_size sockets are actually - # required at some point in this test's execution. - nthreads = 10 - - if (sys.platform.startswith('java') - and start_request > end_request - and nthreads > max_pool_size): - - # Since Jython can't reclaim the socket and release the semaphore - # after a thread leaks a request, we'll exhaust the semaphore and - # deadlock. - raise SkipTest("Jython can't do socket reclamation") - - threads = [] - for i in range(nthreads): - t = CreateAndReleaseSocketNoRendezvous( - self, c, start_request, end_request) - threads.append(t) - - for t in threads: - t.start() - - if 'PyPy' in sys.version: - # With PyPy we need to kick off the gc whenever the threads hit the - # rendezvous since nthreads > max_pool_size. - gc_collect_until_done(threads) - else: - for t in threads: - t.join() - - for t in threads: - self.assertTrue(t.passed) - - cx_pool = get_pool(c) - - # Socket-reclamation depends on timely garbage-collection - if 'PyPy' in sys.version: - gc.collect() - - if self.use_greenlets: - # Wait for Greenlet.link() callbacks to execute - the_hub = hub.get_hub() - if hasattr(the_hub, 'join'): - # Gevent 1.0 - the_hub.join() - else: - # Gevent 0.13 and less - the_hub.shutdown() - - # thread.join completes slightly *before* thread locals are - # cleaned up, so wait up to 5 seconds for them. 
- self.sleep(0.1) - cx_pool._ident.get() - start = time.time() - - while ( - not cx_pool.sockets - and cx_pool._socket_semaphore.counter < max_pool_size - and (time.time() - start) < 5 - ): - self.sleep(0.1) - cx_pool._ident.get() - - self.assertTrue(len(cx_pool.sockets) >= 1) - self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter) - - def test_max_pool_size(self): - self._test_max_pool_size( - start_request=0, end_request=0, nthreads=10, max_pool_size=4) - - def test_max_pool_size_none(self): - self._test_max_pool_size( - start_request=0, end_request=0, nthreads=10, max_pool_size=None) - - def test_max_pool_size_with_request(self): - self._test_max_pool_size( - start_request=1, end_request=1, nthreads=10, max_pool_size=10) - - def test_max_pool_size_with_multiple_request(self): - self._test_max_pool_size( - start_request=10, end_request=10, nthreads=10, max_pool_size=10) - - def test_max_pool_size_with_redundant_request(self): - self._test_max_pool_size( - start_request=2, end_request=1, nthreads=10, max_pool_size=10) - - def test_max_pool_size_with_redundant_request2(self): - self._test_max_pool_size( - start_request=20, end_request=1, nthreads=10, max_pool_size=10) - - def test_max_pool_size_with_redundant_request_no_rendezvous(self): - self._test_max_pool_size_no_rendezvous(2, 1) - - def test_max_pool_size_with_redundant_request_no_rendezvous2(self): - self._test_max_pool_size_no_rendezvous(20, 1) - - def test_max_pool_size_with_leaked_request(self): - # Call start_request() but not end_request() -- when threads die, they - # should return their request sockets to the pool. - self._test_max_pool_size( - start_request=1, end_request=0, nthreads=10, max_pool_size=10) - - def test_max_pool_size_with_leaked_request_no_rendezvous(self): - self._test_max_pool_size_no_rendezvous(1, 0) - - def test_max_pool_size_with_end_request_only(self): - # Call end_request() but not start_request() - self._test_max_pool_size(0, 1) - - def test_max_pool_size_with_connection_failure(self): - # The pool acquires its semaphore before attempting to connect; ensure - # it releases the semaphore on connection failure. - class TestPool(Pool): - def connect(self): - raise socket.error() - - test_pool = TestPool( - pair=('example.com', 27017), - max_size=1, - net_timeout=1, - conn_timeout=1, - use_ssl=False, - wait_queue_timeout=1, - use_greenlets=self.use_greenlets) - - # First call to get_socket fails; if pool doesn't release its semaphore - # then the second call raises "ConnectionFailure: Timed out waiting for - # socket from pool" instead of the socket.error. - for i in range(2): - self.assertRaises(socket.error, test_pool.get_socket) - - -class SocketGetter(MongoThread): - """Utility for _TestMaxOpenSockets and _TestWaitQueueMultiple""" - def __init__(self, test_case, pool): - super(SocketGetter, self).__init__(test_case) - self.state = 'init' - self.pool = pool - self.sock = None - - def run(self): - self.state = 'get_socket' - self.sock = self.pool.get_socket() - self.state = 'sock' - - -class _TestMaxOpenSockets(_TestPoolingBase): - """Test that connection pool doesn't open more than max_size sockets. - To be run both with threads and with greenlets. 
- """ - def get_pool_with_wait_queue_timeout(self, wait_queue_timeout): - return self.get_pool((host, port), - 1, None, None, - False, - wait_queue_timeout=wait_queue_timeout, - wait_queue_multiple=None) - - def test_wait_queue_timeout(self): - wait_queue_timeout = 2 # Seconds - pool = self.get_pool_with_wait_queue_timeout(wait_queue_timeout) - sock_info = pool.get_socket() - start = time.time() - self.assertRaises(ConnectionFailure, pool.get_socket) - duration = time.time() - start - self.assertTrue( - abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % ( - duration, wait_queue_timeout)) - - sock_info.close() - - def test_blocking(self): - # Verify get_socket() with no wait_queue_timeout blocks forever. - pool = self.get_pool_with_wait_queue_timeout(None) - - # Reach max_size. - s1 = pool.get_socket() - t = SocketGetter(self, pool) - t.start() - while t.state != 'get_socket': - self.sleep(0.1) - - self.sleep(1) - self.assertEqual(t.state, 'get_socket') - pool.maybe_return_socket(s1) - while t.state != 'sock': - self.sleep(0.1) - - self.assertEqual(t.state, 'sock') - self.assertEqual(t.sock, s1) - s1.close() - - -class _TestWaitQueueMultiple(_TestPoolingBase): - """Test that connection pool doesn't allow more than - waitQueueMultiple * max_size waiters. - To be run both with threads and with greenlets. - """ - def get_pool_with_wait_queue_multiple(self, wait_queue_multiple): - return self.get_pool((host, port), - 2, None, None, - False, - wait_queue_timeout=None, - wait_queue_multiple=wait_queue_multiple) - - def test_wait_queue_multiple(self): - pool = self.get_pool_with_wait_queue_multiple(3) - - # Reach max_size sockets. - socket_info_0 = pool.get_socket() - socket_info_1 = pool.get_socket() - - # Reach max_size * wait_queue_multiple waiters. - threads = [] - for _ in xrange(6): - t = SocketGetter(self, pool) - t.start() - threads.append(t) - - self.sleep(1) - for t in threads: - self.assertEqual(t.state, 'get_socket') - - self.assertRaises(ExceededMaxWaiters, pool.get_socket) - socket_info_0.close() - socket_info_1.close() - - def test_wait_queue_multiple_unset(self): - pool = self.get_pool_with_wait_queue_multiple(None) - socks = [] - for _ in xrange(2): - sock = pool.get_socket() - socks.append(sock) - threads = [] - for _ in xrange(30): - t = SocketGetter(self, pool) - t.start() - threads.append(t) - self.sleep(1) - for t in threads: - self.assertEqual(t.state, 'get_socket') - - for socket_info in socks: - socket_info.close() - - -class _TestPoolSocketSharing(_TestPoolingBase): - """Directly test that two simultaneous operations don't share a socket. To - be run both with threads and with greenlets. - """ - def _test_pool(self, use_request): - """ - Test that the connection pool prevents both threads and greenlets from - using a socket at the same time. 
- - Sequence: - gr0: start a slow find() - gr1: start a fast find() - gr1: get results - gr0: get results - """ - cx = get_client( - use_greenlets=self.use_greenlets, - auto_start_request=False - ) - - db = cx.pymongo_test - db.test.remove() - db.test.insert({'_id': 1}) - - history = [] - - def find_fast(): - if use_request: - cx.start_request() - - history.append('find_fast start') - - # With greenlets and the old connection._Pool, this would throw - # AssertionError: "This event is already used by another - # greenlet" - self.assertEqual({'_id': 1}, db.test.find_one()) - history.append('find_fast done') - - if use_request: - cx.end_request() - - def find_slow(): - if use_request: - cx.start_request() - - history.append('find_slow start') - - # Javascript function that pauses N seconds per document - fn = delay(10) - if (is_mongos(db.connection) or not - version.at_least(db.connection, (1, 7, 2))): - # mongos doesn't support eval so we have to use $where - # which is less reliable in this context. - self.assertEqual(1, db.test.find({"$where": fn}).count()) - else: - # 'nolock' allows find_fast to start and finish while we're - # waiting for this to complete. - self.assertEqual({'ok': 1.0, 'retval': True}, - db.command('eval', fn, nolock=True)) - - history.append('find_slow done') - - if use_request: - cx.end_request() - - if self.use_greenlets: - gr0, gr1 = Greenlet(find_slow), Greenlet(find_fast) - gr0.start() - gr1.start_later(.1) - else: - gr0 = threading.Thread(target=find_slow) - gr0.setDaemon(True) - gr1 = threading.Thread(target=find_fast) - gr1.setDaemon(True) - - gr0.start() - time.sleep(.1) - gr1.start() - - gr0.join() - gr1.join() - - self.assertEqual([ - 'find_slow start', - 'find_fast start', - 'find_fast done', - 'find_slow done', - ], history) - - def test_pool(self): - self._test_pool(use_request=False) - - def test_pool_request(self): - self._test_pool(use_request=True) diff --git a/test/test_pooling_gevent.py b/test/test_pooling_gevent.py deleted file mode 100644 index 88bb2c0832..0000000000 --- a/test/test_pooling_gevent.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2012-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. 
- -"""Tests for connection-pooling with greenlets and Gevent""" - -import gc -import sys -import time -import unittest - -from nose.plugins.skip import SkipTest - -from pymongo import pool -from pymongo.errors import ConfigurationError -from test import host, port -from test.utils import looplet -from test.test_pooling_base import ( - _TestPooling, _TestMaxPoolSize, _TestMaxOpenSockets, - _TestPoolSocketSharing, _TestWaitQueueMultiple, has_gevent) - - -class TestPoolingGevent(_TestPooling, unittest.TestCase): - """Apply all the standard pool tests with greenlets and Gevent""" - use_greenlets = True - - -class TestPoolingGeventSpecial(unittest.TestCase): - """Do a few special greenlet tests that don't use TestPoolingBase""" - def test_greenlet_sockets(self): - # Check that Pool gives two sockets to two greenlets - try: - import greenlet - import gevent - except ImportError: - raise SkipTest('Gevent not installed') - - cx_pool = pool.Pool( - pair=(host, port), - max_size=10, - net_timeout=1000, - conn_timeout=1000, - use_ssl=False, - use_greenlets=True) - - socks = [] - - def get_socket(): - cx_pool.start_request() - socks.append(cx_pool.get_socket()) - - looplet([ - greenlet.greenlet(get_socket), - greenlet.greenlet(get_socket), - ]) - - self.assertEqual(2, len(socks)) - self.assertNotEqual(socks[0], socks[1]) - - def test_greenlet_sockets_with_request(self): - # Verify two assumptions: that start_request() with two greenlets but - # not use_greenlets fails, meaning that the two greenlets will - # share one socket. Also check that start_request() with use_greenlets - # succeeds, meaning that two greenlets will get different sockets. - - try: - import greenlet - import gevent - except ImportError: - raise SkipTest('Gevent not installed') - - pool_args = dict( - pair=(host,port), - max_size=10, - net_timeout=1000, - conn_timeout=1000, - use_ssl=False, - ) - - for use_greenlets, use_request, expect_success in [ - (True, True, True), - (True, False, False), - (False, True, False), - (False, False, False), - ]: - pool_args_cp = pool_args.copy() - pool_args_cp['use_greenlets'] = use_greenlets - cx_pool = pool.Pool(**pool_args_cp) - - # Map: greenlet -> socket - greenlet2socks = {} - main = greenlet.getcurrent() - - def get_socket_in_request(): - # Get a socket from the pool twice, switching contexts each time - if use_request: - cx_pool.start_request() - - main.switch() - - for _ in range(2): - sock = cx_pool.get_socket() - cx_pool.maybe_return_socket(sock) - greenlet2socks.setdefault( - greenlet.getcurrent(), [] - ).append(id(sock)) - - main.switch() - - cx_pool.end_request() - - greenlets = [ - greenlet.greenlet(get_socket_in_request), - greenlet.greenlet(get_socket_in_request), - ] - - # Run both greenlets to completion - looplet(greenlets) - - socks_for_gr0 = greenlet2socks[greenlets[0]] - socks_for_gr1 = greenlet2socks[greenlets[1]] - - # Whether we expect requests to work or not, we definitely expect - # greenlet2socks to have the same number of keys and values - self.assertEqual(2, len(greenlet2socks)) - self.assertEqual(2, len(socks_for_gr0)) - self.assertEqual(2, len(socks_for_gr1)) - - # If we started a request, then there was a point at which we had - # 2 active sockets, otherwise we always used one. - if use_request and use_greenlets: - self.assertEqual(2, len(cx_pool.sockets)) - else: - self.assertEqual(1, len(cx_pool.sockets)) - - # Again, regardless of whether requests work, a greenlet will get - # the same socket each time it calls get_socket() within a request. 
- # What we're really testing is that the two *different* greenlets - # get *different* sockets from each other. - self.assertEqual( - socks_for_gr0[0], socks_for_gr0[1], - "Expected greenlet 0 to get the same socket for each call " - "to get_socket()" - ) - - self.assertEqual( - socks_for_gr1[0], socks_for_gr1[1], - "Expected greenlet 1 to get the same socket for each call " - "to get_socket()" - ) - - if expect_success: - # We passed use_greenlets=True, so start_request successfully - # distinguished between the two greenlets. - self.assertNotEqual( - socks_for_gr0[0], socks_for_gr1[0], - "Expected two greenlets to get two different sockets" - ) - - else: - # We passed use_greenlets=False, so start_request didn't - # distinguish between the two greenlets, and it gave them both - # the same socket. - self.assertEqual( - socks_for_gr0[0], socks_for_gr1[0], - "Expected two greenlets to get same socket" - ) - - -class TestMaxPoolSizeGevent(_TestMaxPoolSize, unittest.TestCase): - use_greenlets = True - - -class TestPoolSocketSharingGevent(_TestPoolSocketSharing, unittest.TestCase): - use_greenlets = True - - -class TestMaxOpenSocketsGevent(_TestMaxOpenSockets, unittest.TestCase): - use_greenlets = True - - -class TestWaitQueueMultipleGevent(_TestWaitQueueMultiple, unittest.TestCase): - use_greenlets = True - - -class TestUseGreenletsWithoutGevent(unittest.TestCase): - def test_use_greenlets_without_gevent(self): - # Verify that Pool(use_greenlets=True) raises ConfigurationError if - # Gevent is not installed, and that its destructor runs without error. - if has_gevent: - raise SkipTest( - "Gevent is installed, can't test what happens calling " - "Pool(use_greenlets=True) when Gevent is unavailable") - - if 'java' in sys.platform: - raise SkipTest("Can't rely on __del__ in Jython") - - # Possible outcomes of __del__. - DID_NOT_RUN, RAISED, SUCCESS = range(3) - outcome = [DID_NOT_RUN] - - class TestPool(pool.Pool): - def __del__(self): - try: - pool.Pool.__del__(self) # Pool is old-style, no super() - outcome[0] = SUCCESS - except: - outcome[0] = RAISED - - # Pool raises ConfigurationError, "The Gevent module is not available". - self.assertRaises( - ConfigurationError, - TestPool, - pair=(host, port), - max_size=10, - net_timeout=1000, - conn_timeout=1000, - use_ssl=False, - use_greenlets=True) - - # Convince PyPy to call __del__. - for _ in range(10): - if outcome[0] == DID_NOT_RUN: - gc.collect() - time.sleep(0.1) - - if outcome[0] == DID_NOT_RUN: - self.fail("Pool.__del__ didn't run") - elif outcome[0] == RAISED: - self.fail("Pool.__del__ raised exception") - - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_pymongo.py b/test/test_pymongo.py index a861b1f780..fd8ece6c03 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,21 +13,30 @@ # limitations under the License. 
"""Test the pymongo module itself.""" +from __future__ import annotations -import unittest -import os import sys + sys.path[0:0] = [""] +from test import unittest + import pymongo -from test import host, port +from pymongo._version import get_version_tuple + class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - c = pymongo.MongoClient(host, port) - self.assertEqual(c.host, host) - self.assertEqual(c.port, port) + self.assertEqual(pymongo.MongoClient, pymongo.synchronous.mongo_client.MongoClient) + + def test_get_version_tuple(self): + self.assertEqual(get_version_tuple("4.8.0.dev1"), (4, 8, 0, ".dev1")) + self.assertEqual(get_version_tuple("4.8.1"), (4, 8, 1)) + self.assertEqual(get_version_tuple("5.0.0rc1"), (5, 0, 0, "rc1")) + self.assertEqual(get_version_tuple("5.0"), (5, 0)) + with self.assertRaises(ValueError): + get_version_tuple("5") if __name__ == "__main__": diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py new file mode 100644 index 0000000000..4d9a3ceb05 --- /dev/null +++ b/test/test_raw_bson.py @@ -0,0 +1,219 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import datetime +import sys +import uuid + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest + +from bson import Code, DBRef, decode, encode +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import InvalidBSON +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from bson.son import SON + +_IS_SYNC = True + + +class TestRawBSONDocument(IntegrationTest): + # {'_id': ObjectId('556df68b6e32ab21a95e0785'), + # 'name': 'Sherlock', + # 'addresses': [{'street': 'Baker Street'}]} + bson_string = ( + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" + ) + document = RawBSONDocument(bson_string) + + def tearDown(self): + if client_context.connected: + self.client.pymongo_test.test_raw.drop() + + def test_decode(self): + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] + self.assertIsInstance(first_address, RawBSONDocument) + self.assertEqual("Baker Street", first_address["street"]) + + def test_raw(self): + self.assertEqual(self.bson_string, self.document.raw) + + def test_empty_doc(self): + doc = RawBSONDocument(encode({})) + with self.assertRaises(KeyError): + doc["does-not-exist"] + + def test_invalid_bson_sequence(self): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): + RawBSONDocument(bson_byte_sequence) + + def test_invalid_bson_eoo(self): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): + 
RawBSONDocument(invalid_bson_eoo) + + @client_context.require_connection + def test_round_trip(self): + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) + db.test_raw.insert_one(self.document) + result = db.test_raw.find_one(self.document["_id"]) + assert result is not None + self.assertIsInstance(result, RawBSONDocument) + self.assertEqual(dict(self.document.items()), dict(result.items())) + + @client_context.require_connection + def test_round_trip_raw_uuid(self): + coll = self.client.get_database("pymongo_test").test_raw + uid = uuid.uuid4() + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} + raw = RawBSONDocument(encode(doc)) + coll.insert_one(raw) + self.assertEqual(coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) + + # Test that the raw bytes haven't changed. + raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) + self.assertEqual(raw_coll.find_one(), raw) + + def test_with_codec_options(self): + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # encoded with JAVA_LEGACY uuid representation. + bson_string = ( + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" + ) + document = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) + + @client_context.require_connection + def test_round_trip_codec_options(self): + doc = { + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), + } + db = self.client.pymongo_test + coll = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) + coll.insert_one(doc) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) + self.assertEqual( + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), coll.find_one() + ) + + @client_context.require_connection + def test_raw_bson_document_embedded(self): + doc = {"embedded": self.document} + db = self.client.pymongo_test + db.test_raw.insert_one(doc) + result = db.test_raw.find_one() + assert result is not None + self.assertEqual(decode(self.document.raw), result["embedded"]) + + # Make sure that CodecOptions are preserved. + # {'embedded': [ + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # ]} + # encoded with JAVA_LEGACY uuid representation. 
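
# ---------------------------------------------------------------------------
# Illustrative aside, not from the patch itself: RawBSONDocument wraps raw
# BSON bytes, decodes fields lazily on access, and exposes the unchanged
# bytes via .raw. This uses only APIs the test above already imports.
from bson import decode, encode
from bson.raw_bson import RawBSONDocument

_raw_doc = RawBSONDocument(encode({"name": "Sherlock"}))
assert _raw_doc["name"] == "Sherlock"  # field decoded on demand
assert decode(_raw_doc.raw) == {"name": "Sherlock"}  # bytes round-trip intact
# ---------------------------------------------------------------------------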
+ bson_string = ( + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" + ) + rbd = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + db.test_raw.drop() + db.test_raw.insert_one(rbd) + result = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() + assert result is not None + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) + + @client_context.require_connection + def test_write_response_raw_bson(self): + coll = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw + + # No Exceptions raised while handling write response. + coll.insert_one(self.document) + coll.delete_one(self.document) + coll.insert_many([self.document]) + coll.delete_many(self.document) + coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + coll.update_many(self.document, {"$set": {"b": "c"}}) + + def test_preserve_key_ordering(self): + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] + rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) + + for rkey, elt in zip(rawdoc, keyvaluepairs): + self.assertEqual(rkey, elt[0]) + + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_concern.py b/test/test_read_concern.py new file mode 100644 index 0000000000..62b2491475 --- /dev/null +++ b/test/test_read_concern.py @@ -0,0 +1,120 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
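
# ---------------------------------------------------------------------------
# Illustrative aside, not from the patch itself: the ReadConcern behavior the
# tests below assert, in miniature. `ok_for_legacy` is true for the server
# default (no level) and for "local", and false for "majority".
from pymongo.read_concern import ReadConcern

assert ReadConcern().level is None and ReadConcern().ok_for_legacy
assert ReadConcern("local").ok_for_legacy
assert not ReadConcern("majority").ok_for_legacy
# ---------------------------------------------------------------------------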
+ +"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context +from test.utils_shared import OvertCommandListener + +from bson.son import SON +from pymongo.errors import OperationFailure +from pymongo.read_concern import ReadConcern + +_IS_SYNC = True + + +class TestReadConcern(IntegrationTest): + listener: OvertCommandListener + + @client_context.require_connection + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + client_context.client.pymongo_test.create_collection("coll") + + def tearDown(self): + client_context.client.pymongo_test.drop_collection("coll") + + def test_read_concern(self): + rc = ReadConcern() + self.assertIsNone(rc.level) + self.assertTrue(rc.ok_for_legacy) + + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) + self.assertFalse(rc.ok_for_legacy) + + rc = ReadConcern("local") + self.assertEqual("local", rc.level) + self.assertTrue(rc.ok_for_legacy) + + self.assertRaises(TypeError, ReadConcern, 42) + + def test_read_concern_uri(self): + uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" + client = self.rs_or_single_client(uri, connect=False) + self.assertEqual(ReadConcern("majority"), client.read_concern) + + def test_invalid_read_concern(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): + coll.find_one() + + def test_find_command(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + coll.find({"field": "value"}).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + coll.find({"field": "value"}).to_list() + self.assertEqualCommand( + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.started_events[0].command, + ) + + def test_command_cursor(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) + + def test_aggregate_out(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + (coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])).to_list() + + # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
+ if client_context.version >= (4, 1): + self.assertIn("readConcern", self.listener.started_events[0].command) + else: + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 457ea6b512..084abdf3e1 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -1,4 +1,4 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,619 +13,706 @@ # limitations under the License. """Test the replica_set_connection module.""" -import random +from __future__ import annotations +import contextlib +import copy +import pickle +import random import sys -import unittest +from typing import Any -from nose.plugins.skip import SkipTest +from pymongo.operations import _Op sys.path[0:0] = [""] -from bson.son import SON -from pymongo.cursor import _QUERY_OPTIONS -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.read_preferences import (ReadPreference, modes, MovingAverage, - secondary_ok_commands) -from pymongo.errors import ConfigurationError - -from test.test_replica_set_client import TestReplicaSetClientBase -from test.test_client import get_client -from test import version, utils, host, port +from test import ( + IntegrationTest, + SkipTest, + client_context, + connected, + unittest, +) +from test.utils_shared import ( + OvertCommandListener, + _ignore_deprecations, + one, + wait_until, +) +from test.version import Version - -class TestReadPreferencesBase(TestReplicaSetClientBase): +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.message import _maybe_add_read_preference +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, readable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestSelections(IntegrationTest): + @client_context.require_connection + def test_bool(self): + client = self.single_client() + + def predicate(): + return client.address + + wait_until(predicate, "discover primary") + selection = Selection.from_topology_description(client._topology.description) + + self.assertTrue(selection) + self.assertFalse(selection.with_server_descriptions([])) + + +class TestReadPreferenceObjects(unittest.TestCase): + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] + + def test_pickle(self): + for pref in self.prefs: + self.assertEqual(pref, pickle.loads(pickle.dumps(pref))) + + def test_copy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.copy(pref)) + + def test_deepcopy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.deepcopy(pref)) + + +class TestReadPreferencesBase(IntegrationTest): + @client_context.require_secondaries_count(1) def setUp(self): - super(TestReadPreferencesBase, self).setUp() + super().setUp() # Insert some data so we can use cursors in read_from_which_host - c = self._get_client() - 
c.pymongo_test.test.drop() - c.pymongo_test.test.insert([{'_id': i} for i in range(10)], w=self.w) + self.client.pymongo_test.test.drop() + self.client.get_database( + "pymongo_test", write_concern=WriteConcern(w=client_context.w) + ).test.insert_many([{"_id": i} for i in range(10)]) - def tearDown(self): - super(TestReadPreferencesBase, self).tearDown() - c = self._get_client() - c.pymongo_test.test.drop() + self.addCleanup(self.client.pymongo_test.test.drop) def read_from_which_host(self, client): - """Do a find() on the client and return which host was used - """ + """Do a find() on the client and return which host was used""" cursor = client.pymongo_test.test.find() - cursor.next() - return cursor._Cursor__connection_id + next(cursor) + return cursor.address def read_from_which_kind(self, client): """Do a find() on the client and return 'primary' or 'secondary' - depending on which the client used. + depending on which the client used. """ - connection_id = self.read_from_which_host(client) - if connection_id == client.primary: - return 'primary' - elif connection_id in client.secondaries: - return 'secondary' + address = self.read_from_which_host(client) + if address == client.primary: + return "primary" + elif address in client.secondaries: + return "secondary" else: self.fail( - 'Cursor used connection id %s, expected either primary ' - '%s or secondaries %s' % ( - connection_id, client.primary, client.secondaries)) + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" + ) def assertReadsFrom(self, expected, **kwargs): - c = self._get_client(**kwargs) + c = self.rs_client(**kwargs) + + def predicate(): + return len(c.nodes - c.arbiters) == client_context.w + + wait_until(predicate, "discovered all nodes") + used = self.read_from_which_kind(c) - self.assertEqual(expected, used, 'Cursor used %s, expected %s' % ( - expected, used)) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") + + +class TestSingleSecondaryOk(TestReadPreferencesBase): + def test_reads_from_secondary(self): + host, port = next(iter(self.client.secondaries)) + # Direct connection to a secondary. + client = self.single_client(host, port) + self.assertFalse(client.is_primary) + + # Regardless of read preference, we should be able to do + # "reads" with a direct connection to a secondary. + # See server-selection.rst#topology-type-single. + self.assertEqual(client.read_preference, ReadPreference.PRIMARY) + + db = client.pymongo_test + coll = db.test + + # Test find and find_one. + self.assertIsNotNone(coll.find_one()) + self.assertEqual(10, len(coll.find().to_list())) + + # Test some database helpers. + self.assertIsNotNone(db.list_collection_names()) + self.assertIsNotNone(db.validate_collection("test")) + self.assertIsNotNone(db.command("ping")) + + # Test some collection helpers. 
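
# ---------------------------------------------------------------------------
# Illustrative aside, not from the patch itself: `single_client` above stands
# in for a direct, single-seed connection. Outside the test harness the
# equivalent client would look roughly like this; the host and port here are
# assumptions of the sketch.
from pymongo import MongoClient

_secondary_client = MongoClient("localhost", 27018, directConnection=True, connect=False)
# With directConnection=True the topology type is Single, so reads are served
# by that member even though read_preference remains PRIMARY, which is what
# the collection helpers exercised below rely on.
# ---------------------------------------------------------------------------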
+ self.assertEqual(10, coll.count_documents({})) + self.assertEqual(10, len(coll.distinct("_id"))) + self.assertIsNotNone(coll.aggregate([])) + self.assertIsNotNone(coll.index_information()) class TestReadPreferences(TestReadPreferencesBase): def test_mode_validation(self): - # 'modes' are imported from read_preferences.py - for mode in modes: - self.assertEqual(mode, self._get_client( - read_preference=mode).read_preference) + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual(mode, (self.rs_client(read_preference=mode)).read_preference) - self.assertRaises(ConfigurationError, self._get_client, - read_preference='foo') + with self.assertRaises(TypeError): + self.rs_client(read_preference="foo") def test_tag_sets_validation(self): - # Can't use tags with PRIMARY - self.assertRaises(ConfigurationError, self._get_client, - tag_sets=[{'k': 'v'}]) + S = Secondary(tag_sets=[{}]) + self.assertEqual([{}], (self.rs_client(read_preference=S)).read_preference.tag_sets) - # ... but empty tag sets are ok with PRIMARY - self.assertEqual([{}], self._get_client(tag_sets=[{}]).tag_sets) + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual([{"k": "v"}], (self.rs_client(read_preference=S)).read_preference.tag_sets) - S = ReadPreference.SECONDARY - self.assertEqual([{}], self._get_client(read_preference=S).tag_sets) - - self.assertEqual([{'k': 'v'}], self._get_client( - read_preference=S, tag_sets=[{'k': 'v'}]).tag_sets) - - self.assertEqual([{'k': 'v'}, {}], self._get_client( - read_preference=S, tag_sets=[{'k': 'v'}, {}]).tag_sets) + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual( + [{"k": "v"}, {}], + (self.rs_client(read_preference=S)).read_preference.tag_sets, + ) - self.assertRaises(ConfigurationError, self._get_client, - read_preference=S, tag_sets=[]) + self.assertRaises(ValueError, Secondary, tag_sets=[]) # One dict not ok, must be a list of dicts - self.assertRaises(ConfigurationError, self._get_client, - read_preference=S, tag_sets={'k': 'v'}) + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) + + self.assertRaises(TypeError, Secondary, tag_sets="foo") - self.assertRaises(ConfigurationError, self._get_client, - read_preference=S, tag_sets='foo') + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) - self.assertRaises(ConfigurationError, self._get_client, - read_preference=S, tag_sets=['foo']) + def test_threshold_validation(self): + self.assertEqual( + 17, + (self.rs_client(localThresholdMS=17, connect=False)).options.local_threshold_ms, + ) - def test_latency_validation(self): - self.assertEqual(17, self._get_client( - secondary_acceptable_latency_ms=17 - ).secondary_acceptable_latency_ms) + self.assertEqual( + 42, + (self.rs_client(localThresholdMS=42, connect=False)).options.local_threshold_ms, + ) - self.assertEqual(42, self._get_client( - secondaryAcceptableLatencyMS=42 - ).secondary_acceptable_latency_ms) + self.assertEqual( + 666, + (self.rs_client(localThresholdMS=666, connect=False)).options.local_threshold_ms, + ) - self.assertEqual(666, self._get_client( - secondaryacceptablelatencyms=666 - ).secondary_acceptable_latency_ms) + self.assertEqual( + 0, + (self.rs_client(localThresholdMS=0, connect=False)).options.local_threshold_ms, + ) + + with self.assertRaises(ValueError): + self.rs_client(localthresholdms=-1) + + def test_zero_latency(self): + ping_times: set = set() + # Generate unique ping times. 
+ while len(ping_times) < len(self.client.nodes): + ping_times.add(random.random()) + for ping_time, host in zip(ping_times, self.client.nodes): + ServerDescription._host_to_round_trip_time[host] = ping_time + try: + client = connected(self.rs_client(readPreference="nearest", localThresholdMS=0)) + wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") + host = self.read_from_which_host(client) + for _ in range(5): + self.assertEqual(host, self.read_from_which_host(client)) + finally: + ServerDescription._host_to_round_trip_time.clear() def test_primary(self): - self.assertReadsFrom('primary', - read_preference=ReadPreference.PRIMARY) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises(ConfigurationError, - self._get_client, tag_sets=[{'dc': 'ny'}]) + with self.assertRaises(ConfigurationError): + self.rs_client(tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): - self.assertReadsFrom('primary', - read_preference=ReadPreference.PRIMARY_PREFERRED) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) def test_secondary(self): - self.assertReadsFrom('secondary', - read_preference=ReadPreference.SECONDARY) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) def test_secondary_preferred(self): - self.assertReadsFrom('secondary', - read_preference=ReadPreference.SECONDARY_PREFERRED) - - def test_secondary_only(self): - # Test deprecated mode SECONDARY_ONLY, which is now a synonym for - # SECONDARY - self.assertEqual( - ReadPreference.SECONDARY, ReadPreference.SECONDARY_ONLY) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) def test_nearest(self): - # With high secondaryAcceptableLatencyMS, expect to read from any + # With high localThresholdMS, expect to read from any # member - c = self._get_client( - read_preference=ReadPreference.NEAREST, - secondaryAcceptableLatencyMS=10000, # 10 seconds - auto_start_request=False) + c = self.rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds - data_members = set(self.hosts).difference(set(self.arbiters)) + data_members = {self.client.primary} | self.client.secondaries # This is a probabilistic test; track which members we've read from so # far, and keep reading until we've used all the members or give up. # Chance of using only 2 of 3 members 10k times if there's no bug = # 3 * (2/3)**10000, very low. 
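
# (Worked out, not from the patch itself: (2/3)**10000 = exp(10000 * ln(2/3)),
# which is roughly 10**-1761, so the union bound 3 * (2/3)**10000 is about
# 3e-1761, far below anything float arithmetic can even represent. A failure
# of the check below therefore indicates a real bug, not bad luck.)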
- used = set() + used: set = set() i = 0 while data_members.difference(used) and i < 10000: - host = self.read_from_which_host(c) - used.add(host) + address = self.read_from_which_host(c) + used.add(address) i += 1 not_used = data_members.difference(used) - latencies = ', '.join( - '%s: %dms' % (member.host, member.ping_time.get()) - for member in c._MongoReplicaSetClient__rs_state.members) + latencies = ", ".join( + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in (c._get_topology()).select_servers(readable_server_selector, _Op.TEST) + ) - self.assertFalse(not_used, + self.assertFalse( + not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies)) + f" but didn't use {not_used}\nlatencies: {latencies}", + ) -class ReadPrefTester(MongoReplicaSetClient): +class ReadPrefTester(MongoClient): def __init__(self, *args, **kwargs): self.has_read_from = set() - super(ReadPrefTester, self).__init__(*args, **kwargs) + client_options = client_context.client_options + client_options.update(kwargs) + super().__init__(*args, **client_options) - def _MongoReplicaSetClient__send_and_receive(self, member, *args, **kwargs): - self.has_read_from.add(member) - rsc = super(ReadPrefTester, self) - return rsc._MongoReplicaSetClient__send_and_receive( - member, *args, **kwargs) + def _conn_for_reads(self, read_preference, session, operation): + context = super()._conn_for_reads(read_preference, session, operation) + return context + @contextlib.contextmanager + def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + with context as (conn, read_preference): + self.record_a_read(conn.address) + yield conn, read_preference + + def record_a_read(self, address): + server = (self._get_topology()).select_server_by_address(address, _Op.TEST, 0) + self.has_read_from.add(server) -class TestCommandAndReadPreference(TestReplicaSetClientBase): - def setUp(self): - super(TestCommandAndReadPreference, self).setUp() - # Need auto_start_request False to avoid pinning members. +_PREF_MAP = [ + (Primary, SERVER_TYPE.RSPrimary), + (PrimaryPreferred, SERVER_TYPE.RSPrimary), + (Secondary, SERVER_TYPE.RSSecondary), + (SecondaryPreferred, SERVER_TYPE.RSSecondary), + (Nearest, "any"), +] + + +class TestCommandAndReadPreference(IntegrationTest): + c: ReadPrefTester + client_version: Version + + @client_context.require_secondaries_count(1) + def setUp(self): + super().setUp() self.c = ReadPrefTester( - '%s:%s' % (host, port), - replicaSet=self.name, auto_start_request=False, - # Effectively ignore members' ping times so we can test the effect - # of ReadPreference modes only - secondary_acceptable_latency_ms=1000*1000) + # Ignore round trip times, to test ReadPreference modes only. + localThresholdMS=1000 * 1000, + ) + self.client_version = Version.from_client(self.c) + # mapReduce fails if the collection does not exist. + coll = self.c.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=client_context.w) + ) + coll.insert_one({}) def tearDown(self): - # We create a lot of collections and indexes in these tests, so drop - # the database. 
- self.c.drop_database('pymongo_test') + self.c.drop_database("pymongo_test") self.c.close() - self.c = None - super(TestCommandAndReadPreference, self).tearDown() - def executed_on_which_member(self, client, fn, *args, **kwargs): + def executed_on_which_server(self, client, fn, *args, **kwargs): + """Execute fn(*args, **kwargs) and return the Server instance used.""" client.has_read_from.clear() fn(*args, **kwargs) self.assertEqual(1, len(client.has_read_from)) - member, = client.has_read_from - return member - - def assertExecutedOn(self, state, client, fn, *args, **kwargs): - member = self.executed_on_which_member(client, fn, *args, **kwargs) - if state == 'primary': - self.assertTrue(member.is_primary) - elif state == 'secondary': - self.assertFalse(member.is_primary) - else: - self.fail("Bad state %s" % repr(state)) - - def _test_fn(self, obedient, fn): - if not obedient: - for mode in modes: - self.c.read_preference = mode + return one(client.has_read_from) - # Run it a few times to make sure we don't just get lucky the - # first time. - for _ in range(10): - self.assertExecutedOn('primary', self.c, fn) - else: - for mode, expected_state in [ - (ReadPreference.PRIMARY, 'primary'), - (ReadPreference.PRIMARY_PREFERRED, 'primary'), - (ReadPreference.SECONDARY, 'secondary'), - (ReadPreference.SECONDARY_PREFERRED, 'secondary'), - (ReadPreference.NEAREST, 'any'), - ]: - self.c.read_preference = mode - for _ in range(10): - if expected_state in ('primary', 'secondary'): - self.assertExecutedOn(expected_state, self.c, fn) - elif expected_state == 'any': - used = set() - for _ in range(1000): - member = self.executed_on_which_member( - self.c, fn) - used.add(member.host) - if len(used) == len(self.c.secondaries) + 1: - # Success - break - - unused = self.c.secondaries.union( - set([self.c.primary]) - ).difference(used) - if unused: - self.fail( - "Some members not used for NEAREST: %s" % ( - unused)) + def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): + server = self.executed_on_which_server(client, fn, *args, **kwargs) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) + + def _test_fn(self, server_type, fn): + for _ in range(10): + if server_type == "any": + used = set() + for _ in range(1000): + server = self.executed_on_which_server(self.c, fn) + used.add(server.description.address) + if len(used) == len(self.c.secondaries) + 1: + # Success + break + + assert self.c.primary is not None + unused = (self.c.secondaries).union({self.c.primary}).difference(used) + if unused: + self.fail("Some members not used for NEAREST: %s" % (unused)) + else: + self.assertExecutedOn(server_type, self.c, fn) + + def _test_primary_helper(self, func): + # Helpers that ignore read preference. + self._test_fn(SERVER_TYPE.RSPrimary, func) + + def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): + for mode, server_type in _PREF_MAP: + new_coll = coll.with_options(read_preference=mode()) + + def func(): + return getattr(new_coll, meth)(*args, **kwargs) + + if secondary_ok: + self._test_fn(server_type, func) + else: + self._test_fn(SERVER_TYPE.RSPrimary, func) def test_command(self): - # Test generic 'command' method. Some commands obey read preference, - # most don't. - # Disobedient commands, always go to primary - self._test_fn(False, lambda: self.c.pymongo_test.command('ping')) - self._test_fn(False, lambda: self.c.admin.command('buildinfo')) - - # Obedient commands. 
- self._test_fn(True, lambda: self.c.pymongo_test.command('group', { - 'ns': 'test', 'key': {'a': 1}, '$reduce': 'function(obj, prev) { }', - 'initial': {}})) - - self._test_fn(True, lambda: self.c.pymongo_test.command('dbStats')) - - # collStats fails if no collection - self.c.pymongo_test.test.insert({}, w=self.w) - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'collStats', 'test')) - - # Count - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'count', 'test')) - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'count', 'test', query={'a': 1})) - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('count', 'test'), ('query', {'a': 1})]))) - - # Distinct - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'distinct', 'test', key={'a': 1})) - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'distinct', 'test', key={'a': 1}, query={'a': 1})) - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('distinct', 'test'), ('key', {'a': 1}), ('query', {'a': 1})]))) - - # Geo stuff. Make sure a 2d index is created and replicated - self.c.pymongo_test.system.indexes.insert({ - 'key' : { 'location' : '2d' }, 'ns' : 'pymongo_test.test', - 'name' : 'location_2d' }, w=self.w) - - self.c.pymongo_test.system.indexes.insert(SON([ - ('ns', 'pymongo_test.test'), - ('key', SON([('location', 'geoHaystack'), ('key', 1)])), - ('bucketSize', 100), - ('name', 'location_geoHaystack'), - ]), w=self.w) - - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'geoNear', 'test', near=[0, 0])) - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('geoNear', 'test'), ('near', [0, 0])]))) - - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'geoSearch', 'test', near=[33, 33], maxDistance=6, - search={'type': 'restaurant' }, limit=30)) - - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('geoSearch', 'test'), ('near', [33, 33]), ('maxDistance', 6), - ('search', {'type': 'restaurant'}), ('limit', 30)]))) - - if version.at_least(self.c, (2, 1, 0)): - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('aggregate', 'test'), - ('pipeline', []) - ]))) - - # Text search. - if version.at_least(self.c, (2, 3, 2)): - utils.enable_text_search(self.c) - db = self.c.pymongo_test - - # Only way to create an index and wait for all members to build it. - index = { - 'ns': 'pymongo_test.test', - 'name': 't_text', - 'key': {'t': 'text'}} - - db.system.indexes.insert( - index, manipulate=False, check_keys=False, w=self.w) - - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('text', 'test'), - ('search', 'foo')]))) - - self.c.pymongo_test.test.drop_indexes() - - def test_map_reduce_command(self): - # mapreduce fails if no collection - self.c.pymongo_test.test.insert({}, w=self.w) - - # Non-inline mapreduce always goes to primary, doesn't obey read prefs. 
- # Test with command in a SON and with kwargs - self._test_fn(False, lambda: self.c.pymongo_test.command(SON([ - ('mapreduce', 'test'), - ('map', 'function() { }'), - ('reduce', 'function() { }'), - ('out', 'mr_out') - ]))) - - self._test_fn(False, lambda: self.c.pymongo_test.command( - 'mapreduce', 'test', map='function() { }', - reduce='function() { }', out='mr_out')) - - self._test_fn(False, lambda: self.c.pymongo_test.command( - 'mapreduce', 'test', map='function() { }', - reduce='function() { }', out={'replace': 'some_collection'})) - - # Inline mapreduce obeys read prefs - self._test_fn(True, lambda: self.c.pymongo_test.command( - 'mapreduce', 'test', map='function() { }', - reduce='function() { }', out={'inline': True})) - - self._test_fn(True, lambda: self.c.pymongo_test.command(SON([ - ('mapreduce', 'test'), - ('map', 'function() { }'), - ('reduce', 'function() { }'), - ('out', {'inline': True}) - ]))) - - def test_aggregate_command_with_out(self): - if not version.at_least(self.c, (2, 5, 2)): - raise SkipTest("Aggregation with $out requires MongoDB >= 2.5.2") - - # Tests aggregate command when pipeline contains $out. - self.c.pymongo_test.test.insert({"x": 1, "y": 1}, w=self.w) - self.c.pymongo_test.test.insert({"x": 1, "y": 2}, w=self.w) - self.c.pymongo_test.test.insert({"x": 2, "y": 1}, w=self.w) - self.c.pymongo_test.test.insert({"x": 2, "y": 2}, w=self.w) - - # Aggregate with $out always goes to primary, doesn't obey read prefs. - # Test aggregate command sent directly to db.command. - self._test_fn(False, lambda: self.c.pymongo_test.command( - "aggregate", "test", - pipeline=[{"$match": {"x": 1}}, {"$out": "agg_out"}] - )) - - # Test aggregate when sent through the collection aggregate function. - self._test_fn(False, lambda: self.c.pymongo_test.test.aggregate( - [{"$match": {"x": 2}}, {"$out": "agg_out"}] - )) - - self.c.pymongo_test.drop_collection("test") - self.c.pymongo_test.drop_collection("agg_out") + # Test that the generic command helper obeys the read preference + # passed to it. + for mode, server_type in _PREF_MAP: - def test_create_collection(self): - # Collections should be created on primary, obviously - self._test_fn(False, lambda: self.c.pymongo_test.command( - 'create', 'some_collection%s' % random.randint(0, sys.maxint))) - - self._test_fn(False, lambda: self.c.pymongo_test.create_collection( - 'some_collection%s' % random.randint(0, sys.maxint))) - - def test_drop_collection(self): - self._test_fn(False, lambda: self.c.pymongo_test.drop_collection( - 'some_collection')) - - self._test_fn(False, lambda: self.c.pymongo_test.some_collection.drop()) + def func(): + return self.c.pymongo_test.command("dbStats", read_preference=mode()) - def test_group(self): - self._test_fn(True, lambda: self.c.pymongo_test.test.group( - {'a': 1}, {}, {}, 'function() { }')) + self._test_fn(server_type, func) - def test_map_reduce(self): - # mapreduce fails if no collection - self.c.pymongo_test.test.insert({}, w=self.w) - - self._test_fn(False, lambda: self.c.pymongo_test.test.map_reduce( - 'function() { }', 'function() { }', 'mr_out')) - - self._test_fn(True, lambda: self.c.pymongo_test.test.map_reduce( - 'function() { }', 'function() { }', {'inline': 1})) - - def test_inline_map_reduce(self): - # mapreduce fails if no collection - self.c.pymongo_test.test.insert({}, w=self.w) + def test_create_collection(self): + # create_collection runs listCollections on the primary to check if + # the collection already exists. 
+ def func(): + return self.c.pymongo_test.create_collection( + "some_collection%s" % random.randint(0, sys.maxsize) + ) - self._test_fn(True, lambda: self.c.pymongo_test.test.inline_map_reduce( - 'function() { }', 'function() { }')) + self._test_primary_helper(func) - self._test_fn(True, lambda: self.c.pymongo_test.test.inline_map_reduce( - 'function() { }', 'function() { }', full_response=True)) + def test_count_documents(self): + self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) - def test_count(self): - self._test_fn(True, lambda: self.c.pymongo_test.test.count()) - self._test_fn(True, lambda: self.c.pymongo_test.test.find().count()) + def test_estimated_document_count(self): + self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") def test_distinct(self): - self._test_fn(True, lambda: self.c.pymongo_test.test.distinct('a')) - self._test_fn(True, - lambda: self.c.pymongo_test.test.find().distinct('a')) + self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") def test_aggregate(self): - if version.at_least(self.c, (2, 1, 0)): - self._test_fn(True, lambda: self.c.pymongo_test.test.aggregate([])) + self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) + def test_aggregate_write(self): + # 5.0 servers support $out on secondaries. + secondary_ok = client_context.version.at_least(5, 0) + self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) -class TestMovingAverage(unittest.TestCase): - def test_empty_init(self): - self.assertRaises(AssertionError, MovingAverage, []) +class TestMovingAverage(unittest.TestCase): def test_moving_average(self): - avg = MovingAverage([10]) - self.assertEqual(10, avg.get()) - avg2 = avg.clone_with(20) - self.assertEqual(15, avg2.get()) - avg3 = avg2.clone_with(30) - self.assertEqual(20, avg3.get()) - avg4 = avg3.clone_with(-100) - self.assertEqual((10 + 20 + 30 - 100) / 4., avg4.get()) - avg5 = avg4.clone_with(17) - self.assertEqual((10 + 20 + 30 - 100 + 17) / 5., avg5.get()) - avg6 = avg5.clone_with(43) - self.assertEqual((20 + 30 - 100 + 17 + 43) / 5., avg6.get()) - avg7 = avg6.clone_with(-1111) - self.assertEqual((30 - 100 + 17 + 43 - 1111) / 5., avg7.get()) - - -class TestMongosConnection(unittest.TestCase): - def test_mongos_connection(self): - c = get_client() - is_mongos = utils.is_mongos(c) - - # Test default mode, PRIMARY - cursor = c.pymongo_test.test.find() - if is_mongos: - # We only set $readPreference if it's something other than - # PRIMARY to avoid problems with mongos versions that don't - # support read preferences. 
- self.assertEqual( - None, - cursor._Cursor__query_spec().get('$readPreference') - ) - else: - self.assertFalse( - '$readPreference' in cursor._Cursor__query_spec()) - - # Copy these constants for brevity - PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED - SECONDARY = ReadPreference.SECONDARY - SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED - NEAREST = ReadPreference.NEAREST - SLAVE_OKAY = _QUERY_OPTIONS['slave_okay'] - - # Test non-PRIMARY modes which can be combined with tags - for kwarg, value, mongos_mode in ( - ('read_preference', PRIMARY_PREFERRED, 'primaryPreferred'), - ('read_preference', SECONDARY, 'secondary'), - ('read_preference', SECONDARY_PREFERRED, 'secondaryPreferred'), - ('read_preference', NEAREST, 'nearest'), - ('slave_okay', True, 'secondaryPreferred'), - ('slave_okay', False, 'primary') - ): - for tag_sets in ( - None, [{}] - ): - # Create a client e.g. with read_preference=NEAREST or - # slave_okay=True - c = get_client(tag_sets=tag_sets, **{kwarg: value}) - - self.assertEqual(is_mongos, c.is_mongos) - cursor = c.pymongo_test.test.find() - if is_mongos: - # We don't set $readPreference for SECONDARY_PREFERRED - # unless tags are in use. slaveOkay has the same effect. - if mongos_mode == 'secondaryPreferred': - self.assertEqual( - None, - cursor._Cursor__query_spec().get('$readPreference')) - - self.assertTrue( - cursor._Cursor__query_options() & SLAVE_OKAY) - - # Don't send $readPreference for PRIMARY either - elif mongos_mode == 'primary': - self.assertEqual( - None, - cursor._Cursor__query_spec().get('$readPreference')) - - self.assertFalse( - cursor._Cursor__query_options() & SLAVE_OKAY) - else: - self.assertEqual( - {'mode': mongos_mode}, - cursor._Cursor__query_spec().get('$readPreference')) - - self.assertTrue( - cursor._Cursor__query_options() & SLAVE_OKAY) - else: - self.assertFalse( - '$readPreference' in cursor._Cursor__query_spec()) - - for tag_sets in ( - [{'dc': 'la'}], - [{'dc': 'la'}, {'dc': 'sf'}], - [{'dc': 'la'}, {'dc': 'sf'}, {}], - ): - if kwarg == 'slave_okay': - # Can't use tags with slave_okay True or False, need a - # real read preference - self.assertRaises( - ConfigurationError, - get_client, tag_sets=tag_sets, **{kwarg: value}) - - continue - - c = get_client(tag_sets=tag_sets, **{kwarg: value}) - - self.assertEqual(is_mongos, c.is_mongos) - cursor = c.pymongo_test.test.find() - if is_mongos: - self.assertEqual( - {'mode': mongos_mode, 'tags': tag_sets}, - cursor._Cursor__query_spec().get('$readPreference')) + avg = MovingAverage() + self.assertIsNone(avg.get()) + avg.add_sample(10) + self.assertAlmostEqual(10, avg.get()) # type: ignore + avg.add_sample(20) + self.assertAlmostEqual(12, avg.get()) # type: ignore + avg.add_sample(30) + self.assertAlmostEqual(15.6, avg.get()) # type: ignore + + +class TestMongosAndReadPreference(IntegrationTest): + def test_read_preference_document(self): + pref = Primary() + self.assertEqual(pref.document, {"mode": "primary"}) + + pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + 
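
# ---------------------------------------------------------------------------
# Illustrative sketch, not from the patch itself: the expected values in
# test_moving_average above (10, then 12, then 15.6) are consistent with an
# exponentially weighted average that gives each new sample a weight of 0.2.
# A minimal model; PyMongo's MovingAverage may differ in detail.


class MovingAverageSketch:
    def __init__(self):
        self._avg = None  # None until the first sample arrives

    def add_sample(self, sample):
        if self._avg is None:
            self._avg = sample
        else:
            self._avg = 0.8 * self._avg + 0.2 * sample

    def get(self):
        return self._avg


_avg = MovingAverageSketch()
for _sample in (10, 20, 30):
    _avg.add_sample(_sample)
assert abs(_avg.get() - 15.6) < 1e-9  # matches the assertion above
# ---------------------------------------------------------------------------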
self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + with self.assertRaises(TypeError): + # Float is prohibited. + Nearest(max_staleness=1.5) # type: ignore + + with self.assertRaises(ValueError): + Nearest(max_staleness=0) + + with self.assertRaises(ValueError): + Nearest(max_staleness=-2) + + def test_read_preference_document_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) # type: ignore + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. 
+ self.assertEqual(out, {}) else: - self.assertFalse( - '$readPreference' in cursor._Cursor__query_spec()) - - def test_only_secondary_ok_commands_have_read_prefs(self): - c = get_client(read_preference=ReadPreference.SECONDARY) - is_mongos = utils.is_mongos(c) - if not is_mongos: - raise SkipTest("Only mongos have read_prefs added to the spec") - - # Ensure secondary_ok_commands have readPreference - for cmd in secondary_ok_commands: - if cmd == 'mapreduce': # map reduce is a special case - continue - command = SON([(cmd, 1)]) - cursor = c.pymongo_test["$cmd"].find(command.copy()) - # White-listed commands also have to be wrapped in $query - command = SON([('$query', command)]) - command['$readPreference'] = {'mode': 'secondary'} - self.assertEqual(command, cursor._Cursor__query_spec()) - - # map_reduce inline should have read prefs - command = SON([('mapreduce', 'test'), ('out', {'inline': 1})]) - cursor = c.pymongo_test["$cmd"].find(command.copy()) - # White-listed commands also have to be wrapped in $query - command = SON([('$query', command)]) - command['$readPreference'] = {'mode': 'secondary'} - self.assertEqual(command, cursor._Cursor__query_spec()) - - # map_reduce that outputs to a collection shouldn't have read prefs - command = SON([('mapreduce', 'test'), ('out', {'mrtest': 1})]) - cursor = c.pymongo_test["$cmd"].find(command.copy()) - self.assertEqual(command, cursor._Cursor__query_spec()) - - # Other commands shouldn't be changed - for cmd in ('drop', 'create', 'any-future-cmd'): - command = SON([(cmd, 1)]) - cursor = c.pymongo_test["$cmd"].find(command.copy()) - self.assertEqual(command, cursor._Cursor__query_spec()) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) + + def test_send_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + if client_context.supports_secondary_read_pref: + cases["secondary"] = Secondary + listener = OvertCommandListener() + client = self.rs_client(event_listeners=[listener]) + client.admin.command("ping") + for _mode, cls in cases.items(): + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) + listener.reset() + coll.find_one() + started = listener.started_events + self.assertEqual(len(started), 1, started) + cmd = started[0].command + if client_context.is_rs or 
client_context.is_mongos: + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) + else: + self.assertNotIn("$readPreference", cmd) + + def test_maybe_add_read_preference(self): + # Primary doesn't add $readPreference + out = _maybe_add_read_preference({}, Primary()) + self.assertEqual(out, {}) + + pref = PrimaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Secondary() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + # SecondaryPreferred without tag_sets or max_staleness doesn't add + # $readPreference + pref = SecondaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, {}) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = SecondaryPreferred(max_staleness=120) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Nearest() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) + pref = Nearest() + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + + @client_context.require_mongos + def test_mongos(self): + res = client_context.client.config.shards.find_one() + assert res is not None + shard = res["host"] + num_members = shard.count(",") + 1 + if num_members == 1: + raise SkipTest("Need a replica set shard to test.") + coll = client_context.client.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=num_members) + ) + coll.drop() + res = coll.insert_many([{} for _ in range(5)]) + first_id = res.inserted_ids[0] + last_id = res.inserted_ids[-1] + + # Note - this isn't a perfect test since there's no way to + # tell what shard member a query ran on. 
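
# ---------------------------------------------------------------------------
# Illustrative sketch, not from the patch itself: the wrapping rule that
# test_maybe_add_read_preference above asserts. The real helper lives in
# pymongo.message; this stand-in exists only to make the rule explicit.
from bson.son import SON
from pymongo.read_preferences import Primary, SecondaryPreferred


def maybe_add_read_preference_sketch(spec, pref):
    # Primary (mode 0, falsy) and a "plain" SecondaryPreferred (no tags,
    # maxStalenessSeconds, or hedge) are expressed by omission; everything
    # else is wrapped in $query with an explicit $readPreference.
    if pref.mode and pref.document != {"mode": "secondaryPreferred"}:
        if "$query" not in spec:
            spec = SON([("$query", spec)])
        spec["$readPreference"] = pref.document
    return spec


assert maybe_add_read_preference_sketch({}, Primary()) == {}
assert maybe_add_read_preference_sketch({}, SecondaryPreferred()) == {}
assert "$readPreference" in maybe_add_read_preference_sketch(
    {}, SecondaryPreferred(max_staleness=120)
)
# ---------------------------------------------------------------------------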
+ for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): + qcoll = coll.with_options(read_preference=pref) + results = qcoll.find().sort([("_id", 1)]).to_list() + self.assertEqual(first_id, results[0]["_id"]) + self.assertEqual(last_id, results[-1]["_id"]) + results = qcoll.find().sort([("_id", -1)]).to_list() + self.assertEqual(first_id, results[-1]["_id"]) + self.assertEqual(last_id, results[0]["_id"]) + + @client_context.require_mongos + def test_mongos_max_staleness(self): + # Sanity check that we're sending maxStalenessSeconds + coll = client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) + # No error + coll.find_one() + + coll = client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) + try: + coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=120) + ).pymongo_test.test + # No error + coll.find_one() + + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=10) + ).pymongo_test.test + try: + coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py new file mode 100644 index 0000000000..4b816b7af9 --- /dev/null +++ b/test/test_read_write_concern_spec.py @@ -0,0 +1,344 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the read and write concern tests.""" +from __future__ import annotations + +import json +import os +import sys +import warnings +from pathlib import Path + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils_shared import OvertCommandListener + +from pymongo import DESCENDING +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + +# Location of JSON test specifications. 
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") + + +class TestReadWriteConcernSpec(IntegrationTest): + def test_omit_default_read_write_concern(self): + listener = OvertCommandListener() + # Client with default readConcern and writeConcern + client = self.rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + collection.insert_many([{} for _ in range(10)]) + self.addCleanup(collection.drop) + self.addCleanup(client.pymongo_test.collection2.drop) + # Commands MUST NOT send the default read/write concern to the server. + + def rename_and_drop(): + # Ensure collection exists. + collection.insert_one({}) + collection.rename("collection2") + client.pymongo_test.collection2.drop() + + def insert_command_default_write_concern(): + collection.database.command( + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) + + def aggregate_op(): + (collection.aggregate([])).to_list() + + ops = [ + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), + ] + + for name, f in ops: + listener.reset() + f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): + self.assertNotIn( + "readConcern", + event.command, + f"{name} sent default readConcern with {event.command_name}", + ) + self.assertNotIn( + "writeConcern", + event.command, + f"{name} sent default writeConcern with {event.command_name}", + ) + + def assertWriteOpsRaise(self, write_concern, expected_exception): + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = self.rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) + db = client.get_database("pymongo_test") + coll = db.test + + def insert_command(): + coll.database.command( + "insert", + "new_collection", + documents=[{}], + writeConcern=write_concern.document, + parse_write_concern_error=True, + ) + + ops = [ + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), + # SERVER-46668 Delete all the documents in the collection to + # workaround a hang in createIndexes. 
+ ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), + ] + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if client_context.version[:2] != (3, 6): + ops.append(("drop_database", lambda: client.drop_database(db))) + + for name, f in ops: + # Ensure insert_many and bulk_write still raise BulkWriteError. + if name in ("insert_many", "bulk_write"): + expected = BulkWriteError + else: + expected = expected_exception + with self.assertRaises(expected, msg=name) as cm: + f() + if expected == BulkWriteError: + bulk_result = cm.exception.details + assert bulk_result is not None + wc_errors = bulk_result["writeConcernErrors"] + self.assertTrue(wc_errors) + + @client_context.require_replica_set + def test_raise_write_concern_error(self): + self.addCleanup(client_context.client.drop_database, "pymongo_test") + assert client_context.w is not None + self.assertWriteOpsRaise( + WriteConcern(w=client_context.w + 1, wtimeout=1), WriteConcernError + ) + + @client_context.require_secondaries_count(1) + @client_context.require_test_commands + def test_raise_wtimeout(self): + self.addCleanup(client_context.client.drop_database, "pymongo_test") + self.addCleanup(self.enable_replication, client_context.client) + # Disable replication to guarantee a wtimeout error. + self.disable_replication(client_context.client) + self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) + @client_context.require_failCommand_fail_point + def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. 
+ with self.assertRaises(BulkWriteError) as ctx: + self.db.test.bulk_write([InsertOne({})]) + expected_details = { + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + self.assertEqual(ctx.exception.details, expected_details) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) + @client_context.require_version_min(4, 9) + def test_write_error_details_exposes_errinfo(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + db = client.errinfotest + self.addCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + db.test.insert_one({"x": 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + assert ctx.exception.details is not None + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.succeeded_events: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") + + +def normalize_write_concern(concern): + result = {} + for key in concern: + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] + else: + result[key] = concern[key] + return result + + +def create_connection_string_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] + + if not valid: + if warning is False: + self.assertRaises((ConfigurationError, ValueError), MongoClient, uri, connect=False) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, MongoClient, uri, connect=False) + else: + client = MongoClient(uri, connect=False) + if "writeConcern" in test_case: + document = client.write_concern.document + self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: + document = client.read_concern.document + self.assertEqual(document, test_case["readConcern"]) + + return run_test + + +def create_document_test(test_case): + def run_test(self): + valid = test_case["valid"] + + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) + if not valid: + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) + else: + write_concern = WriteConcern(**normalized) + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: + # Any string for 'level' is equally valid + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) + + return run_test + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + if dirname == "operation": + # This directory is tested by TestOperations. 
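+            # (see the generate_test_classes call at the bottom of this module).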
+ continue + elif dirname == "connection-string": + create_test = create_connection_string_test + else: + create_test = create_document_test + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as test_stream: + test_cases = json.load(test_stream)["tests"] + + fname = os.path.splitext(filename)[0] + for test_case in test_cases: + new_test = create_test(test_case) + test_name = "test_{}_{}_{}".format( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) + + new_test.__name__ = test_name + setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) + + +create_tests() + + +# Generate unified tests. +# PyMongo does not support MapReduce. +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "operation"), + module=__name__, + expected_failures=["MapReduce .*"], + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py deleted file mode 100644 index cf1bfee6b2..0000000000 --- a/test/test_replica_set_client.py +++ /dev/null @@ -1,1242 +0,0 @@ -# Copyright 2011-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the mongo_replica_set_client module.""" - -# TODO: anywhere we wait for refresh in tests, consider just refreshing w/ sync - -import copy -import datetime -import signal -import socket -import sys -import time -import thread -import threading -import traceback -import unittest - -sys.path[0:0] = [""] - -from nose.plugins.skip import SkipTest - -from bson.son import SON -from bson.tz_util import utc -from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import ReadPreference -from pymongo.member import PRIMARY, SECONDARY, OTHER -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.mongo_replica_set_client import _partition_node, have_gevent -from pymongo.database import Database -from pymongo.pool import SocketInfo -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - InvalidName, - OperationFailure, InvalidOperation) -from test import version, port, pair -from test.pymongo_mocks import MockReplicaSetClient -from test.utils import ( - delay, assertReadFrom, assertReadFromAll, read_from_which_host, - remove_all_users, assertRaisesExactly, TestRequestMixin, one, - server_started_with_auth, pools_from_rs_client, get_pool, - _TestLazyConnectMixin) - - -class TestReplicaSetClientAgainstStandalone(unittest.TestCase): - """This is a funny beast -- we want to run tests for MongoReplicaSetClient - but only if the database at DB_IP and DB_PORT is a standalone. 
- """ - def setUp(self): - client = MongoClient(pair) - response = client.admin.command('ismaster') - if 'setName' in response: - raise SkipTest("Connected to a replica set, not a standalone mongod") - - def test_connect(self): - self.assertRaises(ConfigurationError, MongoReplicaSetClient, - pair, replicaSet='anything', - connectTimeoutMS=600) - - -class TestReplicaSetClientBase(unittest.TestCase): - def setUp(self): - client = MongoClient(pair) - response = client.admin.command('ismaster') - if 'setName' in response: - self.name = str(response['setName']) - self.w = len(response['hosts']) - self.hosts = set([_partition_node(h) - for h in response["hosts"]]) - self.arbiters = set([_partition_node(h) - for h in response.get("arbiters", [])]) - - repl_set_status = client.admin.command('replSetGetStatus') - primary_info = [ - m for m in repl_set_status['members'] - if m['stateStr'] == 'PRIMARY' - ][0] - - self.primary = _partition_node(primary_info['name']) - self.secondaries = [ - _partition_node(m['name']) for m in repl_set_status['members'] - if m['stateStr'] == 'SECONDARY' - ] - else: - raise SkipTest("Not connected to a replica set") - - super(TestReplicaSetClientBase, self).setUp() - - def _get_client(self, **kwargs): - return MongoReplicaSetClient(pair, - replicaSet=self.name, - **kwargs) - - -class TestReplicaSetClient(TestReplicaSetClientBase, TestRequestMixin): - def assertSoon(self, fn, msg=None): - start = time.time() - while time.time() - start < 10: - if fn(): - return - - time.sleep(0.1) - - self.fail(msg) - - def assertIsInstance(self, obj, cls, msg=None): - """Backport from Python 2.7.""" - if not isinstance(obj, cls): - standardMsg = '%r is not an instance of %r' % (obj, cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def test_init_disconnected(self): - c = self._get_client(_connect=False) - - self.assertIsInstance(c.is_mongos, bool) - self.assertIsInstance(c.max_pool_size, int) - self.assertIsInstance(c.use_greenlets, bool) - self.assertIsInstance(c.auto_start_request, bool) - self.assertIsInstance(c.tz_aware, bool) - self.assertIsInstance(c.max_bson_size, int) - self.assertIsInstance(c.min_wire_version, int) - self.assertIsInstance(c.max_wire_version, int) - self.assertIsInstance(c.seeds, set) - self.assertIsInstance(c.hosts, frozenset) - self.assertIsInstance(c.arbiters, frozenset) - self.assertEqual(dict, c.get_document_class()) - self.assertFalse(c.primary) - self.assertFalse(c.secondaries) - - c.pymongo_test.test.find_one() # Auto-connect for read. - self.assertTrue(c.primary) - self.assertTrue(c.secondaries) - - if version.at_least(c, (2, 5, 4, -1)): - self.assertTrue(c.max_wire_version > 0) - else: - self.assertEqual(c.max_wire_version, 0) - self.assertTrue(c.min_wire_version >= 0) - - c = self._get_client(_connect=False) - c.pymongo_test.test.update({}, {}) # Auto-connect for write. - self.assertTrue(c.primary) - - c = self._get_client(_connect=False) - c.pymongo_test.test.insert({}) # Auto-connect for write. - self.assertTrue(c.primary) - - c = self._get_client(_connect=False) - c.pymongo_test.test.remove({}) # Auto-connect for write. 
- self.assertTrue(c.primary) - - c = MongoReplicaSetClient( - "somedomainthatdoesntexist.org", replicaSet="rs", - connectTimeoutMS=1, _connect=False) - - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) - - def test_init_disconnected_with_auth_failure(self): - c = MongoReplicaSetClient( - "mongodb://user:pass@somedomainthatdoesntexist", replicaSet="rs", - connectTimeoutMS=1, _connect=False) - - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) - - def test_init_disconnected_with_auth(self): - c = self._get_client() - if not server_started_with_auth(c): - raise SkipTest('Authentication is not enabled on server') - - c.admin.add_user("admin", "pass") - c.admin.authenticate("admin", "pass") - try: - c.pymongo_test.add_user("user", "pass", roles=['readWrite', 'userAdmin']) - - # Auth with lazy connection. - host = one(self.hosts) - uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( - host[0], host[1], self.name) - - authenticated_client = MongoReplicaSetClient(uri, _connect=False) - authenticated_client.pymongo_test.test.find_one() - - # Wrong password. - bad_uri = "mongodb://user:wrong@%s:%d/pymongo_test?replicaSet=%s" % ( - host[0], host[1], self.name) - - bad_client = MongoReplicaSetClient(bad_uri, _connect=False) - self.assertRaises( - OperationFailure, bad_client.pymongo_test.test.find_one) - - finally: - # Clean up. - remove_all_users(c.pymongo_test) - remove_all_users(c.admin) - - def test_connect(self): - assertRaisesExactly(ConnectionFailure, MongoReplicaSetClient, - "somedomainthatdoesntexist.org:27017", - replicaSet=self.name, - connectTimeoutMS=600) - self.assertRaises(ConfigurationError, MongoReplicaSetClient, - pair, replicaSet='fdlksjfdslkjfd') - self.assertTrue(MongoReplicaSetClient(pair, replicaSet=self.name)) - - def test_repr(self): - client = self._get_client() - - # Quirk: the RS client makes a frozenset of hosts from a dict's keys, - # so we must do the same to achieve the same order. 
- host_dict = dict([(host, 1) for host in self.hosts]) - hosts_set = frozenset(host_dict) - hosts_repr = ', '.join([ - repr(unicode('%s:%s' % host)) for host in hosts_set]) - - self.assertEqual(repr(client), - "MongoReplicaSetClient([%s])" % hosts_repr) - - def test_properties(self): - c = MongoReplicaSetClient(pair, replicaSet=self.name) - c.admin.command('ping') - self.assertEqual(c.primary, self.primary) - self.assertEqual(c.hosts, self.hosts) - self.assertEqual(c.arbiters, self.arbiters) - self.assertEqual(c.max_pool_size, 100) - self.assertEqual(c.document_class, dict) - self.assertEqual(c.tz_aware, False) - - # Make sure MRSC's properties are copied to Database and Collection - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.read_preference, ReadPreference.PRIMARY) - self.assertEqual(obj.tag_sets, [{}]) - self.assertEqual(obj.secondary_acceptable_latency_ms, 15) - self.assertEqual(obj.slave_okay, False) - self.assertEqual(obj.write_concern, {}) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - ReadPreference.PRIMARY, cursor._Cursor__read_preference) - self.assertEqual([{}], cursor._Cursor__tag_sets) - self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms) - self.assertEqual(False, cursor._Cursor__slave_okay) - c.close() - - tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}] - c = MongoReplicaSetClient(pair, replicaSet=self.name, max_pool_size=25, - document_class=SON, tz_aware=True, - slaveOk=False, - read_preference=ReadPreference.SECONDARY, - tag_sets=copy.deepcopy(tag_sets), - secondary_acceptable_latency_ms=77) - c.admin.command('ping') - self.assertEqual(c.primary, self.primary) - self.assertEqual(c.hosts, self.hosts) - self.assertEqual(c.arbiters, self.arbiters) - self.assertEqual(c.max_pool_size, 25) - self.assertEqual(c.document_class, SON) - self.assertEqual(c.tz_aware, True) - - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.read_preference, ReadPreference.SECONDARY) - self.assertEqual(obj.tag_sets, tag_sets) - self.assertEqual(obj.secondary_acceptable_latency_ms, 77) - self.assertEqual(obj.slave_okay, False) - self.assertEqual(obj.safe, True) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - ReadPreference.SECONDARY, cursor._Cursor__read_preference) - self.assertEqual(tag_sets, cursor._Cursor__tag_sets) - self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms) - self.assertEqual(False, cursor._Cursor__slave_okay) - - cursor = c.pymongo_test.test.find( - read_preference=ReadPreference.NEAREST, - tag_sets=[{'dc':'ny'}, {}], - secondary_acceptable_latency_ms=123) - - self.assertEqual( - ReadPreference.NEAREST, cursor._Cursor__read_preference) - self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets) - self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms) - self.assertEqual(False, cursor._Cursor__slave_okay) - - if version.at_least(c, (1, 7, 4)): - self.assertEqual(c.max_bson_size, 16777216) - else: - self.assertEqual(c.max_bson_size, 4194304) - c.close() - - def test_use_greenlets(self): - self.assertFalse( - MongoReplicaSetClient(pair, replicaSet=self.name).use_greenlets) - - if have_gevent: - self.assertTrue(MongoReplicaSetClient( - pair, replicaSet=self.name, use_greenlets=True).use_greenlets) - - def test_get_db(self): - client = self._get_client() - - def make_db(base, name): - return base[name] - - self.assertRaises(InvalidName, make_db, client, "") - self.assertRaises(InvalidName, make_db, client, "te$t") - self.assertRaises(InvalidName, 
make_db, client, "te.t") - self.assertRaises(InvalidName, make_db, client, "te\\t") - self.assertRaises(InvalidName, make_db, client, "te/t") - self.assertRaises(InvalidName, make_db, client, "te st") - - self.assertTrue(isinstance(client.test, Database)) - self.assertEqual(client.test, client["test"]) - self.assertEqual(client.test, Database(client, "test")) - client.close() - - def test_auto_reconnect_exception_when_read_preference_is_secondary(self): - c = self._get_client() - db = c.pymongo_test - - def raise_socket_error(*args, **kwargs): - raise socket.error - - old_sendall = socket.socket.sendall - socket.socket.sendall = raise_socket_error - - try: - cursor = db.test.find(read_preference=ReadPreference.SECONDARY) - self.assertRaises(AutoReconnect, cursor.next) - finally: - socket.socket.sendall = old_sendall - - def test_lazy_auth_raises_operation_failure(self): - # Check if we have the prerequisites to run this test. - c = self._get_client() - if not server_started_with_auth(c): - raise SkipTest('Authentication is not enabled on server') - - lazy_client = MongoReplicaSetClient( - "mongodb://user:wrong@%s/pymongo_test" % pair, - replicaSet=self.name, - _connect=False) - - assertRaisesExactly( - OperationFailure, lazy_client.test.collection.find_one) - - def test_operations(self): - c = self._get_client() - - # Check explicitly for a case we've commonly hit in tests: - # a replica set is started with a tiny oplog, a previous - # test does a big insert that leaves the secondaries - # permanently "RECOVERING", and our insert(w=self.w) hangs - # forever. - rs_status = c.admin.command('replSetGetStatus') - members = rs_status['members'] - self.assertFalse( - [m for m in members if m['stateStr'] == 'RECOVERING'], - "Replica set is recovering, try a larger oplogSize next time" - ) - - db = c.pymongo_test - db.test.remove({}) - self.assertEqual(0, db.test.count()) - db.test.insert({'foo': 'x'}, w=self.w, wtimeout=10000) - self.assertEqual(1, db.test.count()) - - cursor = db.test.find() - doc = cursor.next() - self.assertEqual('x', doc['foo']) - # Ensure we read from the primary - self.assertEqual(c.primary, cursor._Cursor__connection_id) - - cursor = db.test.find(read_preference=ReadPreference.SECONDARY) - doc = cursor.next() - self.assertEqual('x', doc['foo']) - # Ensure we didn't read from the primary - self.assertTrue(cursor._Cursor__connection_id in c.secondaries) - - self.assertEqual(1, db.test.count()) - db.test.remove({}) - self.assertEqual(0, db.test.count()) - db.test.drop() - c.close() - - def test_database_names(self): - client = self._get_client() - - client.pymongo_test.test.save({"dummy": u"object"}) - client.pymongo_test_mike.test.save({"dummy": u"object"}) - - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_mike" in dbs) - client.close() - - def test_drop_database(self): - client = self._get_client() - - self.assertRaises(TypeError, client.drop_database, 5) - self.assertRaises(TypeError, client.drop_database, None) - - client.pymongo_test.test.save({"dummy": u"object"}) - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - client.drop_database("pymongo_test") - dbs = client.database_names() - self.assertTrue("pymongo_test" not in dbs) - - client.pymongo_test.test.save({"dummy": u"object"}) - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - client.drop_database(client.pymongo_test) - dbs = client.database_names() - self.assertTrue("pymongo_test" not in dbs) - client.close() - 
- def test_copy_db(self): - c = self._get_client() - # We test copy twice; once starting in a request and once not. In - # either case the copy should succeed (because it starts a request - # internally) and should leave us in the same state as before the copy. - c.start_request() - - self.assertRaises(TypeError, c.copy_database, 4, "foo") - self.assertRaises(TypeError, c.copy_database, "foo", 4) - - self.assertRaises(InvalidName, c.copy_database, "foo", "$foo") - - c.pymongo_test.test.drop() - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - - c.pymongo_test.test.insert({"foo": "bar"}) - - self.assertFalse("pymongo_test1" in c.database_names()) - self.assertFalse("pymongo_test2" in c.database_names()) - - c.copy_database("pymongo_test", "pymongo_test1") - # copy_database() didn't accidentally end the request - self.assertTrue(c.in_request()) - - self.assertTrue("pymongo_test1" in c.database_names()) - self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"]) - - c.end_request() - - self.assertFalse(c.in_request()) - c.copy_database("pymongo_test", "pymongo_test2", pair) - # copy_database() didn't accidentally restart the request - self.assertFalse(c.in_request()) - - time.sleep(1) - - self.assertTrue("pymongo_test2" in c.database_names()) - self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"]) - - if version.at_least(c, (1, 3, 3, 1)) and server_started_with_auth(c): - c.drop_database("pymongo_test1") - - c.admin.add_user("admin", "password") - c.admin.authenticate("admin", "password") - try: - c.pymongo_test.add_user("mike", "password") - - self.assertRaises(OperationFailure, c.copy_database, - "pymongo_test", "pymongo_test1", - username="foo", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) - - self.assertRaises(OperationFailure, c.copy_database, - "pymongo_test", "pymongo_test1", - username="mike", password="bar") - self.assertFalse("pymongo_test1" in c.database_names()) - - c.copy_database("pymongo_test", "pymongo_test1", - username="mike", password="password") - self.assertTrue("pymongo_test1" in c.database_names()) - res = c.pymongo_test1.test.find_one(_must_use_master=True) - self.assertEqual("bar", res["foo"]) - finally: - # Cleanup - remove_all_users(c.pymongo_test) - c.admin.remove_user("admin") - c.close() - - def test_get_default_database(self): - host = one(self.hosts) - uri = "mongodb://%s:%d/foo?replicaSet=%s" % ( - host[0], host[1], self.name) - - c = MongoReplicaSetClient(uri, _connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) - - def test_get_default_database_error(self): - host = one(self.hosts) - # URI with no database. - uri = "mongodb://%s:%d/?replicaSet=%s" % ( - host[0], host[1], self.name) - - c = MongoReplicaSetClient(uri, _connect=False) - self.assertRaises(ConfigurationError, c.get_default_database) - - def test_get_default_database_with_authsource(self): - # Ensure we distinguish database name from authSource. 
- host = one(self.hosts) - uri = "mongodb://%s:%d/foo?replicaSet=%s&authSource=src" % ( - host[0], host[1], self.name) - - c = MongoReplicaSetClient(uri, _connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) - - def test_iteration(self): - client = self._get_client() - - def iterate(): - [a for a in client] - - self.assertRaises(TypeError, iterate) - client.close() - - def test_disconnect(self): - c = self._get_client() - coll = c.pymongo_test.bar - - c.disconnect() - c.disconnect() - - coll.count() - - c.disconnect() - c.disconnect() - - coll.count() - - def test_close(self): - # Multiple threads can call close() at once without error, and all - # operations raise InvalidOperation afterward. - c = self._get_client() - nthreads = 10 - outcomes = [] - - def close(): - c.close() - outcomes.append(True) - - threads = [threading.Thread(target=close) for _ in range(nthreads)] - for t in threads: - t.start() - - for t in threads: - t.join(10) - - self.assertEqual(nthreads, len(outcomes)) - self.assertRaises(InvalidOperation, c.db.collection.find_one) - self.assertRaises(InvalidOperation, c.db.collection.insert, {}) - - def test_fork(self): - # After a fork the monitor thread is gone. - # Verify that schedule_refresh restarts it. - if sys.platform == "win32": - raise SkipTest("Can't fork on Windows") - - try: - from multiprocessing import Process, Pipe - except ImportError: - raise SkipTest("No multiprocessing module") - - client = self._get_client() - - def f(pipe): - try: - # Trigger a refresh. - self.assertFalse( - client._MongoReplicaSetClient__monitor.isAlive()) - - client.disconnect() - self.assertSoon( - lambda: client._MongoReplicaSetClient__monitor.isAlive()) - - client.db.collection.find_one() # No error. - except: - traceback.print_exc() - pipe.send(True) - - cp, cc = Pipe() - p = Process(target=f, args=(cc,)) - p.start() - p.join(10) - cc.close() - - # recv will only have data if the subprocess failed - try: - cp.recv() - self.fail() - except EOFError: - pass - - def test_document_class(self): - c = self._get_client() - db = c.pymongo_test - db.test.insert({"x": 1}) - - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) - - c.document_class = SON - - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) - c.close() - - c = self._get_client(document_class=SON) - db = c.pymongo_test - - self.assertEqual(SON, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) - self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON)) - - c.document_class = dict - - self.assertEqual(dict, c.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) - c.close() - - def test_network_timeout_validation(self): - c = self._get_client(socketTimeoutMS=10 * 1000) - self.assertEqual(10, c._MongoReplicaSetClient__net_timeout) - - c = self._get_client(socketTimeoutMS=None) - self.assertEqual(None, c._MongoReplicaSetClient__net_timeout) - - self.assertRaises(ConfigurationError, - self._get_client, socketTimeoutMS=0) - - self.assertRaises(ConfigurationError, - self._get_client, socketTimeoutMS=-1) - - self.assertRaises(ConfigurationError, - self._get_client, socketTimeoutMS=1e10) - - self.assertRaises(ConfigurationError, - self._get_client, socketTimeoutMS='foo') - - # network_timeout 
is gone from MongoReplicaSetClient, remains in - # deprecated ReplicaSetConnection - self.assertRaises(ConfigurationError, - self._get_client, network_timeout=10) - - def test_network_timeout(self): - no_timeout = self._get_client() - timeout_sec = 1 - timeout = self._get_client(socketTimeoutMS=timeout_sec*1000) - - no_timeout.pymongo_test.drop_collection("test") - no_timeout.pymongo_test.test.insert({"x": 1}) - - # A $where clause that takes a second longer than the timeout. - query = {'$where': delay(1 + timeout_sec)} - no_timeout.pymongo_test.test.find_one(query) # No error. - - try: - timeout.pymongo_test.test.find_one(query) - except AutoReconnect, e: - self.assertTrue('%d: timed out' % (port,) in e.args[0]) - else: - self.fail('RS client should have raised timeout error') - - timeout.pymongo_test.test.find_one(query, network_timeout=None) - - try: - no_timeout.pymongo_test.test.find_one(query, network_timeout=0.1) - except AutoReconnect, e: - self.assertTrue('%d: timed out' % (port,) in e.args[0]) - else: - self.fail('RS client should have raised timeout error') - - try: - timeout.pymongo_test.test.find_one( - query, - read_preference=ReadPreference.SECONDARY) - except AutoReconnect, e: - # Like 'No replica set secondary available for query with - # ReadPreference SECONDARY. host:27018: timed out, - # host:27019: timed out'. - self.assertTrue( - str(e).startswith('No replica set secondary available')) - - self.assertTrue('timed out' in str(e)) - else: - self.fail('RS client should have raised timeout error') - - no_timeout.close() - timeout.close() - - def test_timeout_does_not_mark_member_down(self): - # If a query times out, the RS client shouldn't mark the member "down". - c = self._get_client(socketTimeoutMS=3000) - collection = c.pymongo_test.test - collection.insert({}, w=self.w) - - # Query the primary. - self.assertRaises( - ConnectionFailure, - collection.find_one, - {'$where': delay(5)}) - - # primary_member returns None if primary is marked "down". - rs_state = c._MongoReplicaSetClient__rs_state - self.assertTrue(rs_state.primary_member) - - collection.find_one() # No error. - - # Query the secondary. - self.assertRaises( - ConnectionFailure, - collection.find_one, - {'$where': delay(5)}, - read_preference=SECONDARY) - - rs_state = c._MongoReplicaSetClient__rs_state - secondary_host = one(rs_state.secondaries) - self.assertTrue(rs_state.get(secondary_host)) - collection.find_one(read_preference=SECONDARY) # No error. 
- - def test_waitQueueTimeoutMS(self): - client = self._get_client(waitQueueTimeoutMS=2000) - pool = get_pool(client) - self.assertEqual(pool.wait_queue_timeout, 2) - - def test_waitQueueMultiple(self): - client = self._get_client(max_pool_size=3, waitQueueMultiple=2) - pool = get_pool(client) - self.assertEqual(pool.wait_queue_multiple, 2) - self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6) - - def test_tz_aware(self): - self.assertRaises(ConfigurationError, MongoReplicaSetClient, - tz_aware='foo', replicaSet=self.name) - - aware = self._get_client(tz_aware=True) - naive = self._get_client() - aware.pymongo_test.drop_collection("test") - - now = datetime.datetime.utcnow() - aware.pymongo_test.test.insert({"x": now}) - time.sleep(1) - - self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual( - aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) - - def test_ipv6(self): - try: - client = MongoReplicaSetClient("[::1]:%d" % (port,), - replicaSet=self.name) - except: - # Either mongod was started without --ipv6 - # or the OS doesn't support it (or both). - raise SkipTest("No IPv6") - - # Try a few simple things - client = MongoReplicaSetClient("mongodb://[::1]:%d" % (port,), - replicaSet=self.name) - client = MongoReplicaSetClient("mongodb://[::1]:%d/?w=0;" - "replicaSet=%s" % (port, self.name)) - client = MongoReplicaSetClient("[::1]:%d,localhost:" - "%d" % (port, port), - replicaSet=self.name) - client = MongoReplicaSetClient("localhost:%d,[::1]:" - "%d" % (port, port), - replicaSet=self.name) - client.pymongo_test.test.save({"dummy": u"object"}) - client.pymongo_test_bernie.test.save({"dummy": u"object"}) - - dbs = client.database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) - client.close() - - def _test_kill_cursor_explicit(self, read_pref): - c = self._get_client(read_preference=read_pref) - db = c.pymongo_test - db.drop_collection("test") - - test = db.test - test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries)) - - # Partially evaluate cursor so it's left alive, then kill it - cursor = test.find().batch_size(10) - cursor.next() - self.assertNotEqual(0, cursor.cursor_id) - - connection_id = cursor._Cursor__connection_id - writer = c._MongoReplicaSetClient__rs_state.writer - if read_pref == ReadPreference.PRIMARY: - msg = "Expected cursor's connection_id to be %s, got %s" % ( - writer, connection_id) - - self.assertEqual(connection_id, writer, msg) - else: - self.assertNotEqual(connection_id, writer, - "Expected cursor's connection_id not to be primary") - - cursor_id = cursor.cursor_id - - # Cursor dead on server - trigger a getMore on the same cursor_id and - # check that the server returns an error. - cursor2 = cursor.clone() - cursor2._Cursor__id = cursor_id - - if (sys.platform.startswith('java') or - 'PyPy' in sys.version): - # Explicitly kill cursor. - cursor.close() - else: - # Implicitly kill it in CPython. 
- del cursor - - self.assertRaises(OperationFailure, lambda: list(cursor2)) - - def test_kill_cursor_explicit_primary(self): - self._test_kill_cursor_explicit(ReadPreference.PRIMARY) - - def test_kill_cursor_explicit_secondary(self): - self._test_kill_cursor_explicit(ReadPreference.SECONDARY) - - def test_interrupt_signal(self): - if sys.platform.startswith('java'): - raise SkipTest("Can't test interrupts in Jython") - - # Test fix for PYTHON-294 -- make sure client closes its socket if it - # gets an interrupt while waiting to recv() from it. - c = self._get_client() - db = c.pymongo_test - - # A $where clause which takes 1.5 sec to execute - where = delay(1.5) - - # Need exactly 1 document so find() will execute its $where clause once - db.drop_collection('foo') - db.foo.insert({'_id': 1}) - - old_signal_handler = None - - try: - def interrupter(): - time.sleep(0.25) - - # Raises KeyboardInterrupt in the main thread - thread.interrupt_main() - - thread.start_new_thread(interrupter, ()) - - raised = False - try: - # Will be interrupted by a KeyboardInterrupt. - db.foo.find({'$where': where}).next() - except KeyboardInterrupt: - raised = True - - # Can't use self.assertRaises() because it doesn't catch system - # exceptions - self.assertTrue(raised, "Didn't raise expected ConnectionFailure") - - # Raises AssertionError due to PYTHON-294 -- Mongo's response to the - # previous find() is still waiting to be read on the socket, so the - # request id's don't match. - self.assertEqual( - {'_id': 1}, - db.foo.find().next() - ) - finally: - if old_signal_handler: - signal.signal(signal.SIGALRM, old_signal_handler) - - def test_operation_failure_without_request(self): - # Ensure MongoReplicaSetClient doesn't close socket after it gets an - # error response to getLastError. PYTHON-395. - c = self._get_client(auto_start_request=False) - pool = get_pool(c) - self.assertEqual(1, len(pool.sockets)) - old_sock_info = iter(pool.sockets).next() - c.pymongo_test.test.drop() - c.pymongo_test.test.insert({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - c.pymongo_test.test.insert, {'_id': 'foo'}) - - self.assertEqual(1, len(pool.sockets)) - new_sock_info = iter(pool.sockets).next() - - self.assertEqual(old_sock_info, new_sock_info) - c.close() - - def test_operation_failure_with_request(self): - # Ensure MongoReplicaSetClient doesn't close socket after it gets an - # error response to getLastError. PYTHON-395. 
- c = self._get_client(auto_start_request=True) - c.pymongo_test.test.find_one() - pool = get_pool(c) - - # Client reserved a socket for this thread - self.assertTrue(isinstance(pool._get_request_state(), SocketInfo)) - - old_sock_info = pool._get_request_state() - c.pymongo_test.test.drop() - c.pymongo_test.test.insert({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - c.pymongo_test.test.insert, {'_id': 'foo'}) - - # OperationFailure doesn't affect the request socket - self.assertEqual(old_sock_info, pool._get_request_state()) - c.close() - - def test_auto_start_request(self): - for bad_horrible_value in (None, 5, 'hi!'): - self.assertRaises( - (TypeError, ConfigurationError), - lambda: self._get_client(auto_start_request=bad_horrible_value) - ) - - client = self._get_client(auto_start_request=True) - self.assertTrue(client.auto_start_request) - pools = pools_from_rs_client(client) - self.assertInRequestAndSameSock(client, pools) - - primary_pool = get_pool(client) - - # Trigger the RSC to actually start a request on primary pool - client.pymongo_test.test.find_one() - self.assertTrue(primary_pool.in_request()) - - # Trigger the RSC to actually start a request on secondary pool - cursor = client.pymongo_test.test.find( - read_preference=ReadPreference.SECONDARY) - try: - cursor.next() - except StopIteration: - # No results, no problem - pass - - secondary = cursor._Cursor__connection_id - rs_state = client._MongoReplicaSetClient__rs_state - secondary_pool = rs_state.get(secondary).pool - self.assertTrue(secondary_pool.in_request()) - - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pools) - for pool in pools: - self.assertFalse(pool.in_request()) - client.start_request() - self.assertInRequestAndSameSock(client, pools) - client.close() - - client = self._get_client() - pools = pools_from_rs_client(client) - self.assertNotInRequestAndDifferentSock(client, pools) - client.start_request() - self.assertInRequestAndSameSock(client, pools) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pools) - client.close() - - def test_nested_request(self): - client = self._get_client(auto_start_request=True) - try: - pools = pools_from_rs_client(client) - self.assertTrue(client.in_request()) - - # Start and end request - we're still in "outer" original request - client.start_request() - self.assertInRequestAndSameSock(client, pools) - client.end_request() - self.assertInRequestAndSameSock(client, pools) - - # Double-nesting - client.start_request() - client.start_request() - self.assertEqual( - 3, client._MongoReplicaSetClient__request_counter.get()) - - for pool in pools: - # MRSC only called start_request() once per pool, although its - # own counter is 2. 
- self.assertEqual(1, pool._request_counter.get()) - - client.end_request() - client.end_request() - self.assertInRequestAndSameSock(client, pools) - - self.assertEqual( - 1, client._MongoReplicaSetClient__request_counter.get()) - - for pool in pools: - self.assertEqual(1, pool._request_counter.get()) - - # Finally, end original request - client.end_request() - for pool in pools: - self.assertFalse(pool.in_request()) - - self.assertNotInRequestAndDifferentSock(client, pools) - finally: - client.close() - - def test_request_threads(self): - client = self._get_client() - try: - pools = pools_from_rs_client(client) - self.assertNotInRequestAndDifferentSock(client, pools) - - started_request, ended_request = threading.Event(), threading.Event() - checked_request = threading.Event() - thread_done = [False] - - # Starting a request in one thread doesn't put the other thread in a - # request - def f(): - self.assertNotInRequestAndDifferentSock(client, pools) - client.start_request() - self.assertInRequestAndSameSock(client, pools) - started_request.set() - checked_request.wait() - checked_request.clear() - self.assertInRequestAndSameSock(client, pools) - client.end_request() - self.assertNotInRequestAndDifferentSock(client, pools) - ended_request.set() - checked_request.wait() - thread_done[0] = True - - t = threading.Thread(target=f) - t.setDaemon(True) - t.start() - started_request.wait() - self.assertNotInRequestAndDifferentSock(client, pools) - checked_request.set() - ended_request.wait() - self.assertNotInRequestAndDifferentSock(client, pools) - checked_request.set() - t.join() - self.assertNotInRequestAndDifferentSock(client, pools) - self.assertTrue(thread_done[0], "Thread didn't complete") - finally: - client.close() - - def test_schedule_refresh(self): - client = self._get_client() - new_rs_state = rs_state = client._MongoReplicaSetClient__rs_state - for host in rs_state.hosts: - new_rs_state = new_rs_state.clone_with_host_down(host, 'error!') - - client._MongoReplicaSetClient__rs_state = new_rs_state - client._MongoReplicaSetClient__schedule_refresh(sync=True) - rs_state = client._MongoReplicaSetClient__rs_state - self.assertEqual( - self.w, len(rs_state.members), - "MongoReplicaSetClient didn't detect members are up") - - client.close() - - def test_pinned_member(self): - latency = 1000 * 1000 - client = self._get_client(secondary_acceptable_latency_ms=latency) - - host = read_from_which_host(client, ReadPreference.SECONDARY) - self.assertTrue(host in client.secondaries) - - # No pinning since we're not in a request - assertReadFromAll( - self, client, client.secondaries, - ReadPreference.SECONDARY, None, latency) - - assertReadFromAll( - self, client, list(client.secondaries) + [client.primary], - ReadPreference.NEAREST, None, latency) - - client.start_request() - host = read_from_which_host(client, ReadPreference.SECONDARY) - self.assertTrue(host in client.secondaries) - assertReadFrom(self, client, host, ReadPreference.SECONDARY) - - # Changing any part of read preference (mode, tag_sets, latency) - # unpins the current host and pins to a new one - primary = client.primary - assertReadFrom(self, client, primary, ReadPreference.PRIMARY_PREFERRED) - - host = read_from_which_host(client, ReadPreference.NEAREST) - assertReadFrom(self, client, host, ReadPreference.NEAREST) - - assertReadFrom(self, client, primary, ReadPreference.PRIMARY_PREFERRED) - - host = read_from_which_host(client, ReadPreference.SECONDARY_PREFERRED) - self.assertTrue(host in client.secondaries) - 
assertReadFrom(self, client, host, ReadPreference.SECONDARY_PREFERRED) - - # Unpin - client.end_request() - assertReadFromAll( - self, client, list(client.secondaries) + [client.primary], - ReadPreference.NEAREST, None, latency) - - def test_alive(self): - client = self._get_client() - self.assertTrue(client.alive()) - - client = MongoReplicaSetClient( - 'doesnt exist', replicaSet='rs', _connect=False) - - self.assertFalse(client.alive()) - - -class TestReplicaSetWireVersion(unittest.TestCase): - def test_wire_version(self): - c = MockReplicaSetClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='a:1', - replicaSet='rs', - _connect=False) - - c.set_wire_version_range('a:1', 1, 5) - c.set_wire_version_range('b:2', 0, 1) - c.set_wire_version_range('c:3', 1, 2) - c.db.collection.find_one() # Connect. - self.assertEqual(c.min_wire_version, 1) - self.assertEqual(c.max_wire_version, 5) - - c.set_wire_version_range('a:1', 2, 2) - c.refresh() - self.assertEqual(c.min_wire_version, 2) - self.assertEqual(c.max_wire_version, 2) - - # A secondary doesn't overlap with us. - c.set_wire_version_range('b:2', 5, 6) - - # refresh() raises, as do all following operations. - self.assertRaises(ConfigurationError, c.refresh) - self.assertRaises(ConfigurationError, c.db.collection.find_one) - self.assertRaises(ConfigurationError, c.db.collection.insert, {}) - - -# Test concurrent access to a lazily-connecting RS client. -class TestReplicaSetClientLazyConnect( - TestReplicaSetClientBase, - _TestLazyConnectMixin): - - def test_read_mode_secondary(self): - client = MongoReplicaSetClient( - pair, replicaSet=self.name, _connect=False, - read_preference=ReadPreference.SECONDARY) - - # No error. - client.pymongo_test.test_collection.find_one() - - -# Test concurrent access to a lazily-connecting RS client, with Gevent. -class TestReplicaSetClientLazyConnectGevent( - TestReplicaSetClientBase, - _TestLazyConnectMixin): - use_greenlets = True - - -class TestReplicaSetClientLazyConnectBadSeeds( - TestReplicaSetClientBase, - _TestLazyConnectMixin): - - def _get_client(self, **kwargs): - kwargs.setdefault('connectTimeoutMS', 500) - - # Assume there are no open mongods listening on a.com, b.com, .... - bad_seeds = ['%s.com' % chr(ord('a') + i) for i in range(5)] - seeds = ','.join(bad_seeds + [pair]) - client = MongoReplicaSetClient(seeds, replicaSet=self.name, **kwargs) - - # In case of a slow test machine. - client._refresh_timeout_sec = 30 - return client - - -class TestReplicaSetClientInternalIPs(unittest.TestCase): - def test_connect_with_internal_ips(self): - # Client is passed an IP it can reach, 'a:1', but the RS config - # only contains unreachable IPs like 'internal-ip'. PYTHON-608. - assertRaisesExactly( - ConnectionFailure, - MockReplicaSetClient, - standalones=[], - members=['a:1'], - mongoses=[], - ismaster_hosts=['internal-ip:27017'], - host='a:1', - replicaSet='rs') - - -class TestReplicaSetClientMaxWriteBatchSize(unittest.TestCase): - def test_max_write_batch_size(self): - c = MockReplicaSetClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs', - _connect=False) - - c.set_max_write_batch_size('a:1', 1) - c.set_max_write_batch_size('b:2', 2) - - # Starts with default max batch size. - self.assertEqual(1000, c.max_write_batch_size) - c.db.collection.find_one() # Connect. - - # Uses primary's max batch size. - self.assertEqual(c.max_write_batch_size, 1) - - # b becomes primary. 
- c.mock_primary = 'b:2' - c.refresh() - self.assertEqual(c.max_write_batch_size, 2) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 43bc5a0e22..3371543f27 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -1,4 +1,4 @@ -# Copyright 2013-2014 MongoDB, Inc. +# Copyright 2013-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,159 +13,173 @@ # limitations under the License. """Test clients and replica set configuration changes, using mocks.""" +from __future__ import annotations import sys -import unittest sys.path[0:0] = [""] -from pymongo.errors import ConfigurationError, ConnectionFailure +from test import MockClientTest, client_context, client_knobs, unittest +from test.pymongo_mocks import MockClient +from test.utils_shared import wait_until + from pymongo import ReadPreference -from test.pymongo_mocks import MockClient, MockReplicaSetClient +from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError + +@client_context.require_connection +@client_context.require_no_load_balancer +def setUpModule(): + pass -class TestSecondaryBecomesStandalone(unittest.TestCase): + +class TestSecondaryBecomesStandalone(MockClientTest): # An administrator removes a secondary from a 3-node set and # brings it back up as standalone, without updating the other # members' config. Verify we don't continue using it. def test_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') - - # MongoClient connects to primary by default. - self.assertEqual('a', c.host) - self.assertEqual(1, c.port) + host="a:1,b:2,c:3", + replicaSet="rs", + serverSelectionTimeoutMS=100, + connect=False, + ) + self.addCleanup(c.close) # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") # Fail over. - c.kill_host('a:1') - c.kill_host('b:2') - - # Force reconnect. - c.disconnect() + c.kill_host("a:1") + c.kill_host("b:2") - try: - c.db.collection.find_one() - except ConfigurationError, e: - self.assertTrue('not a member of replica set' in str(e)) - else: - self.fail("MongoClient didn't raise AutoReconnect") + with self.assertRaises(ServerSelectionTimeoutError): + c.db.command("ping") + with self.assertRaises(ServerSelectionTimeoutError): + _ = c.address - self.assertEqual(None, c.host) - self.assertEqual(None, c.port) + # Client can still discover the primary node + c.revive_host("a:1") + wait_until(lambda: c.address is not None, "connect to primary") + self.assertEqual(c.address, ("a", 1)) def test_replica_set_client(self): - c = MockReplicaSetClient( + c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) + self.addCleanup(c.close) - self.assertTrue(('b', 2) in c.secondaries) - self.assertTrue(('c', 3) in c.secondaries) + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') + + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is brought up as a standalone. 
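+        # (MockClient now picks this change up via background monitoring,
+        # hence the wait_until below instead of the old explicit refresh().)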
- c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') - c.refresh() + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") + + wait_until(lambda: {("b", 2)} == c.secondaries, "update the list of secondaries") - self.assertEqual(('a', 1), c.primary) - self.assertEqual(set([('b', 2)]), c.secondaries) + self.assertEqual(("a", 1), c.primary) -class TestSecondaryRemoved(unittest.TestCase): +class TestSecondaryRemoved(MockClientTest): # An administrator removes a secondary from a 3-node set *without* # restarting it as standalone. def test_replica_set_client(self): - c = MockReplicaSetClient( + c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) + self.addCleanup(c.close) - self.assertTrue(('b', 2) in c.secondaries) - self.assertTrue(('c', 3) in c.secondaries) + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is removed. - c.mock_ismaster_hosts.remove('c:3') - c.refresh() + c.mock_hello_hosts.remove("c:3") + wait_until(lambda: {("b", 2)} == c.secondaries, "update list of secondaries") - self.assertEqual(('a', 1), c.primary) - self.assertEqual(set([('b', 2)]), c.secondaries) + self.assertEqual(("a", 1), c.primary) -class TestSocketError(unittest.TestCase): +class TestSocketError(MockClientTest): def test_socket_error_marks_member_down(self): - c = MockReplicaSetClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') - - self.assertEqual(2, len(c._MongoReplicaSetClient__rs_state.members)) - - # b now raises socket.error. - c.mock_down_hosts.append('b:2') - self.assertRaises( - ConnectionFailure, - c.db.collection.find_one, read_preference=ReadPreference.SECONDARY) - - self.assertEqual(1, len(c._MongoReplicaSetClient__rs_state.members)) - - -class TestSecondaryAdded(unittest.TestCase): + # Disable background refresh. + with client_knobs(heartbeat_frequency=999999): + c = MockClient( + standalones=[], + members=["a:1", "b:2"], + mongoses=[], + host="a:1", + replicaSet="rs", + serverSelectionTimeoutMS=100, + ) + self.addCleanup(c.close) + + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") + + # b now raises socket.error. + c.mock_down_hosts.append("b:2") + self.assertRaises( + ConnectionFailure, + c.db.collection.with_options(read_preference=ReadPreference.SECONDARY).find_one, + ) + + self.assertEqual(1, len(c.nodes)) + + +class TestSecondaryAdded(MockClientTest): def test_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) + self.addCleanup(c.close) + + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # MongoClient connects to primary by default. - self.assertEqual('a', c.host) - self.assertEqual(1, c.port) - self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes) + self.assertEqual(c.address, ("a", 1)) + self.assertEqual({("a", 1), ("b", 2)}, c.nodes) # C is added. 
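+        # (mock_hello_hosts replaces mock_ismaster_hosts now that the
+        # handshake uses the "hello" command.)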
- c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") + + c.db.command("ping") - c.disconnect() - c.db.collection.find_one() + self.assertEqual(c.address, ("a", 1)) - self.assertEqual('a', c.host) - self.assertEqual(1, c.port) - self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]), c.nodes) + wait_until( + lambda: {("a", 1), ("b", 2), ("c", 3)} == c.nodes, "reconnect to both secondaries" + ) def test_replica_set_client(self): - c = MockReplicaSetClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + c = MockClient( + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) + self.addCleanup(c.close) - self.assertEqual(('a', 1), c.primary) - self.assertEqual(set([('b', 2)]), c.secondaries) + wait_until(lambda: c.primary == ("a", 1), "discover the primary") + wait_until(lambda: {("b", 2)} == c.secondaries, "discover the secondary") # C is added. - c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') - c.refresh() + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") + + wait_until(lambda: {("b", 2), ("c", 3)} == c.secondaries, "discover the new secondary") - self.assertEqual(('a', 1), c.primary) - self.assertEqual(set([('b', 2), ('c', 3)]), c.secondaries) + self.assertEqual(("a", 1), c.primary) if __name__ == "__main__": diff --git a/test/test_results.py b/test/test_results.py new file mode 100644 index 0000000000..deb09d7ed4 --- /dev/null +++ b/test/test_results.py @@ -0,0 +1,160 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test results module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.errors import InvalidOperation +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) + + +class TestResults(unittest.TestCase): + def repr_test(self, cls, result_arg): + for acknowledged in (True, False): + result = cls(result_arg, acknowledged) + expected_repr = "%s(%r, acknowledged=%r)" % (cls.__name__, result_arg, acknowledged) + self.assertEqual(acknowledged, result.acknowledged) + self.assertEqual(expected_repr, repr(result)) + + def test_bulk_write_result(self): + raw_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 1, + "nUpserted": 2, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [ + {"index": 5, "_id": 1}, + {"index": 9, "_id": 2}, + ], + } + self.repr_test(BulkWriteResult, raw_result) + + result = BulkWriteResult(raw_result, True) + self.assertEqual(raw_result, result.bulk_api_result) + self.assertEqual(raw_result["nInserted"], result.inserted_count) + self.assertEqual(raw_result["nMatched"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["nRemoved"], result.deleted_count) + self.assertEqual(raw_result["nUpserted"], result.upserted_count) + self.assertEqual({5: 1, 9: 2}, result.upserted_ids) + + result = BulkWriteResult(raw_result, False) + self.assertEqual(raw_result, result.bulk_api_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.inserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_ids + + def test_delete_result(self): + raw_result = {"n": 5} + self.repr_test(DeleteResult, {"n": 0}) + + result = DeleteResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.deleted_count) + + result = DeleteResult(raw_result, False) + self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + + def test_insert_many_result(self): + inserted_ids = [1, 2, 3] + self.repr_test(InsertManyResult, inserted_ids) + + for acknowledged in (True, False): + result = InsertManyResult(inserted_ids, acknowledged) + self.assertEqual(inserted_ids, result.inserted_ids) + + def test_insert_one_result(self): + self.repr_test(InsertOneResult, 0) + + for acknowledged in (True, False): + result = InsertOneResult(0, acknowledged) + self.assertEqual(0, result.inserted_id) + + def test_update_result(self): + raw_result = { + "n": 1, + "nModified": 1, + "upserted": None, + } + self.repr_test(UpdateResult, raw_result) + + result = UpdateResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["upserted"], result.upserted_id) + self.assertEqual(result.did_upsert, True) + + raw_result_2 = { + 
"n": 1, + "nModified": 1, + "upserted": [ + {"index": 5, "_id": 1}, + ], + } + self.repr_test(UpdateResult, raw_result_2) + + result = UpdateResult(raw_result_2, True) + self.assertEqual(result.did_upsert, True) + + raw_result_3 = { + "n": 1, + "nModified": 1, + } + self.repr_test(UpdateResult, raw_result_3) + + result = UpdateResult(raw_result_3, True) + self.assertEqual(result.did_upsert, False) + + result = UpdateResult(raw_result, False) + self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_id + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py new file mode 100644 index 0000000000..c9f72ae547 --- /dev/null +++ b/test/test_retryable_reads.py @@ -0,0 +1,264 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable reads spec.""" +from __future__ import annotations + +import os +import pprint +import sys +import threading +from test.utils import set_fail_point + +from pymongo.errors import OperationFailure + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + client_knobs, + unittest, +) +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, +) + +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) + +_IS_SYNC = True + + +class TestClientOptions(PyMongoTestCase): + def test_default(self): + client = self.simple_client(connect=False) + self.assertEqual(client.options.retry_reads, True) + + def test_kwargs(self): + client = self.simple_client(retryReads=True, connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client(retryReads=False, connect=False) + self.assertEqual(client.options.retry_reads, False) + + def test_uri(self): + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) + self.assertEqual(client.options.retry_reads, False) + + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. 
+ RUN_ON_LOAD_BALANCER = False + + @client_context.require_sync + @client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flaky on PyPy") + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + +class TestRetryableReads(IntegrationTest): + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = self.rs_or_single_client(mongos) + set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + client_context.mongos_seeds(), + event_listeners=[listener], + retryReads=True, + ) + + with self.assertRaises(OperationFailure): + client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + # Assert that both events occurred on different mongos. 
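+        # A command event's connection_id is the (host, port) of the server
+        # that handled the attempt, so unequal ids show two distinct mongoses.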
+ assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = client_context.mongos_seeds().split(",")[0] + mongos_client = self.rs_or_single_client(host) + set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. + self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. + assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = self.rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + with self.fail_point(fail_command): + operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py new file mode 100644 index 0000000000..b1c6435c9a --- /dev/null +++ b/test/test_retryable_reads_unified.py @@ -0,0 +1,46 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the Retryable Reads unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") + +# Generate unified tests. +# PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + expected_failures=["ListDatabaseObjects .*", "ListCollectionObjects .*", "MapReduce .*"], + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py new file mode 100644 index 0000000000..a74a3e8030 --- /dev/null +++ b/test/test_retryable_writes.py @@ -0,0 +1,601 @@ +# Copyright 2017-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable writes.""" +from __future__ import annotations + +import asyncio +import copy +import pprint +import sys +import threading +from test.utils import flaky, set_fail_point + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + SkipTest, + client_context, + unittest, +) +from test.helpers import client_knobs +from test.utils_shared import ( + CMAPListener, + DeprecationFilter, + EventListener, + OvertCommandListener, +) +from test.version import Version + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo.errors import ( + AutoReconnect, + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ( + CommandSucceededEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + +def retryable_single_statement_ops(coll): + return [ + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, 
[[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({})]], {}), + (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), + ] + + +def non_retryable_single_statement_ops(coll): + return [ + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_many, [{}], {}), + ] + + +class IgnoreDeprecationsTest(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + deprecation_filter: DeprecationFilter + + def setUp(self) -> None: + super().setUp() + self.deprecation_filter = DeprecationFilter() + + def tearDown(self) -> None: + super().tearDown() + self.deprecation_filter.stop() + + +class TestRetryableWrites(IgnoreDeprecationsTest): + listener: OvertCommandListener + knobs: client_knobs + + def setUp(self) -> None: + super().setUp() + # Speed up the tests by decreasing the heartbeat frequency. + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(retryWrites=True, event_listeners=[self.listener]) + self.db = self.client.pymongo_test + + if client_context.is_rs and client_context.test_commands_enabled: + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) + + def tearDown(self): + if client_context.is_rs and client_context.test_commands_enabled: + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) + self.knobs.disable() + super().tearDown() + + def test_supported_single_statement_no_retry(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(retryWrites=False, event_listeners=[listener]) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + method(*args, **kwargs) + for event in listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + def test_supported_single_statement_unsupported_cluster(self): + if client_context.is_rs or client_context.is_mongos: + raise SkipTest("This cluster supports retryable writes") + + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + method(*args, **kwargs) + + for event in self.listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + def test_unsupported_single_statement(self): + coll = self.db.retryable_write_test + coll.insert_many([{}, {}]) + coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = 
f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + method(*args, **kwargs) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) + for event in started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + def test_server_selection_timeout_not_retried(self): + """A ServerSelectionTimeoutError is not retried.""" + listener = OvertCommandListener() + client = self.simple_client( + "somedomainthatdoesntexist.org", + serverSelectionTimeoutMS=1, + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + with self.assertRaises(ServerSelectionTimeoutError, msg=msg): + method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 0, msg) + + @client_context.require_replica_set + @client_context.require_test_commands + def test_retry_timeout_raises_original_error(self): + """A ServerSelectionTimeoutError on the retry attempt raises the + original error. + """ + listener = OvertCommandListener() + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def mock_select_server(*args, **kwargs): + server = select_server(*args, **kwargs) + + def raise_error(*args, **kwargs): + raise ServerSelectionTimeoutError("No primary available for writes") + + # Raise ServerSelectionTimeout on the retry attempt. + topology.select_server = raise_error + return server + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + topology.select_server = mock_select_server + with self.assertRaises(ConnectionFailure, msg=msg): + method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + + @client_context.require_replica_set + @client_context.require_test_commands + def test_batch_splitting(self): + """Test retry succeeds after failures during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + coll.delete_many({}) + self.listener.reset() + bulk_result = coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) + # Each command should fail and be retried. + # With OP_MSG 3 inserts are one batch. 2 updates another. + # 2 deletes a third. 
+ self.assertEqual(len(self.listener.started_events), 6) + self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) + # Assert the final result + expected_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 3, + "nUpserted": 0, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [], + } + self.assertEqual(bulk_result.bulk_api_result, expected_result) + + @client_context.require_replica_set + @client_context.require_test_commands + def test_batch_splitting_retry_fails(self): + """Test retry fails during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + coll.delete_many({}) + self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. + ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) + self.listener.reset() + with self.client.start_session() as session: + initial_txn = session._transaction_id + try: + coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) + except ConnectionFailure: + pass + else: + self.fail("bulk_write should have failed") + + started = self.listener.started_events + self.assertEqual(len(started), 3) + self.assertEqual(len(self.listener.succeeded_events), 1) + expected_txn = Int64(initial_txn + 1) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) + expected_txn = Int64(initial_txn + 2) + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") + self.assertEqual(started[1].command, started[2].command) + final_txn = session._transaction_id + self.assertEqual(final_txn, expected_txn) + self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) + + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = self.rs_or_single_client(mongos) + set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + client_context.mongos_seeds(), + appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + +class TestWriteConcernError(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + fail_insert: dict + + @client_context.require_replica_set + @client_context.require_failCommand_fail_point + def setUp(self) -> None: + super().setUp() + self.fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, 
"errmsg": "Replication is being shut down"}, + }, + } + + @client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) + + # Ensure collection exists. + client.pymongo_test.testcoll.insert_one({}) + + with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) + + if client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) + + @client_context.require_version_min(4, 4) + def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + with self.fail_point(self.fail_insert): + with self.client.start_session() as s: + s._start_retryable_write() + result = self.client.pymongo_test.command( + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._transaction_id, + session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument + ), + ) + + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + + @client_context.require_sync + @client_context.require_failCommand_blockConnection + @client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") + def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. 
+ cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + @client_context.require_sync + @client_context.require_failCommand_fail_point + @client_context.require_replica_set + @client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = self.rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client.test.test.drop() + cmd_listener.reset() + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + + +# TODO: Make this a real integration test where we stepdown the primary. +class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): + @client_context.require_replica_set + def test_increment_transaction_id_without_sending_command(self): + """Test that the txnNumber field is properly incremented, even when + the first attempt fails before sending the command. + """ + listener = OvertCommandListener() + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def raise_connection_err_select_server(*args, **kwargs): + # Raise ConnectionFailure on the first attempt and perform + # normal selection on the retry attempt. + topology.select_server = select_server + raise ConnectionFailure("Connection refused") + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + listener.reset() + topology.select_server = raise_connection_err_select_server + with client.start_session() as session: + kwargs = copy.deepcopy(kwargs) + kwargs["session"] = session + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + initial_txn_id = session._transaction_id + + # Each operation should fail on the first attempt and succeed + # on the second. 
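+                # The retry reuses the txnNumber reserved before the first
+                # attempt, so the session's counter advances exactly once.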
+ method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command + sent_txn_id = retry_cmd["txnNumber"] + final_txn_id = session._transaction_id + self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) + self.assertEqual(sent_txn_id, final_txn_id, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py new file mode 100644 index 0000000000..036c410e24 --- /dev/null +++ b/test/test_retryable_writes_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_run_command.py b/test/test_run_command.py new file mode 100644 index 0000000000..d2ef43b97e --- /dev/null +++ b/test/test_run_command.py @@ -0,0 +1,41 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" +from __future__ import annotations + +import os +import unittest +from pathlib import Path +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_saslprep.py b/test/test_saslprep.py new file mode 100644 index 0000000000..e825cafa35 --- /dev/null +++ b/test/test_saslprep.py @@ -0,0 +1,44 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.saslprep import saslprep + + +class TestSASLprep(unittest.TestCase): + def test_saslprep(self): + try: + import stringprep + except ImportError: + self.assertRaises(TypeError, saslprep, "anything...") + # Bytes strings are ignored. + self.assertEqual(saslprep(b"user"), b"user") + else: + # Examples from RFC4013, Section 3. + self.assertEqual(saslprep("I\u00ADX"), "IX") + self.assertEqual(saslprep("user"), "user") + self.assertEqual(saslprep("USER"), "USER") + self.assertEqual(saslprep("\u00AA"), "a") + self.assertEqual(saslprep("\u2168"), "IX") + self.assertRaises(ValueError, saslprep, "\u0007") + self.assertRaises(ValueError, saslprep, "\u0627\u0031") + + # Bytes strings are ignored. + self.assertEqual(saslprep(b"user"), b"user") diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py new file mode 100644 index 0000000000..2167e561cf --- /dev/null +++ b/test/test_sdam_monitoring_spec.py @@ -0,0 +1,374 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the sdam monitoring spec tests.""" +from __future__ import annotations + +import asyncio +import json +import os +import sys +import time +from pathlib import Path + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, client_knobs, unittest +from test.utils_shared import ( + ServerAndTopologyEventListener, + server_name_to_type, + wait_until, +) + +from bson.json_util import object_hook +from pymongo import MongoClient, monitoring +from pymongo.common import clean_node +from pymongo.errors import ConnectionFailure, NotPrimaryError +from pymongo.hello import Hello +from pymongo.server_description import ServerDescription +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.monitor import Monitor +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = True + +# Location of JSON test specifications. 
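+# (The generated async variant of this file lives one directory deeper,
+# hence the extra .parent in the non-sync branch below.)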
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring") + + +def compare_server_descriptions(expected, actual): + if (expected["address"] != "{}:{}".format(*actual.address)) or ( + server_name_to_type(expected["type"]) != actual.server_type + ): + return False + expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} + + +def compare_topology_descriptions(expected, actual): + if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type: + return False + expected = expected["servers"] + actual = actual.server_descriptions() + if len(expected) != len(actual): + return False + for exp_server in expected: + for _address, actual_server in actual.items(): + if compare_server_descriptions(exp_server, actual_server): + break + else: + return False + return True + + +def compare_events(expected_dict, actual): + if not expected_dict: + return False, "Error: Bad expected value in YAML test" + if not actual: + return False, "Error: Event published was None" + + expected_type, expected = list(expected_dict.items())[0] + + if expected_type == "server_opening_event": + if not isinstance(actual, monitoring.ServerOpeningEvent): + return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerOpeningEvent published with wrong address (expected" " {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + elif expected_type == "server_description_changed_event": + if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): + return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerDescriptionChangedEvent has wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + if not compare_server_descriptions(expected["newDescription"], actual.new_description): + return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent") + if not compare_server_descriptions( + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous ServerDescription incorrect in ServerDescriptionChangedEvent", + ) + + elif expected_type == "server_closed_event": + if not isinstance(actual, monitoring.ServerClosedEvent): + return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerClosedEvent published with wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + elif expected_type == "topology_opening_event": + if not isinstance(actual, monitoring.TopologyOpenedEvent): + return False, "Expected TopologyOpenedEvent, got %s" % (actual.__class__) + + elif expected_type == "topology_description_changed_event": + if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): + return ( + False, + "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__), + ) + if not compare_topology_descriptions(expected["newDescription"], actual.new_description): + return ( + False, + "New TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) + if not 
compare_topology_descriptions( + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) + + elif expected_type == "topology_closed_event": + if not isinstance(actual, monitoring.TopologyClosedEvent): + return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) + + else: + return False, f"Incorrect event: expected {expected_type}, actual {actual}" + + return True, "" + + +def compare_multiple_events(i, expected_results, actual_results): + events_in_a_row = [] + j = i + while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__): + events_in_a_row.append(actual_results[j]) + j += 1 + message = "" + for event in events_in_a_row: + for k in range(i, j): + passed, message = compare_events(expected_results[k], event) + if passed: + expected_results[k] = None + break + else: + return i, False, message + return j, True, "" + + +class TestAllScenarios(IntegrationTest): + def setUp(self): + super().setUp() + self.all_listener = ServerAndTopologyEventListener() + + +def create_test(scenario_def): + def run_scenario(self): + with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05): + _run_scenario(self) + + def _run_scenario(self): + class NoopMonitor(Monitor): + """Override the _run method to do nothing.""" + + def _run(self): + time.sleep(0.05) + + m = MongoClient( + host=scenario_def["uri"], + port=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor, + ) + topology = m._get_topology() + + try: + for phase in scenario_def["phases"]: + for source, response in phase.get("responses", []): + source_address = clean_node(source) + topology.on_change( + ServerDescription( + address=source_address, hello=Hello(response), round_trip_time=0 + ) + ) + + expected_results = phase["outcome"]["events"] + expected_len = len(expected_results) + wait_until( + lambda: len(self.all_listener.results) >= expected_len, + "publish all events", + timeout=15, + ) + + # Wait some time to catch possible lagging extra events. + wait_until(lambda: topology._events.empty(), "publish lagging events") + + i = 0 + while i < expected_len: + result = ( + self.all_listener.results[i] if len(self.all_listener.results) > i else None + ) + # The order of ServerOpening/ClosedEvents doesn't matter + if isinstance( + result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent) + ): + i, passed, message = compare_multiple_events( + i, expected_results, self.all_listener.results + ) + self.assertTrue(passed, message) + else: + self.assertTrue(*compare_events(expected_results[i], result)) + i += 1 + + # Assert no extra events. + extra_events = self.all_listener.results[expected_len:] + if extra_events: + self.fail(f"Extra events {extra_events!r}") + + self.all_listener.reset() + finally: + m.close() + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream, object_hook=object_hook) + # Construct test from scenario. 
+ new_test = create_test(scenario_def) + test_name = f"test_{os.path.splitext(filename)[0]}" + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestSdamMonitoring(IntegrationTest): + knobs: client_knobs + listener: ServerAndTopologyEventListener + test_client: MongoClient + coll: Collection + + @classmethod + def setUpClass(cls): + # Speed up the tests by decreasing the event publish frequency. + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + + @classmethod + def tearDownClass(cls): + cls.knobs.disable() + + @client_context.require_failCommand_fail_point + def setUp(self): + super().setUp() + + retry_writes = client_context.supports_transactions() + self.test_client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + self.coll.insert_one({}) + self.listener.reset() + + def tearDown(self): + super().tearDown() + + def _test_app_error(self, fail_command_opts, expected_error): + address = self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {"failCommands": ["insert"]} + data.update(fail_command_opts) + fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, + } + with self.fail_point(fail_insert): + if self.test_client.options.retry_writes: + self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + self.coll.insert_one({}) + self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known + ) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown_and_rediscovered(): + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) + + # Topology events are not published synchronously + wait_until(marked_unknown_and_rediscovered, "rediscover node") + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) + + def test_network_error_publishes_events(self): + self._test_app_error({"closeConnection": True}, ConnectionFailure) + + # In 4.4+, not primary errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. 
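+    # Hence the following test is limited to server versions <= 4.3.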
+ @client_context.require_version_max(4, 3) + def test_not_primary_error_publishes_events(self): + self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + def test_shutdown_error_publishes_events(self): + self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server.py b/test/test_server.py new file mode 100644 index 0000000000..ab5a40a79b --- /dev/null +++ b/test/test_server.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the server module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.hello import Hello +from pymongo.server_description import ServerDescription +from pymongo.synchronous.server import Server + + +class TestServer(unittest.TestCase): + def test_repr(self): + hello = Hello({"ok": 1}) + sd = ServerDescription(("localhost", 27017), hello) + server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] + self.assertIn("Standalone", str(server)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_description.py b/test/test_server_description.py new file mode 100644 index 0000000000..e8c0098cb6 --- /dev/null +++ b/test/test_server_description.py @@ -0,0 +1,208 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the server_description module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from pymongo import common +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription +from pymongo.server_type import SERVER_TYPE + +address = ("localhost", 27017) + + +def parse_hello_response(doc): + hello_response = Hello(doc) + return ServerDescription(address, hello_response) + + +class TestServerDescription(unittest.TestCase): + def test_unknown(self): + # Default, no hello_response. 
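+        # Without a hello response, the type defaults to Unknown and the
+        # server is neither writable nor readable.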
+ s = ServerDescription(address) + self.assertEqual(SERVER_TYPE.Unknown, s.server_type) + self.assertFalse(s.is_writable) + self.assertFalse(s.is_readable) + + def test_mongos(self): + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) + self.assertEqual(SERVER_TYPE.Mongos, s.server_type) + self.assertEqual("Mongos", s.server_type_name) + self.assertTrue(s.is_writable) + self.assertTrue(s.is_readable) + + def test_primary(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) + + self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) + self.assertEqual("RSPrimary", s.server_type_name) + self.assertTrue(s.is_writable) + self.assertTrue(s.is_readable) + + def test_secondary(self): + s = parse_hello_response( + {"ok": 1, HelloCompat.LEGACY_CMD: False, "secondary": True, "setName": "rs"} + ) + + self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) + self.assertEqual("RSSecondary", s.server_type_name) + self.assertFalse(s.is_writable) + self.assertTrue(s.is_readable) + + def test_arbiter(self): + s = parse_hello_response( + {"ok": 1, HelloCompat.LEGACY_CMD: False, "arbiterOnly": True, "setName": "rs"} + ) + + self.assertEqual(SERVER_TYPE.RSArbiter, s.server_type) + self.assertEqual("RSArbiter", s.server_type_name) + self.assertFalse(s.is_writable) + self.assertFalse(s.is_readable) + + def test_other(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False, "setName": "rs"}) + + self.assertEqual(SERVER_TYPE.RSOther, s.server_type) + self.assertEqual("RSOther", s.server_type_name) + + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hidden": True, + "setName": "rs", + } + ) + + self.assertEqual(SERVER_TYPE.RSOther, s.server_type) + self.assertFalse(s.is_writable) + self.assertFalse(s.is_readable) + + def test_ghost(self): + s = parse_hello_response({"ok": 1, "isreplicaset": True}) + + self.assertEqual(SERVER_TYPE.RSGhost, s.server_type) + self.assertEqual("RSGhost", s.server_type_name) + self.assertFalse(s.is_writable) + self.assertFalse(s.is_readable) + + def test_fields(self): + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "primary": "a:27017", + "tags": {"a": "foo", "b": "baz"}, + "maxMessageSizeBytes": 1, + "maxBsonObjectSize": 2, + "maxWriteBatchSize": 3, + "minWireVersion": 4, + "maxWireVersion": 25, + "setName": "rs", + } + ) + + self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) + self.assertEqual(("a", 27017), s.primary) + self.assertEqual({"a": "foo", "b": "baz"}, s.tags) + self.assertEqual(1, s.max_message_size) + self.assertEqual(2, s.max_bson_size) + self.assertEqual(3, s.max_write_batch_size) + self.assertEqual(4, s.min_wire_version) + self.assertEqual(25, s.max_wire_version) + + def test_defaults(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) + self.assertEqual(common.MAX_BSON_SIZE, s.max_bson_size) + self.assertEqual(common.MAX_MESSAGE_SIZE, s.max_message_size) + self.assertEqual(common.MIN_WIRE_VERSION, s.min_wire_version) + self.assertEqual(common.MAX_WIRE_VERSION, s.max_wire_version) + self.assertEqual(common.MAX_WRITE_BATCH_SIZE, s.max_write_batch_size) + + def test_standalone(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) + self.assertEqual(SERVER_TYPE.Standalone, s.server_type) + + # Mongod started with --slave. + # master-slave replication was removed in MongoDB 4.0. 
+        s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False})
+        self.assertEqual(SERVER_TYPE.Standalone, s.server_type)
+        self.assertTrue(s.is_writable)
+        self.assertTrue(s.is_readable)
+
+    def test_ok_false(self):
+        s = parse_hello_response({"ok": 0, HelloCompat.LEGACY_CMD: True})
+        self.assertEqual(SERVER_TYPE.Unknown, s.server_type)
+        self.assertFalse(s.is_writable)
+        self.assertFalse(s.is_readable)
+
+    def test_all_hosts(self):
+        s = parse_hello_response(
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "hosts": ["a"],
+                "passives": ["b:27018"],
+                "arbiters": ["c"],
+            }
+        )
+
+        self.assertEqual([("a", 27017), ("b", 27018), ("c", 27017)], sorted(s.all_hosts))
+
+    def test_repr(self):
+        s = parse_hello_response({"ok": 1, "msg": "isdbgrid"})
+        self.assertEqual(
+            repr(s), "<ServerDescription ('localhost', 27017) server_type: Mongos, rtt: None>"
+        )
+
+    def test_topology_version(self):
+        topology_version = {"processId": ObjectId(), "counter": Int64("0")}
+        s = parse_hello_response(
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "setName": "rs",
+                "topologyVersion": topology_version,
+            }
+        )
+
+        self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type)
+        self.assertEqual(topology_version, s.topology_version)
+
+        # Resetting a server to unknown preserves topology_version.
+        s_unknown = s.to_unknown()
+        self.assertEqual(SERVER_TYPE.Unknown, s_unknown.server_type)
+        self.assertEqual(topology_version, s_unknown.topology_version)
+
+    def test_topology_version_not_present(self):
+        # No topologyVersion field.
+        s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"})
+
+        self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type)
+        self.assertEqual(None, s.topology_version)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_server_selection.py b/test/test_server_selection.py
new file mode 100644
index 0000000000..4384deac2b
--- /dev/null
+++ b/test/test_server_selection.py
@@ -0,0 +1,210 @@
+# Copyright 2015-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test the topology module's Server Selection Spec implementation."""
+from __future__ import annotations
+
+import os
+import sys
+from pathlib import Path
+
+from pymongo import MongoClient, ReadPreference
+from pymongo.errors import ServerSelectionTimeoutError
+from pymongo.hello import HelloCompat
+from pymongo.operations import _Op
+from pymongo.server_selectors import writable_server_selector
+from pymongo.synchronous.settings import TopologySettings
+from pymongo.synchronous.topology import Topology
+from pymongo.typings import strip_optional
+
+sys.path[0:0] = [""]
+
+from test import IntegrationTest, client_context, unittest
+from test.utils import wait_until
+from test.utils_selection_tests import (
+    create_selection_tests,
+    get_topology_settings_dict,
+)
+from test.utils_selection_tests_shared import (
+    get_addresses,
+    make_server_description,
+)
+from test.utils_shared import (
+    FunctionCallRecorder,
+    OvertCommandListener,
+)
+
+_IS_SYNC = True
+
+# Location of JSON test specifications.
+if _IS_SYNC:
+    TEST_PATH = os.path.join(
+        Path(__file__).resolve().parent, "server_selection", "server_selection"
+    )
+else:
+    TEST_PATH = os.path.join(
+        Path(__file__).resolve().parent.parent, "server_selection", "server_selection"
+    )
+
+
+class SelectionStoreSelector:
+    """No-op selector that keeps track of what was passed to it."""
+
+    def __init__(self):
+        self.selection = None
+
+    def __call__(self, selection):
+        self.selection = selection
+        return selection
+
+
+class TestAllScenarios(create_selection_tests(TEST_PATH)):  # type: ignore
+    pass
+
+
+class TestCustomServerSelectorFunction(IntegrationTest):
+    @client_context.require_replica_set
+    def test_functional_select_max_port_number_host(self):
+        # Selector that returns the server with the highest port number.
+        def custom_selector(servers):
+            ports = [s.address[1] for s in servers]
+            idx = ports.index(max(ports))
+            return [servers[idx]]
+
+        # Initialize client with appropriate listeners.
+        listener = OvertCommandListener()
+        client = self.rs_or_single_client(
+            server_selector=custom_selector, event_listeners=[listener]
+        )
+        coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll
+        self.addCleanup(client.drop_database, "testdb")
+
+        # Wait for the node list to be fully populated.
+        def all_hosts_started():
+            return len((client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len(
+                client._topology._description.readable_servers
+            )
+
+        wait_until(all_hosts_started, "receive heartbeat from all hosts")
+
+        expected_port = max(
+            [strip_optional(n.address[1]) for n in client._topology._description.readable_servers]
+        )
+
+        # Insert 1 record and access it 10 times.
+        coll.insert_one({"name": "John Doe"})
+        for _ in range(10):
+            coll.find_one({"name": "John Doe"})
+
+        # Confirm all find commands are run against the appropriate host.
+        for command in listener.started_events:
+            if command.command_name == "find":
+                self.assertEqual(command.connection_id[1], expected_port)
+
+    def test_invalid_server_selector(self):
+        # Client initialization must fail if server_selector is not callable.
+        for selector_candidate in [[], 10, "string", {}]:
+            with self.assertRaisesRegex(ValueError, "must be a callable"):
+                MongoClient(connect=False, server_selector=selector_candidate)
+
+        # None value for server_selector is OK.
+        MongoClient(connect=False, server_selector=None)
+
+    @client_context.require_replica_set
+    def test_selector_called(self):
+        selector = FunctionCallRecorder(lambda x: x)
+
+        # Client setup.
+        mongo_client = self.rs_or_single_client(server_selector=selector)
+        test_collection = mongo_client.testdb.test_collection
+        self.addCleanup(mongo_client.drop_database, "testdb")
+
+        # Do N operations and test the selector is called at least N-1 times due to the fast path.
+ test_collection.insert_one({"age": 20, "name": "John"}) + test_collection.insert_one({"age": 31, "name": "Jane"}) + test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) + test_collection.find_one({"name": "Roe"}) + self.assertGreaterEqual(selector.call_count, 3) + + @client_context.require_replica_set + def test_latency_threshold_application(self): + selector = SelectionStoreSelector() + + scenario_def: dict = { + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that all but one server is too slow. + rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]] + min_rtt_idx = rtt_times.index(min(rtt_times)) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + topology.on_change(server_description) + + # Invoke server selection and assert no filtering based on latency + # prior to custom server selection logic kicking in. + server = topology.select_server(ReadPreference.NEAREST, _Op.TEST) + assert selector.selection is not None + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) + + # Ensure proper filtering based on latency after custom selection. + self.assertEqual(server.description.address, seeds[min_rtt_idx]) + + @client_context.require_replica_set + def test_server_selector_bypassed(self): + selector = FunctionCallRecorder(lambda x: x) + + scenario_def = { + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that no server is writeable. + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + topology.on_change(server_description) + + # Invoke server selection and assert no calls to our custom selector. + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + topology.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) + self.assertEqual(selector.call_count, 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py new file mode 100644 index 0000000000..fcf2cce0e0 --- /dev/null +++ b/test/test_server_selection_in_window.py @@ -0,0 +1,180 @@ +# Copyright 2020-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import asyncio +import os +import threading +from pathlib import Path +from test import IntegrationTest, client_context, unittest +from test.helpers import ConcurrentRunner +from test.utils import flaky +from test.utils_selection_tests import create_topology +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, + wait_until, +) +from test.utils_spec_runner import SpecTestCreator + +from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = True +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) + + +class TestAllScenarios(unittest.TestCase): + def run_scenario(self, scenario_def): + topology = create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock["operation_count"] + + pref = ReadPreference.NEAREST + counts = {address: 0 for address in topology._description.server_descriptions()} + + # Number of times to repeat server selection + iterations = scenario_def["iterations"] + for _ in range(iterations): + server = topology.select_server(pref, _Op.TEST, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address]) / iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def) + + return run_scenario + + +class CustomSpecTestCreator(SpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. 
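+ Each file supplies mocked per-server operation counts, an iteration
+ count, and expected selection frequencies (consumed by run_scenario above).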
+ """ + return [scenario_def] + + +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderTask(ConcurrentRunner): + def __init__(self, collection, iterations): + super().__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + def run(self): + for _ in range(self.iterations): + self.collection.find_one({}) + self.passed = True + + +class TestProse(IntegrationTest): + def frequencies(self, client, listener, n_finds=10): + coll = client.test.test + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + task.start() + for task in tasks: + task.join() + for task in tasks: + self.assertTrue(task.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_TASKS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0.0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address] / float(len(events)) + return freqs + + @client_context.require_failCommand_appName + @client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") + def test_load_balancing(self): + listener = OvertCommandListener() + cmap_listener = CMAPListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. + client = self.rs_client( + client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener, cmap_listener], + localThresholdMS=30000, + minPoolSize=10, + ) + wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + # Wait for both pools to be populated. + cmap_listener.wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. + delay_finds = { + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", + }, + } + with self.fail_point(delay_finds): + nodes = client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = self.frequencies(client, listener, n_finds=150) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_logging.py b/test/test_server_selection_logging.py new file mode 100644 index 0000000000..d53d8dc84f --- /dev/null +++ b/test/test_server_selection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") + + +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py new file mode 100644 index 0000000000..2aef36a585 --- /dev/null +++ b/test/test_server_selection_rtt.py @@ -0,0 +1,76 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module.""" +from __future__ import annotations + +import json +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import PyMongoTestCase, unittest + +from pymongo.read_preferences import MovingAverage + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") + + +class TestAllScenarios(PyMongoTestCase): + pass + + +def create_test(scenario_def): + def run_scenario(self): + moving_average = MovingAverage() + + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) + + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) + + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_session.py b/test/test_session.py new file mode 100644 index 0000000000..9aa56a711e --- /dev/null +++ b/test/test_session.py @@ -0,0 +1,1235 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the client_session module.""" +from __future__ import annotations + +import asyncio +import copy +import sys +import time +from inspect import iscoroutinefunction +from io import BytesIO +from test.helpers import ExceptionCatchingTask +from typing import Any, Callable, List, Set, Tuple + +from pymongo.synchronous.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test import ( + IntegrationTest, + SkipTest, + UnitTest, + client_context, + unittest, +) +from test.helpers import client_knobs +from test.utils_shared import ( + EventListener, + HeartbeatEventListener, + OvertCommandListener, + wait_until, +) + +from bson import DBRef +from gridfs.synchronous.grid_file import GridFS, GridFSBucket +from pymongo import ASCENDING, MongoClient, _csot, monitoring +from pymongo.common import _MAX_END_SESSIONS +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import IndexModel, InsertOne, UpdateOne +from pymongo.read_concern import ReadConcern +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor + +_IS_SYNC = True + + +# Ignore auth commands like saslStart, so we can assert lsid is in all commands. +class SessionTestListener(EventListener): + def started(self, event): + if not event.command_name.startswith("sasl"): + super().started(event) + + def succeeded(self, event): + if not event.command_name.startswith("sasl"): + super().succeeded(event) + + def failed(self, event): + if not event.command_name.startswith("sasl"): + super().failed(event) + + def first_command_started(self): + assert len(self.started_events) >= 1, "No command-started events" + + return self.started_events[0] + + +def session_ids(client): + return [s.session_id for s in copy.copy(client._topology._session_pool)] + + +class TestSession(IntegrationTest): + client2: MongoClient + sensitive_commands: Set[str] + + @client_context.require_sessions + def setUp(self): + super().setUp() + # Create a second client so we can make sure clients cannot share + # sessions. + self.client2 = self.rs_or_single_client() + + # Redact no commands, so we can test user-admin commands have "lsid". 
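+ # (Command monitoring normally redacts the bodies of sensitive commands
+ # such as saslStart and authenticate, which would hide their lsid.)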
+ self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + monitoring._SENSITIVE_COMMANDS.clear() + + self.listener = SessionTestListener() + self.session_checker_listener = SessionTestListener() + self.client = self.rs_or_single_client( + event_listeners=[self.listener, self.session_checker_listener] + ) + self.db = self.client.pymongo_test + self.initial_lsids = {s["id"] for s in session_ids(self.client)} + + def tearDown(self): + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) + self.client.drop_database("pymongo_test") + used_lsids = self.initial_lsids.copy() + for event in self.session_checker_listener.started_events: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) + + current_lsids = {s["id"] for s in session_ids(self.client)} + self.assertLessEqual(used_lsids, current_lsids) + + super().tearDown() + + def _test_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in ops: + with client.start_session() as s: + listener.reset() + s._materialize() + last_use = s._server_session.last_use + start = time.monotonic() + self.assertLessEqual(last_use, start) + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{f.__name__} sent wrong lsid with {event.command_name}", + ) + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + f(*args, **kw) + + # Test a session cannot be used on another client. + with self.client2.start_session() as s: + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaisesRegex( + InvalidOperation, + "Can only use session with the MongoClient that started it", + ): + f(*args, **kw) + + # No explicit session. + for f, args, kw in ops: + listener.reset() + f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + lsids = [] + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + lsids.append(event.command["lsid"]) + + if not (sys.platform.startswith("java") or "PyPy" in sys.version): + # Server session was returned to pool. Ignore interpreters with + # non-deterministic GC. + for lsid in lsids: + self.assertIn( + lsid, + session_ids(client), + f"{f.__name__} did not return implicit session to pool", + ) + + def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. 
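+ # With maxPoolSize=1 all concurrent operations queue for a single
+ # connection, so they should all end up reusing the same server session.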
+ succeeded = False + lsid_set = set() + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + # Retry up to 10 times because there is a known race condition that can cause multiple + # sessions to be used: connection check in happens before session check in + for _ in range(10): + cursor = client.db.test.find({}) + ops: List[Tuple[Callable, List[Any]]] = [ + (client.db.test.find_one, [{"_id": 1}]), + (client.db.test.delete_one, [{}]), + (client.db.test.update_one, [{}, {"$set": {"x": 2}}]), + (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]), + (client.db.test.find_one_and_delete, [{}]), + (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]), + (client.db.test.find_one_and_replace, [{}, {}]), + (client.db.test.aggregate, [[{"$limit": 1}]]), + (client.db.test.find, []), + (client.server_info, []), + (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), + (cursor.distinct, ["_id"]), + (client.db.list_collections, []), + ] + tasks = [] + listener.reset() + + def target(op, *args): + if iscoroutinefunction(op): + res = op(*args) + else: + res = op(*args) + if isinstance(res, (Cursor, CommandCursor)): + res.to_list() + + for op, args in ops: + tasks.append( + ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__) + ) + tasks[-1].start() + self.assertEqual(len(tasks), len(ops)) + for t in tasks: + t.join() + self.assertIsNone(t.exc) + lsid_set.clear() + for i in listener.started_events: + if i.command.get("lsid"): + lsid_set.add(i.command.get("lsid")["id"]) + if len(lsid_set) == 1: + # Break on first success. + succeeded = True + break + self.assertTrue(succeeded, lsid_set) + + def test_pool_lifo(self): + # "Pool is LIFO" test from Driver Sessions Spec. + a = self.client.start_session() + b = self.client.start_session() + a_id = a.session_id + b_id = b.session_id + a.end_session() + b.end_session() + + s = self.client.start_session() + self.assertEqual(b_id, s.session_id) + self.assertNotEqual(a_id, s.session_id) + + s2 = self.client.start_session() + self.assertEqual(a_id, s2.session_id) + self.assertNotEqual(b_id, s2.session_id) + + s.end_session() + s2.end_session() + + def test_end_session(self): + # We test elsewhere that using an ended session throws InvalidOperation. + client = self.client + s = client.start_session() + self.assertFalse(s.has_ended) + self.assertIsNotNone(s.session_id) + + s.end_session() + self.assertTrue(s.has_ended) + + with self.assertRaisesRegex(InvalidOperation, "ended session"): + s.session_id + + def test_end_sessions(self): + # Use a new client so that the tearDown hook does not error. + listener = SessionTestListener() + client = self.rs_or_single_client(event_listeners=[listener]) + # Start many sessions. + sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] + for s in sessions: + s._materialize() + for s in sessions: + s.end_session() + + # Closing the client should end all sessions and clear the pool. + self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) + client.close() + self.assertEqual(len(client._topology._session_pool), 0) + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] + self.assertEqual(len(end_sessions), 2) + + # Closing again should not send any commands. 
+ listener.reset() + client.close() + self.assertEqual(len(listener.started_events), 0) + + def test_client(self): + client = self.client + ops: list = [ + (client.server_info, [], {}), + (client.list_database_names, [], {}), + (client.drop_database, ["pymongo_test"], {}), + ] + + self._test_ops(client, *ops) + + def test_database(self): + client = self.client + db = client.pymongo_test + ops: list = [ + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), + (db.list_collection_names, [], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), + ] + self._test_ops(client, *ops) + + @staticmethod + def collection_write_ops(coll): + """Generate database write ops for tests.""" + return [ + (coll.drop, [], {}), + (coll.bulk_write, [[InsertOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.delete_many, [{}], {}), + (coll.find_one_and_replace, [{}, {}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), + (coll.find_one_and_delete, [{}, {}], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + + def test_collection(self): + client = self.client + coll = client.pymongo_test.collection + + # Test some collection methods - the rest are in test_cursor. + ops = self.collection_write_ops(coll) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) + + self._test_ops(client, *ops) + + def test_cursor_clone(self): + coll = self.client.pymongo_test.collection + # Ensure some batches. + coll.insert_many({} for _ in range(10)) + self.addCleanup(coll.drop) + + with self.client.start_session() as s: + cursor = coll.find(session=s) + self.assertIs(cursor.session, s) + clone = cursor.clone() + self.assertIs(clone.session, s) + + # No explicit session. + cursor = coll.find(batch_size=2) + next(cursor) + # Session is "owned" by cursor. + self.assertIsNone(cursor.session) + self.assertIsNotNone(cursor._session) + clone = cursor.clone() + next(clone) + self.assertIsNone(clone.session) + self.assertIsNotNone(clone._session) + self.assertIsNot(cursor._session, clone._session) + cursor.close() + clone.close() + + def test_cursor(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + coll.insert_many([{} for _ in range(1000)]) + + # Test all cursor methods. 
+ if _IS_SYNC: + # getitem is only supported in the synchronous API + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + else: + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + + for name, f in ops: + with client.start_session() as s: + listener.reset() + f(session=s) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{name} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + with self.assertRaisesRegex(InvalidOperation, "ended session"): + f(session=s) + + # No explicit session. + for name, f in ops: + listener.reset() + f(session=None) + event0 = listener.first_command_started() + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") + + lsid = event0.command["lsid"] + + for event in listener.started_events[1:]: + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" + ) + + self.assertEqual( + lsid, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + def test_gridfs(self): + client = self.client + fs = GridFS(client.pymongo_test) + + def new_file(session=None): + grid_file = fs.new_file(_id=1, filename="f", session=session) + # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. 
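+ # (GridFS's default chunk size is 255 kB = 261120 bytes, so
+ # ceil(1048576 / 261120) = 5 chunks: four full plus one partial.)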
+ grid_file.write(b"a" * 1048576) + grid_file.close() + + def find(session=None): + files = fs.find({"_id": 1}, session=session).to_list() + for f in files: + f.read() + + def get(session=None): + (fs.get(1, session=session)).read() + + def get_version(session=None): + (fs.get_version("f", session=session)).read() + + def get_last_version(session=None): + (fs.get_last_version("f", session=session)).read() + + def find_list(session=None): + fs.find(session=session).to_list() + + self._test_ops( + client, + (new_file, [], {}), + (fs.put, [b"data"], {}), + (get, [], {}), + (get_version, [], {}), + (get_last_version, [], {}), + (fs.list, [], {}), + (fs.find_one, [1], {}), + (find_list, [], {}), + (fs.exists, [1], {}), + (find, [], {}), + (fs.delete, [1], {}), + ) + + def test_gridfs_bucket(self): + client = self.client + bucket = GridFSBucket(client.pymongo_test) + + def upload(session=None): + stream = bucket.open_upload_stream("f", session=session) + stream.write(b"a" * 1048576) + stream.close() + + def upload_with_id(session=None): + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + stream.write(b"a" * 1048576) + stream.close() + + def open_download_stream(session=None): + stream = bucket.open_download_stream(1, session=session) + stream.read() + + def open_download_stream_by_name(session=None): + stream = bucket.open_download_stream_by_name("f", session=session) + stream.read() + + def find(session=None): + files = bucket.find({"_id": 1}, session=session).to_list() + for f in files: + f.read() + + sio = BytesIO() + + self._test_ops( + client, + (upload, [], {}), + (upload_with_id, [], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), + (open_download_stream, [], {}), + (open_download_stream_by_name, [], {}), + (bucket.download_to_stream, [1, sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), + (find, [], {}), + (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), + # Delete both files so _test_ops can run these operations twice. + (bucket.delete, [1], {}), + (bucket.delete_by_name, ["f"], {}), + ) + + def test_gridfsbucket_cursor(self): + client = self.client + bucket = GridFSBucket(client.pymongo_test) + + for file_id in 1, 2: + stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) + stream.write(b"a" * 1048576) + stream.close() + + with client.start_session() as s: + cursor = bucket.find(session=s) + for f in cursor: + f.read() + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + + # No explicit session. + cursor = bucket.find(batch_size=1) + files = [cursor.next()] + + s = cursor._session + self.assertFalse(s.has_ended) + cursor.__del__() + + self.assertTrue(s.has_ended) + self.assertIsNone(cursor._session) + + # Files are still valid, they use their own sessions. + for f in files: + f.read() + + # Explicit session. + with client.start_session() as s: + cursor = bucket.find(session=s) + assert cursor.session is not None + s = cursor.session + files = cursor.to_list() + cursor.__del__() + self.assertFalse(s.has_ended) + + for f in files: + f.read() + + for f in files: + # Attempt to read the file again. + f.seek(0) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + f.read() + + def test_aggregate(self): + client = self.client + coll = client.pymongo_test.collection + + def agg(session=None): + (coll.aggregate([], batchSize=2, session=session)).to_list() + + # With empty collection. 
+ self._test_ops(client, (agg, [], {})) + + # Now with documents. + coll.insert_many([{} for _ in range(10)]) + self.addCleanup(coll.drop) + self._test_ops(client, (agg, [], {})) + + def test_killcursors(self): + client = self.client + coll = client.pymongo_test.collection + coll.insert_many([{} for _ in range(10)]) + + def explicit_close(session=None): + cursor = coll.find(batch_size=2, session=session) + next(cursor) + cursor.close() + + self._test_ops(client, (explicit_close, [], {})) + + def test_aggregate_error(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + # 3.6.0 mongos only validates the aggregate pipeline when the + # database exists. + coll.insert_one({}) + listener.reset() + + with self.assertRaises(OperationFailure): + coll.aggregate([{"$badOperation": {"bar": 1}}]) + + event = listener.first_command_started() + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] + # Session was returned to pool despite error. + self.assertIn(lsid, session_ids(client)) + + def _test_cursor_helper(self, create_cursor, close_cursor): + coll = self.client.pymongo_test.collection + coll.insert_many([{} for _ in range(1000)]) + + cursor = create_cursor(coll, None) + next(cursor) + # Session is "owned" by cursor. + session = cursor._session + self.assertIsNotNone(session) + lsid = session.session_id + next(cursor) + + # Cursor owns its session unto death. + self.assertNotIn(lsid, session_ids(self.client)) + close_cursor(cursor) + self.assertIn(lsid, session_ids(self.client)) + + # An explicit session is not ended by cursor.close() or list(cursor). + with self.client.start_session() as s: + cursor = create_cursor(coll, s) + next(cursor) + close_cursor(cursor) + self.assertFalse(s.has_ended) + lsid = s.session_id + + self.assertTrue(s.has_ended) + self.assertIn(lsid, session_ids(self.client)) + + def test_cursor_close(self): + def find(coll, session): + return coll.find(session=session) + + self._test_cursor_helper(find, lambda cursor: cursor.close()) + + def test_command_cursor_close(self): + def aggregate(coll, session): + return coll.aggregate([], session=session) + + self._test_cursor_helper(aggregate, lambda cursor: cursor.close()) + + def test_cursor_del(self): + def find(coll, session): + return coll.find(session=session) + + def delete(cursor): + return cursor.__del__() + + self._test_cursor_helper(find, delete) + + def test_command_cursor_del(self): + def aggregate(coll, session): + return coll.aggregate([], session=session) + + def delete(cursor): + return cursor.__del__() + + self._test_cursor_helper(aggregate, delete) + + def test_cursor_exhaust(self): + def find(coll, session): + return coll.find(session=session) + + self._test_cursor_helper(find, lambda cursor: cursor.to_list()) + + def test_command_cursor_exhaust(self): + def aggregate(coll, session): + return coll.aggregate([], session=session) + + self._test_cursor_helper(aggregate, lambda cursor: cursor.to_list()) + + def test_cursor_limit_reached(self): + def find(coll, session): + return coll.find(limit=4, batch_size=2, session=session) + + self._test_cursor_helper( + find, + lambda cursor: cursor.to_list(), + ) + + def test_command_cursor_limit_reached(self): + def aggregate(coll, session): + return coll.aggregate([], batchSize=900, session=session) + + self._test_cursor_helper( + aggregate, + lambda cursor: cursor.to_list(), + ) + + def _test_unacknowledged_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in 
ops: + with client.start_session() as s: + listener.reset() + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaises( + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" + ): + f(*args, **kw) + if f.__name__ == "create_collection": + # create_collection runs listCollections first. + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + # Should not run any command before raising an error. + self.assertFalse(listener.started_events, f"{f.__name__} sent command") + + self.assertTrue(s.has_ended) + + # Unacknowledged write without a session does not send an lsid. + for f, args, kw in ops: + listener.reset() + f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + + if f.__name__ == "create_collection": + # create_collection runs listCollections first. + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + for event in listener.started_events: + self.assertNotIn( + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" + ) + + def test_unacknowledged_writes(self): + # Ensure the collection exists. + self.client.pymongo_test.test_unacked_writes.insert_one({}) + client = self.rs_or_single_client(w=0, event_listeners=[self.listener]) + db = client.pymongo_test + coll = db.test_unacked_writes + ops: list = [ + (client.drop_database, [db.name], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + ] + ops.extend(self.collection_write_ops(coll)) + self._test_unacknowledged_ops(client, *ops) + + def drop_db(): + try: + self.client.drop_database(db.name) + return True + except OperationFailure as exc: + # Try again on BackgroundOperationInProgressForDatabase and + # BackgroundOperationInProgressForNamespace. 
+ if exc.code in (12586, 12587): + return False + raise + + wait_until(drop_db, "dropped database after w=0 writes") + + def test_snapshot_incompatible_with_causal_consistency(self): + with self.client.start_session(causal_consistency=False, snapshot=False): + pass + with self.client.start_session(causal_consistency=False, snapshot=True): + pass + with self.client.start_session(causal_consistency=True, snapshot=False): + pass + with self.assertRaises(ConfigurationError): + with self.client.start_session(causal_consistency=True, snapshot=True): + pass + + def test_session_not_copyable(self): + client = self.client + with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) + + +class TestCausalConsistency(UnitTest): + listener: SessionTestListener + client: MongoClient + + @client_context.require_sessions + def setUp(self): + super().setUp() + self.listener = SessionTestListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + + @client_context.require_no_standalone + def test_core(self): + with self.client.start_session() as sess: + self.assertIsNone(sess.cluster_time) + self.assertIsNone(sess.operation_time) + self.listener.reset() + self.client.pymongo_test.test.find_one(session=sess) + started = self.listener.started_events[0] + cmd = started.command + self.assertIsNone(cmd.get("readConcern")) + op_time = sess.operation_time + self.assertIsNotNone(op_time) + succeeded = self.listener.succeeded_events[0] + reply = succeeded.reply + self.assertEqual(op_time, reply.get("operationTime")) + + # No explicit session + self.client.pymongo_test.test.insert_one({}) + self.assertEqual(sess.operation_time, op_time) + self.listener.reset() + try: + self.client.pymongo_test.command("doesntexist", session=sess) + except: + pass + failed = self.listener.failed_events[0] + failed_op_time = failed.failure.get("operationTime") + # Some older builds of MongoDB 3.5 / 3.6 return None for + # operationTime when a command fails. Make sure we don't + # change operation_time to None. + if failed_op_time is None: + self.assertIsNotNone(sess.operation_time) + else: + self.assertEqual(sess.operation_time, failed_op_time) + + with self.client.start_session() as sess2: + self.assertIsNone(sess2.cluster_time) + self.assertIsNone(sess2.operation_time) + self.assertRaises(TypeError, sess2.advance_cluster_time, 1) + self.assertRaises(ValueError, sess2.advance_cluster_time, {}) + self.assertRaises(TypeError, sess2.advance_operation_time, 1) + # No error + assert sess.cluster_time is not None + assert sess.operation_time is not None + sess2.advance_cluster_time(sess.cluster_time) + sess2.advance_operation_time(sess.operation_time) + self.assertEqual(sess.cluster_time, sess2.cluster_time) + self.assertEqual(sess.operation_time, sess2.operation_time) + + def _test_reads(self, op, exception=None): + coll = self.client.pymongo_test.test + with self.client.start_session() as sess: + coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + if exception: + with self.assertRaises(exception): + op(coll, sess) + else: + op(coll, sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @client_context.require_no_standalone + def test_reads(self): + # Make sure the collection exists. 
+ self.client.pymongo_test.test.insert_one({}) + + def aggregate(coll, session): + return (coll.aggregate([], session=session)).to_list() + + def aggregate_raw(coll, session): + return (coll.aggregate_raw_batches([], session=session)).to_list() + + def find_raw(coll, session): + return coll.find_raw_batches({}, session=session).to_list() + + self._test_reads(aggregate) + self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) + self._test_reads(lambda coll, session: coll.find_one({}, session=session)) + self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) + self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) + self._test_reads(aggregate_raw) + self._test_reads(find_raw) + + with self.assertRaises(ConfigurationError): + self._test_reads(lambda coll, session: coll.estimated_document_count(session=session)) + + def _test_writes(self, op): + coll = self.client.pymongo_test.test + with self.client.start_session() as sess: + op(coll, sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + coll.find_one({}, session=sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @client_context.require_no_standalone + def test_writes(self): + self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) + self._test_writes( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + self._test_writes( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + self._test_writes( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) + self._test_writes( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + self._test_writes( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + self._test_writes(lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session)) + self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) + self._test_writes( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) + + def _test_no_read_concern(self, op): + coll = self.client.pymongo_test.test + with self.client.start_session() as sess: + coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + op(coll, sess) + rc = self.listener.started_events[0].command.get("readConcern") + self.assertIsNone(rc) + + @client_context.require_no_standalone + def test_writes_do_not_include_read_concern(self): + self._test_no_read_concern( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: 
coll.insert_many([{}], session=session)) + self._test_no_read_concern( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + self._test_no_read_concern( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + self._test_no_read_concern( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.delete_many({}, session=session)) + self._test_no_read_concern( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + self._test_no_read_concern( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + self._test_no_read_concern( + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.create_index("foo", session=session)) + self._test_no_read_concern( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_no_read_concern(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) + + # Not a write, but explain also doesn't support readConcern. + self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) + + @client_context.require_no_standalone + def test_get_more_does_not_include_read_concern(self): + coll = self.client.pymongo_test.test + with self.client.start_session() as sess: + coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + coll.insert_many([{}, {}]) + cursor = coll.find({}).batch_size(1) + next(cursor) + self.listener.reset() + cursor.to_list() + started = self.listener.started_events[0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) + + def test_session_not_causal(self): + with self.client.start_session(causal_consistency=False) as s: + self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @client_context.require_standalone + def test_server_not_causal(self): + with self.client.start_session(causal_consistency=True) as s: + self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @client_context.require_no_standalone + def test_read_concern(self): + with self.client.start_session(causal_consistency=True) as s: + coll = self.client.pymongo_test.test + coll.insert_one({}, session=s) + self.listener.reset() + coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + self.assertIsNotNone(read_concern) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + coll = coll.with_options(read_concern=ReadConcern("majority")) + self.listener.reset() + coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + 
self.assertIsNotNone(read_concern) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + @client_context.require_no_standalone + def test_cluster_time_with_server_support(self): + self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNotNone(after_cluster_time) + + @client_context.require_standalone + def test_cluster_time_no_server_support(self): + self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNone(after_cluster_time) + + +class TestClusterTime(IntegrationTest): + def setUp(self): + super().setUp() + if "$clusterTime" not in (client_context.hello): + raise SkipTest("$clusterTime not supported") + + # Sessions prose test: 3) $clusterTime in commands + def test_cluster_time(self): + listener = SessionTestListener() + client = self.rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + collection.insert_many([{} for _ in range(10)]) + self.addCleanup(collection.drop) + self.addCleanup(client.pymongo_test.collection2.drop) + + def rename_and_drop(): + # Ensure collection exists. + collection.insert_one({}) + collection.rename("collection2") + client.pymongo_test.collection2.drop() + + def insert_and_find(): + cursor = collection.find().batch_size(1) + for _ in range(10): + # Advance the cluster time. + collection.insert_one({}) + next(cursor) + + cursor.close() + + def insert_and_aggregate(): + cursor = (collection.aggregate([], batchSize=1)).batch_size(1) + for _ in range(5): + # Advance the cluster time. + collection.insert_one({}) + next(cursor) + + cursor.close() + + def aggregate(): + (collection.aggregate([])).to_list() + + ops = [ + # Tests from Driver Sessions Spec. + ("ping", lambda: client.admin.command("ping")), + ("aggregate", lambda: aggregate()), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + # Additional PyMongo tests. + ("insert_and_find", insert_and_find), + ("insert_and_aggregate", insert_and_aggregate), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ] + + for _name, f in ops: + listener.reset() + # Call f() twice, insert to advance clusterTime, call f() again. 
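+ # Every command after the first should gossip a $clusterTime no older
+ # than the one received in the previous reply, as asserted below.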
+ f() + f() + collection.insert_one({}) + f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): + self.assertIn( + "$clusterTime", + event.command, + f"{f.__name__} sent no $clusterTime with {event.command_name}", + ) + + if i > 0: + succeeded = listener.succeeded_events[i - 1] + self.assertIn( + "$clusterTime", + succeeded.reply, + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", + ) + + self.assertTrue( + event.command["$clusterTime"]["clusterTime"] + >= succeeded.reply["$clusterTime"]["clusterTime"], + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", + ) + + # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands + def test_cluster_time_not_used_by_sdam(self): + heartbeat_listener = HeartbeatEventListener() + cmd_listener = OvertCommandListener() + with client_knobs(min_heartbeat_interval=0.01): + c1 = self.single_client( + event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10 + ) + cluster_time = (c1.admin.command({"ping": 1}))["$clusterTime"] + self.assertEqual(c1._topology.max_cluster_time(), cluster_time) + + # Advance the server's $clusterTime by performing an insert via another client. + self.db.test.insert_one({"advance": "$clusterTime"}) + # Wait until the client C1 processes the next pair of SDAM heartbeat started + succeeded events. + heartbeat_listener.reset() + + def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + wait_until(next_heartbeat, "never found pair of heartbeat started + succeeded events") + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py new file mode 100644 index 0000000000..3c80c70d38 --- /dev/null +++ b/test/test_sessions_unified.py @@ -0,0 +1,40 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + + +# Generate unified tests. 
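+# generate_test_classes builds a unittest.TestCase subclass per JSON spec
+# file under TEST_PATH; updating globals() makes them discoverable here.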
+globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_son.py b/test/test_son.py index 5730da2564..36a6834889 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,17 +13,18 @@ # limitations under the License. """Tests for the son module.""" +from __future__ import annotations import copy import pickle import re import sys -import unittest + sys.path[0:0] = [""] -from nose.plugins.skip import SkipTest +from collections import OrderedDict +from test import unittest -from bson.py3compat import b from bson.son import SON @@ -33,9 +34,9 @@ def test_ordered_dict(self): a1["hello"] = "world" a1["mike"] = "awesome" a1["hello_"] = "mike" - self.assertEqual(a1.items(), [("hello", "world"), - ("mike", "awesome"), - ("hello_", "mike")]) + self.assertEqual( + list(a1.items()), [("hello", "world"), ("mike", "awesome"), ("hello_", "mike")] + ) b2 = SON({"hello": "world"}) self.assertEqual(b2["hello"], "world") @@ -43,29 +44,28 @@ def test_ordered_dict(self): def test_equality(self): a1 = SON({"hello": "world"}) - b2 = SON((('hello', 'world'), ('mike', 'awesome'), ('hello_', 'mike'))) + b2 = SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike"))) self.assertEqual(a1, SON({"hello": "world"})) - self.assertEqual(b2, SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertEqual(b2, dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertEqual(b2, {"hello_": "mike", "mike": "awesome", "hello": "world"}) self.assertNotEqual(a1, b2) - self.assertNotEqual(b2, SON((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) - self.assertFalse(b2 != SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertFalse(b2 != dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertFalse(b2 != {"hello_": "mike", "mike": "awesome", "hello": "world"}) + + # Embedded SON. + d4 = SON([("blah", {"foo": SON()})]) + self.assertEqual(d4, {"blah": {"foo": {}}}) + self.assertEqual(d4, {"blah": {"foo": SON()}}) + self.assertNotEqual(d4, {"blah": {"foo": []}}) + + # Original data unaffected. + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_to_dict(self): a1 = SON() @@ -81,40 +81,34 @@ def test_to_dict(self): self.assertEqual(dict, c3.to_dict()["blah"][0].__class__) self.assertEqual(dict, d4.to_dict()["blah"]["foo"].__class__) - def test_pickle(self): + # Original data unaffected. 
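+ # (to_dict returns converted copies; the nested SON in d4 keeps its class.)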
+ self.assertEqual(SON, d4["blah"]["foo"].__class__) + def test_pickle(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) - for protocol in xrange(pickle.HIGHEST_PROTOCOL + 1): - pickled = pickle.loads(pickle.dumps(complex_son, - protocol=protocol)) - self.assertEqual(pickled['son'], pickled['list'][0]) - self.assertEqual(pickled['son'], pickled['list'][1]) + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + pickled = pickle.loads(pickle.dumps(complex_son, protocol=protocol)) + self.assertEqual(pickled["son"], pickled["list"][0]) + self.assertEqual(pickled["son"], pickled["list"][1]) def test_pickle_backwards_compatability(self): - # For a full discussion see http://bugs.python.org/issue6137 - if sys.version.startswith('3.0'): - raise SkipTest("Python 3.0.x can't unpickle " - "objects pickled in Python 2.x.") - # This string was generated by pickling a SON object in pymongo # version 2.1.1 - pickled_with_2_1_1 = b( - "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" - "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." + pickled_with_2_1_1 = ( + b"ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" + b"c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" + b"S'_SON__keys'\np7\n(lp8\nsb." ) son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) def test_copying(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) regex_son = SON([("x", re.compile("^hello.*"))]) - reflexive_son = SON([('son', simple_son)]) + reflexive_son = SON([("son", simple_son)]) reflexive_son["reflexive"] = reflexive_son simple_son1 = copy.copy(simple_son) @@ -140,9 +134,61 @@ def test_copying(self): self.assertEqual(complex_son, complex_son1) reflexive_son1 = copy.deepcopy(reflexive_son) - self.assertEqual(reflexive_son.keys(), reflexive_son1.keys()) + self.assertEqual(list(reflexive_son), list(reflexive_son1)) self.assertEqual(id(reflexive_son1), id(reflexive_son1["reflexive"])) + def test_iteration(self): + """Test __iter__""" + # test success case + test_son = SON([(1, 100), (2, 200), (3, 300)]) + for ele in test_son: + self.assertEqual(ele * 100, test_son[ele]) + + def test_contains_has(self): + """has_key and __contains__""" + test_son = SON([(1, 100), (2, 200), (3, 300)]) + self.assertIn(1, test_son) + self.assertIn(2, test_son, "in failed") + self.assertNotIn(22, test_son, "in succeeded when it shouldn't") + self.assertTrue(test_son.has_key(2), "has_key failed") + self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") + + def test_clears(self): + """Test clear()""" + test_son = SON([(1, 100), (2, 200), (3, 300)]) + test_son.clear() + self.assertNotIn(1, test_son) + self.assertEqual(0, len(test_son)) + self.assertEqual(0, len(test_son.keys())) + self.assertEqual({}, test_son.to_dict()) + + def test_len(self): + """Test len""" + test_son = SON() + self.assertEqual(0, len(test_son)) + test_son = SON([(1, 100), (2, 200), (3, 300)]) + self.assertEqual(3, len(test_son)) + test_son.popitem() + self.assertEqual(2, len(test_son)) + + def test_keys(self): + # Test to make sure that set operations do not throw an error + d = SON().keys() + for i in [OrderedDict, dict]: + try: + d - i().keys() + except TypeError: + self.fail( + "SON().keys() is not returning an object 
compatible " + "with %s objects" % (str(i)) + ) + # Test to verify correctness + d = SON({"k": "v"}).keys() + for i in [OrderedDict, dict]: + self.assertEqual(d | i({"k1": 0}).keys(), {"k", "k1"}) + for i in [OrderedDict, dict]: + self.assertEqual(d - i({"k": 0}).keys(), set()) + if __name__ == "__main__": unittest.main() diff --git a/test/test_son_manipulator.py b/test/test_son_manipulator.py deleted file mode 100644 index a3eebf7b22..0000000000 --- a/test/test_son_manipulator.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for SONManipulators. -""" - -import unittest -import sys -sys.path[0:0] = [""] - -from bson.son import SON -from pymongo.database import Database -from pymongo.son_manipulator import (NamespaceInjector, - ObjectIdInjector, - ObjectIdShuffler, - SONManipulator) -from test.test_client import get_client -from test import qcheck - - -class TestSONManipulator(unittest.TestCase): - - def setUp(self): - self.db = Database(get_client(), "pymongo_test") - - def tearDown(self): - self.db = None - - def test_basic(self): - manip = SONManipulator() - collection = self.db.test - - def incoming_is_identity(son): - return son == manip.transform_incoming(son, collection) - qcheck.check_unittest(self, incoming_is_identity, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_injection(self): - manip = ObjectIdInjector() - collection = self.db.test - - def incoming_adds_id(son): - son = manip.transform_incoming(son, collection) - assert "_id" in son - return True - qcheck.check_unittest(self, incoming_adds_id, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_shuffling(self): - manip = ObjectIdShuffler() - collection = self.db.test - - def incoming_moves_id(son_in): - son = manip.transform_incoming(son_in, collection) - if not "_id" in son: - return True - for (k, v) in son.items(): - self.assertEqual(k, "_id") - break - # Key order matters in SON equality test, - # matching collections.OrderedDict - if isinstance(son_in, SON): - return son_in.to_dict() == son.to_dict() - return son_in == son - - self.assertTrue(incoming_moves_id({})) - self.assertTrue(incoming_moves_id({"_id": 12})) - self.assertTrue(incoming_moves_id({"hello": "world", "_id": 12})) - self.assertTrue(incoming_moves_id(SON([("hello", "world"), - ("_id", 12)]))) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_ns_injection(self): - manip = NamespaceInjector() - collection = self.db.test - - def incoming_adds_ns(son): - son = manip.transform_incoming(son, collection) - assert "_ns" in 
son - return son["_ns"] == collection.name - qcheck.check_unittest(self, incoming_adds_ns, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py new file mode 100644 index 0000000000..f5096bea01 --- /dev/null +++ b/test/test_srv_polling.py @@ -0,0 +1,387 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import asyncio +import sys +import time +from test.utils import flaky +from test.utils_shared import FunctionCallRecorder +from typing import Any + +sys.path[0:0] = [""] + +from test import PyMongoTestCase, client_knobs, unittest +from test.utils import wait_until + +import pymongo +from pymongo import common +from pymongo.errors import ConfigurationError +from pymongo.synchronous.srv_resolver import _have_dnspython + +_IS_SYNC = True + +WAIT_TIME = 0.1 + + +class SrvPollingKnobs: + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): + self.ttl_time = ttl_time + self.min_srv_rescan_interval = min_srv_rescan_interval + self.nodelist_callback = nodelist_callback + self.count_resolver_calls = count_resolver_calls + + self.old_min_srv_rescan_interval = None + self.old_dns_resolver_response = None + + def enable(self): + self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL + self.old_dns_resolver_response = ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) + + if self.min_srv_rescan_interval is not None: + common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval + + def mock_get_hosts_and_min_ttl(resolver, *args): + assert self.old_dns_resolver_response is not None + nodes, ttl = self.old_dns_resolver_response(resolver) + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() + if self.ttl_time is not None: + ttl = self.ttl_time + return nodes, ttl + + patch_func: Any + if self.count_resolver_calls: + patch_func = FunctionCallRecorder(mock_get_hosts_and_min_ttl) + else: + patch_func = mock_get_hosts_and_min_ttl + + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + + def __enter__(self): + self.enable() + + def disable(self): + common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response + ) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + +class TestSrvPolling(PyMongoTestCase): + BASE_SRV_RESPONSE = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27018), + ] + + CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" + + def setUp(self): + # 
Patch timeouts to ensure short rescan SRV interval.
+        self.client_knobs = client_knobs(
+            heartbeat_frequency=WAIT_TIME,
+            min_heartbeat_interval=WAIT_TIME,
+            events_queue_frequency=WAIT_TIME,
+        )
+        self.client_knobs.enable()
+
+    def tearDown(self):
+        self.client_knobs.disable()
+
+    def get_nodelist(self, client):
+        return client._topology.description.server_descriptions().keys()
+
+    def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)):
+        """Check if the client._topology eventually sees all nodes in the
+        expected_nodelist.
+        """
+
+        def predicate():
+            nodelist = self.get_nodelist(client)
+            return set(expected_nodelist) == set(nodelist)
+
+        wait_until(predicate, "see expected nodelist", timeout=timeout)
+
+    def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)):
+        """Check that the client._topology never deviates from seeing all
+        nodes in the expected_nodelist within the timeout. Also check that
+        the resolver is called at least once.
+        """
+
+        def predicate():
+            if set(expected_nodelist) == set(self.get_nodelist(client)):
+                return (
+                    pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count
+                    >= 1
+                )
+            return False
+
+        wait_until(predicate, "Node list equals expected nodelist", timeout=timeout)
+
+        nodelist = self.get_nodelist(client)
+        if set(expected_nodelist) != set(nodelist):
+            msg = "Client nodelist %s changed unexpectedly (expected %s)"
+            self.fail(msg % (nodelist, expected_nodelist))
+        self.assertGreaterEqual(
+            pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count,  # type: ignore
+            1,
+            "resolver was never called",
+        )
+        return True
+
+    def run_scenario(self, dns_response, expect_change):
+        self.assertTrue(_have_dnspython())
+        if callable(dns_response):
+            dns_resolver_response = dns_response
+        else:
+
+            def dns_resolver_response():
+                return dns_response
+
+        if expect_change:
+            assertion_method = self.assert_nodelist_change
+            count_resolver_calls = False
+            expected_response = dns_response
+        else:
+            assertion_method = self.assert_nodelist_nochange
+            count_resolver_calls = True
+            expected_response = self.BASE_SRV_RESPONSE
+
+        # Patch timeouts to ensure short test running times.
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING)
+            client._connect()
+            self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client)
+            # Patch list of hosts returned by DNS query.
+            with SrvPollingKnobs(
+                nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls
+            ):
+                assertion_method(expected_response, client)
+
+        # Close the client early to avoid affecting the next scenario run.
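+        # (An open client would keep polling SRV records in the background,
+        # which could let this scenario's patched resolver skew the call
+        # counts checked by the next run.)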
+ client.close() + + def test_addition(self): + response = self.BASE_SRV_RESPONSE[:] + response.append(("localhost.test.build.10gen.cc", 27019)) + self.run_scenario(response, True) + + def test_removal(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + self.run_scenario(response, True) + + def test_replace_one(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) + self.run_scenario(response, True) + + def test_replace_both_with_one(self): + response = [("localhost.test.build.10gen.cc", 27019)] + self.run_scenario(response, True) + + def test_replace_both_with_two(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + self.run_scenario(response, True) + + def test_dns_failures(self): + from dns import exception + + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + + def response_callback(*args): + raise exc("DNS Failure!") + + self.run_scenario(response_callback, False) + + @flaky(reason="PYTHON-5500", max_runs=3) + def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + + def test_dns_record_lookup_empty(self): + response: list = [] + self.run_scenario(response, False) + + def _test_recover_from_initial(self, initial_callback): + # Construct a valid final response callback distinct from base. + response_final = self.BASE_SRV_RESPONSE[:] + response_final.pop() + + def final_callback(): + return response_final + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): + # Client uses unpatched method to get initial nodelist + client = self.simple_client(self.CONNECTION_STRING) + client._connect() + # Invalid DNS resolver response should not change nodelist. + self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): + # Nodelist should reflect new valid DNS resolver response. 
+            self.assert_nodelist_change(response_final, client)
+
+    @flaky(reason="PYTHON-5315")
+    def test_recover_from_initially_empty_seedlist(self):
+        def empty_seedlist():
+            return []
+
+        self._test_recover_from_initial(empty_seedlist)
+
+    @flaky(reason="PYTHON-5315")
+    def test_recover_from_initially_erroring_seedlist(self):
+        def erroring_seedlist():
+            raise ConfigurationError
+
+        self._test_recover_from_initial(erroring_seedlist)
+
+    def test_10_all_dns_selected(self):
+        response = [
+            ("localhost.test.build.10gen.cc", 27017),
+            ("localhost.test.build.10gen.cc", 27019),
+            ("localhost.test.build.10gen.cc", 27020),
+        ]
+
+        def nodelist_callback():
+            return response
+
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0)
+            client._connect()
+            with SrvPollingKnobs(nodelist_callback=nodelist_callback):
+                self.assert_nodelist_change(response, client)
+
+    def test_11_all_dns_selected(self):
+        response = [
+            ("localhost.test.build.10gen.cc", 27019),
+            ("localhost.test.build.10gen.cc", 27020),
+        ]
+
+        def nodelist_callback():
+            return response
+
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2)
+            client._connect()
+            with SrvPollingKnobs(nodelist_callback=nodelist_callback):
+                self.assert_nodelist_change(response, client)
+
+    def test_12_new_dns_randomly_selected(self):
+        response = [
+            ("localhost.test.build.10gen.cc", 27020),
+            ("localhost.test.build.10gen.cc", 27019),
+            ("localhost.test.build.10gen.cc", 27017),
+        ]
+
+        def nodelist_callback():
+            return response
+
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2)
+            client._connect()
+            with SrvPollingKnobs(nodelist_callback=nodelist_callback):
+                time.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL)
+                final_topology = set(client.topology_description.server_descriptions())
+                self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology)
+                self.assertEqual(len(final_topology), 2)
+
+    def test_does_not_flipflop(self):
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1)
+            client._connect()
+            old = set(client.topology_description.server_descriptions())
+            time.sleep(4 * WAIT_TIME)
+            new = set(client.topology_description.server_descriptions())
+            self.assertSetEqual(old, new)
+
+    def test_srv_service_name(self):
+        # SRV response for the custom srvServiceName configured below.
+ response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" + ) + client._connect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + + def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = self.simple_client(self.CONNECTION_STRING) + client._connect() + with self.assertRaises(AssertionError): + self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index d4aa56f8b2..b1e9a65eb5 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -1,4 +1,4 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,450 +13,677 @@ # limitations under the License. """Tests for SSL support.""" +from __future__ import annotations import os +import pathlib import socket import sys -import unittest +sys.path[0:0] = [""] + +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + PyMongoTestCase, + SkipTest, + client_context, + connected, + remove_all_users, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + cat_files, + ignore_deprecations, +) +from urllib.parse import quote_plus + +from pymongo import MongoClient, ssl_support +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context +from pymongo.write_concern import WriteConcern + +_HAVE_PYOPENSSL = False try: - from ssl import CertificateError -except ImportError: - # Backport. 
- from pymongo.ssl_match_hostname import CertificateError + # All of these must be available to use PyOpenSSL + import OpenSSL + import requests + import service_identity -sys.path[0:0] = [""] + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address -from urllib import quote_plus + from pymongo.ocsp_support import _load_trusted_ca_certs -from nose.plugins.skip import SkipTest + _HAVE_PYOPENSSL = True +except ImportError: + _load_trusted_ca_certs = None # type: ignore -from pymongo import MongoClient, MongoReplicaSetClient -from pymongo.common import HAS_SSL -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - OperationFailure) -from test import host, port, pair, version -from test.utils import server_started_with_auth, remove_all_users +if HAVE_SSL: + import ssl + +_IS_SYNC = True -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.path.join(CERT_PATH, 'client.pem') -CA_PEM = os.path.join(CERT_PATH, 'ca.pem') -SIMPLE_SSL = False -CERT_SSL = False -SERVER_IS_RESOLVABLE = False -MONGODB_X509_USERNAME = ( - "CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US") +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" # To fully test this start a mongod instance (built with SSL support) like so: # mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ -# --sslPEMKeyFile /path/to/mongo/jstests/libs/server.pem \ -# --sslCAFile /path/to/mongo/jstests/libs/ca.pem \ -# --sslCRLFile /path/to/mongo/jstests/libs/crl.pem \ +# --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ +# --sslCAFile /path/to/pymongo/test/certificates/ca.pem \ # --sslWeakCertificateValidation # Also, make sure you have 'server' as an alias for localhost in /etc/hosts # -# Note: For all tests to pass with MongoReplicaSetClient the replica -# set configuration must use 'server' for the hostname of all hosts. - -def is_server_resolvable(): - """Returns True if 'server' is resolvable.""" - socket_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(1) - try: - try: - socket.gethostbyname('server') - return True - except socket.error: - return False - finally: - socket.setdefaulttimeout(socket_timeout) - - -if HAS_SSL: - import ssl - - # Check this all once instead of before every test method below. - - # Is MongoDB configured for SSL? - try: - MongoClient(host, port, connectTimeoutMS=100, ssl=True) - SIMPLE_SSL = True - except ConnectionFailure: - # Is MongoDB configured with server.pem, ca.pem, and crl.pem from - # mongodb jstests/lib? - try: - MongoClient(host, port, connectTimeoutMS=100, ssl=True, - ssl_certfile=CLIENT_PEM) - CERT_SSL = True - except ConnectionFailure: - pass - - if CERT_SSL: - SERVER_IS_RESOLVABLE = is_server_resolvable() +# Note: For all replica set tests to pass, the replica set configuration must +# use 'localhost' for the hostname of all hosts. 
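+#
+# As a minimal sketch (reusing the CLIENT_PEM and CA_PEM paths defined above;
+# plain MongoClient shown for clarity), the fully verified client these tests
+# construct boils down to:
+#
+#   client = MongoClient(
+#       "localhost",
+#       27017,
+#       tls=True,
+#       tlsCertificateKeyFile=CLIENT_PEM,
+#       tlsCAFile=CA_PEM,
+#   )
+#   client.admin.command("ping")  # exercises the TLS handshake end to end
+#   client.close()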
-class TestClientSSL(unittest.TestCase): - +class TestClientSSL(PyMongoTestCase): + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): - # Test that ConfigurationError is raised if the ssl - # module isn't available. - if HAS_SSL: - raise SkipTest( - "The ssl module is available, can't test what happens " - "without it." - ) - # Explicit - self.assertRaises(ConfigurationError, - MongoClient, ssl=True) - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, replicaSet='rs', ssl=True) + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) + # Implied - self.assertRaises(ConfigurationError, - MongoClient, ssl_certfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, - replicaSet='rs', - ssl_certfile=CLIENT_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations def test_config_ssl(self): - """Tests various ssl configurations""" - self.assertRaises(ConfigurationError, MongoClient, ssl='foo') - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) - self.assertRaises(TypeError, MongoClient, ssl=0) - self.assertRaises(TypeError, MongoClient, ssl=5.5) - self.assertRaises(TypeError, MongoClient, ssl=[]) - - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, replicaSet='rs', ssl='foo') - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, - replicaSet='rs', - ssl=False, - ssl_certfile=CLIENT_PEM) - self.assertRaises(TypeError, - MongoReplicaSetClient, replicaSet='rs', ssl=0) - self.assertRaises(TypeError, - MongoReplicaSetClient, replicaSet='rs', ssl=5.5) - self.assertRaises(TypeError, - MongoReplicaSetClient, replicaSet='rs', ssl=[]) - - self.assertRaises(IOError, MongoClient, ssl_certfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_certfile=True) - self.assertRaises(TypeError, MongoClient, ssl_certfile=[]) - self.assertRaises(IOError, MongoClient, ssl_keyfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_keyfile=True) - self.assertRaises(TypeError, MongoClient, ssl_keyfile=[]) - - self.assertRaises(IOError, - MongoReplicaSetClient, - replicaSet='rs', - ssl_keyfile="NoSuchFile") - self.assertRaises(IOError, - MongoReplicaSetClient, - replicaSet='rs', - ssl_certfile="NoSuchFile") - self.assertRaises(TypeError, - MongoReplicaSetClient, - replicaSet='rs', - ssl_certfile=True) + # Tests various ssl configurations + self.assertRaises(ValueError, self.simple_client, ssl="foo") + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) + + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) # Test invalid combinations - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM, - ssl_certfile=CLIENT_PEM) - - 
self.assertRaises(ConfigurationError, - MongoReplicaSetClient, - replicaSet='rs', - ssl=False, - ssl_keyfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, - replicaSet='rs', - ssl=False, - ssl_certfile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoReplicaSetClient, - replicaSet='rs', - ssl=False, - ssl_keyfile=CLIENT_PEM, - ssl_certfile=CLIENT_PEM) - - -class TestSSL(unittest.TestCase): - + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False + ) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_pyopenssl_when_available(self): + self.assertTrue(HAVE_PYSSL) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) + + +class TestSSL(IntegrationTest): + saved_port: int + + def assertClientWorks(self, client): + coll = client.pymongo_test.ssl_test.with_options( + write_concern=WriteConcern(w=client_context.w) + ) + coll.drop() + coll.insert_one({"ssl": True}) + self.assertTrue((coll.find_one())["ssl"]) + coll.drop() + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") def setUp(self): - if not HAS_SSL: - raise SkipTest("The ssl module is not available.") + super().setUp() + # MongoClient should connect to the primary by default. 
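+        # (MongoClient.PORT is the class-level default port, so after this
+        # patch a bare MongoClient("localhost") targets the test server's
+        # port instead of 27017; tearDown below restores the saved value.)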
+ self.saved_port = MongoClient.PORT + MongoClient.PORT = client_context.port - if sys.version.startswith('3.0'): - raise SkipTest("Python 3.0.x has problems " - "with SSL and socket timeouts.") + def tearDown(self): + MongoClient.PORT = self.saved_port + @client_context.require_tls def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation - if not SIMPLE_SSL: - raise SkipTest("No simple mongod available over SSL") - - client = MongoClient(host, port, ssl=True) - response = client.admin.command('ismaster') - if 'setName' in response: - client = MongoReplicaSetClient(pair, - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True) - - db = client.pymongo_ssl_test - db.test.drop() - self.assertTrue(db.test.insert({'ssl': True})) - self.assertTrue(db.test.find_one()['ssl']) - client.drop_database('pymongo_ssl_test') - - def test_cert_ssl(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests eg: - # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # --sslCRLFile=jstests/libs/crl.pem + self.assertClientWorks(self.client) + + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version + @ignore_deprecations + def test_tlsCertificateKeyFilePassword(self): + # Expects the server to be running with server.pem and ca.pem # - # Also requires an /etc/hosts entry where "server" is resolvable - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - client = MongoClient(host, port, ssl=True, ssl_certfile=CLIENT_PEM) - response = client.admin.command('ismaster') - if 'setName' in response: - client = MongoReplicaSetClient(pair, - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, ssl_certfile=CLIENT_PEM) - - db = client.pymongo_ssl_test - db.test.drop() - self.assertTrue(db.test.insert({'ssl': True})) - self.assertTrue(db.test.find_one()['ssl']) - client.drop_database('pymongo_ssl_test') + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + connected( + self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + connected( + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_implicitly_set(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests eg: + # Expects the server to be running with server.pem and ca.pem # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # 
--sslCRLFile=jstests/libs/crl.pem + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - # Also requires an /etc/hosts entry where "server" is resolvable - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - client = MongoClient(host, port, ssl_certfile=CLIENT_PEM) - response = client.admin.command('ismaster') - if 'setName' in response: - client = MongoReplicaSetClient(pair, - replicaSet=response['setName'], - w=len(response['hosts']), - ssl_certfile=CLIENT_PEM) - - db = client.pymongo_ssl_test - db.test.drop() - self.assertTrue(db.test.insert({'ssl': True})) - self.assertTrue(db.test.find_one()['ssl']) - client.drop_database('pymongo_ssl_test') + # test that setting tlsCertificateKeyFile causes ssl to be set to True + client = self.simple_client( + client_context.host, + client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + response = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + client = self.simple_client( + client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + self.assertClientWorks(client) + + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_validation(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests eg: + # Expects the server to be running with server.pem and ca.pem # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # --sslCRLFile=jstests/libs/crl.pem + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - # Also requires an /etc/hosts entry where "server" is resolvable - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - if not SERVER_IS_RESOLVABLE: - raise SkipTest("No hosts entry for 'server'. Cannot validate " - "hostname in the certificate") - - client = MongoClient('server', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) - response = client.admin.command('ismaster') - if 'setName' in response: - if response['primary'].split(":")[0] != 'server': - raise SkipTest("No hosts in the replicaset for 'server'. " - "Cannot validate hostname in the certificate") - - client = MongoReplicaSetClient('server', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) - - db = client.pymongo_ssl_test - db.test.drop() - self.assertTrue(db.test.insert({'ssl': True})) - self.assertTrue(db.test.find_one()['ssl']) - client.drop_database('pymongo_ssl_test') - - def test_cert_ssl_validation_optional(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests eg: + client = self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + response = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. 
" + "Cannot validate hostname in the certificate" + ) + + client = self.simple_client( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + + self.assertClientWorks(client) + + if HAVE_IPADDRESS: + client = self.simple_client( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + self.assertClientWorks(client) + + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_auth + @ignore_deprecations + def test_cert_ssl_uri_support(self): + # Expects the server to be running with server.pem and ca.pem # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # --sslCRLFile=jstests/libs/crl.pem + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - # Also requires an /etc/hosts entry where "server" is resolvable - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - if not SERVER_IS_RESOLVABLE: - raise SkipTest("No hosts entry for 'server'. Cannot validate " - "hostname in the certificate") - - client = MongoClient('server', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - - response = client.admin.command('ismaster') - if 'setName' in response: - if response['primary'].split(":")[0] != 'server': - raise SkipTest("No hosts in the replicaset for 'server'. " - "Cannot validate hostname in the certificate") - - client = MongoReplicaSetClient('server', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - - db = client.pymongo_ssl_test - db.test.drop() - self.assertTrue(db.test.insert({'ssl': True})) - self.assertTrue(db.test.find_one()['ssl']) - client.drop_database('pymongo_ssl_test') - - def test_cert_ssl_validation_hostname_fail(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests eg: + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + self.assertClientWorks(client) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @client_context.require_tlsCertificateKeyFile + @client_context.require_server_resolvable + @client_context.require_no_api_version + @ignore_deprecations + def test_cert_ssl_validation_hostname_matching(self): + # Expects the server to be running with server.pem and ca.pem # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # --sslCRLFile=jstests/libs/crl.pem - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - client = MongoClient(host, port, ssl=True, ssl_certfile=CLIENT_PEM) - response = client.admin.command('ismaster') - - try: - MongoClient(pair, + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, True, False, False, 
_IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + self.assertTrue(ctx.check_hostname) + + response = self.client.admin.command(HelloCompat.LEGACY_CMD) + + with self.assertRaises(ConnectionFailure) as cm: + connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) + + connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + if "setName" in response: + with self.assertRaises(ConnectionFailure): + connected( + self.simple_client( + "server", + replicaSet=response["setName"], ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) - self.fail("Invalid hostname should have failed") - except CertificateError: - pass + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) - if 'setName' in response: - try: - MongoReplicaSetClient(pair, - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) - self.fail("Invalid hostname should have failed") - except CertificateError: - pass + @client_context.require_tlsCertificateKeyFile + @client_context.require_sync + @client_context.require_no_api_version + @ignore_deprecations + def test_tlsCRLFile_support(self): + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) - def test_mongodb_x509_auth(self): - # Expects the server to be running with the server.pem, ca.pem - # and crl.pem provided in mongodb and the server tests as well as - # --auth + with self.assertRaises(ConnectionFailure): + connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ConnectionFailure): + connected( + 
self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @client_context.require_tlsCertificateKeyFile + @client_context.require_server_resolvable + @client_context.require_no_api_version + @ignore_deprecations + def test_validation_with_system_ca_certs(self): + # Expects the server to be running with server.pem and ca.pem. # - # --sslPEMKeyFile=jstests/libs/server.pem - # --sslCAFile=jstests/libs/ca.pem - # --sslCRLFile=jstests/libs/crl.pem - # --auth - if not CERT_SSL: - raise SkipTest("No mongod available over SSL with certs") - - client = MongoClient(host, port, ssl=True, ssl_certfile=CLIENT_PEM) - if not version.at_least(client, (2, 5, 3, -1)): - raise SkipTest("MONGODB-X509 tests require MongoDB 2.5.3 or newer") - if not server_started_with_auth(client): - raise SkipTest('Authentication is not enabled on server') - # Give admin all necessary privileges. - client['$external'].add_user(MONGODB_X509_USERNAME, roles=[ - {'role': 'readWriteAnyDatabase', 'db': 'admin'}, - {'role': 'userAdminAnyDatabase', 'db': 'admin'}]) - coll = client.pymongo_test.test - self.assertRaises(OperationFailure, coll.count) - self.assertTrue(client.admin.authenticate(MONGODB_X509_USERNAME, - mechanism='MONGODB-X509')) - self.assertTrue(coll.remove()) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % (quote_plus(MONGODB_X509_USERNAME), host, port)) - # SSL options aren't supported in the URI... - self.assertTrue(MongoClient(uri, ssl=True, ssl_certfile=CLIENT_PEM)) - - # Should require a username - uri = ('mongodb://%s:%d/?authMechanism=MONGODB-X509' % (host, port)) - client_bad = MongoClient(uri, ssl=True, ssl_certfile=CLIENT_PEM) - self.assertRaises(OperationFailure, client_bad.pymongo_test.test.remove) + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # --sslWeakCertificateValidation + # + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails + connected( + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + # Server cert is verified. Disable hostname matching. + connected( + self.simple_client( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + # Server cert and hostname are verified. + connected( + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert and hostname are verified. + connected( + self.simple_client( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] + ) + ) + + def test_system_certs_config_error(self): + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") + + have_certifi = ssl_support.HAVE_CERTIFI + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test regardless of environment. 
+ ssl_support.HAVE_CERTIFI = False + ssl_support.HAVE_WINCERTSTORE = False + try: + with self.assertRaises(ConfigurationError): + self.simple_client("mongodb://localhost/?ssl=true") + finally: + ssl_support.HAVE_CERTIFI = have_certifi + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_certifi_support(self): + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_CERTIFI: + raise SkipTest("Need certifi to test certifi support.") + + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test on Windows, regardless of environment. + ssl_support.HAVE_WINCERTSTORE = False + try: + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) + finally: + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_wincertstore(self): + if sys.platform != "win32": + raise SkipTest("Only valid on Windows.") + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_WINCERTSTORE: + raise SkipTest("Need wincertstore to test wincertstore.") + + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) + + @client_context.require_auth + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version + @ignore_deprecations + def test_mongodb_x509_auth(self): + host, port = client_context.host, client_context.port + self.addCleanup(remove_all_users, client_context.client["$external"]) + + # Give x509 user all necessary privileges. + client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) + + noauth = self.simple_client( + client_context.pair, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + noauth.pymongo_test.test.find_one() + + listener = EventListener() + auth = self.simple_client( + client_context.pair, + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + event_listeners=[listener], + ) + + # No error + auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. 
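+            # (With speculative authentication, the X.509 exchange rides
+            # along inside the connection handshake, so only the
+            # application-level "find" command is observed by the listener.)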
+ self.assertEqual(names, ["find"]) + else: + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + client.pymongo_test.test.find_one() + + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % (quote_plus("not the username"), host, port)) - self.assertRaises(ConfigurationError, MongoClient, uri, - ssl=True, ssl_certfile=CLIENT_PEM) - self.assertRaises(OperationFailure, client.admin.authenticate, - "not the username", - mechanism="MONGODB-X509") + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) + + bad_client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + + with self.assertRaises(OperationFailure): + bad_client.pymongo_test.test.find_one() + + bad_client = self.simple_client( + client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % (quote_plus(MONGODB_X509_USERNAME), host, port)) - self.assertRaises(ConnectionFailure, MongoClient, uri, - ssl=True, ssl_certfile=CA_PEM) - self.assertRaises(ConnectionFailure, MongoClient, pair, - ssl=True, ssl_certfile=CA_PEM) - - # Cleanup - remove_all_users(client['$external']) - client['$external'].logout() + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + try: + connected( + self.simple_client( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + ) + except (ConnectionFailure, ConfigurationError): + pass + else: + self.fail("Invalid certificate accepted.") + + @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version + @ignore_deprecations + def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. 
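+        # (cat_files concatenates the source files into its first argument,
+        # producing a single PEM bundle that contains both CA certificates.)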
+ cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + with self.simple_client( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(client.admin.command("ping")) + + @client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + def test_pyopenssl_ignored_in_async(self): + client = MongoClient("mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true") + client.admin.command("ping") # command doesn't matter, just needs it to connect + client.close() + if __name__ == "__main__": unittest.main() diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py new file mode 100644 index 0000000000..927230091f --- /dev/null +++ b/test/test_streaming_protocol.py @@ -0,0 +1,226 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import sys +import time + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils_shared import ( + HeartbeatEventListener, + ServerEventListener, + wait_until, +) + +from pymongo import monitoring +from pymongo.hello import HelloCompat + +_IS_SYNC = True + + +class TestStreamingProtocol(IntegrationTest): + @client_context.require_failCommand_appName + def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = self.rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) + # Force a connection. + client.admin.command("ping") + address = client.address + listener.reset() + + fail_hello = { + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", + }, + } + with self.fail_point(fail_hello): + + def _marked_unknown(event): + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) + + def _discovered_node(event): + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are not published synchronously + wait_until(marked_unknown, "mark node unknown") + wait_until(rediscovered, "rediscover node") + + # Server should be selectable. + client.admin.command("ping") + + @client_context.require_failCommand_appName + def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. 
We need to delay the initial hello + # to ensure that RTT is never zero. + name = "streamingRttTest" + delay_hello: dict = { + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': name, + }, + } + with self.fail_point(delay_hello): + client = self.rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) + # Force a connection. + client.admin.command("ping") + address = client.address + + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name + with self.fail_point(delay_hello): + + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? + topology = client._topology + sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None + return sd.round_trip_time > 0.250 + + wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") + + # Server should be selectable. + client.admin.command("ping") + + def changed_event(event): + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @client_context.require_failCommand_appName + def test_monitor_waits_after_server_check_error(self): + # This test implements: + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + fail_hello = { + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", + }, + } + with self.fail_point(fail_hello): + start = time.time() + client = self.single_client( + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) + # Force a connection. + client.admin.command("ping") + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable hello + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 4.0) + + @client_context.require_failCommand_appName + def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = self.single_client( + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) + # Force a connection. 
+ client.admin.command("ping") + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", + }, + } + with self.fail_point(fail_heartbeat): + wait_until(lambda: hb_listener.matching(hb_failed), "published failed event") + # Reconnect. + client.admin.command("ping") + + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_failed_events[0].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. + events = [type(e) for e in hb_listener.events[:4]] + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_thread_util.py b/test/test_thread_util.py deleted file mode 100644 index b90fdade93..0000000000 --- a/test/test_thread_util.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2012-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the thread_util module.""" - -import gc -import sys -import threading -import time -import unittest - -sys.path[0:0] = [""] - -from nose.plugins.skip import SkipTest - -from pymongo import thread_util -if thread_util.have_gevent: - import greenlet # Plain greenlets. - import gevent.greenlet # Gevent's enhanced Greenlets. - import gevent.hub - -from test.utils import looplet, my_partial, RendezvousThread - - -class TestIdent(unittest.TestCase): - """Ensure thread_util.Ident works for threads and greenlets. This has - gotten intricate from refactoring: we have classes, Watched and Unwatched, - that implement the logic for the two child threads / greenlets. For the - greenlet case it's easy to ensure the two children are alive at once, so - we run the Watched and Unwatched logic directly. For the thread case we - mix in the RendezvousThread class so we're sure both children are alive - when they call Ident.get(). - - 1. Store main thread's / greenlet's id - 2. Start 2 child threads / greenlets - 3. Store their values for Ident.get() - 4. Children reach rendezvous point - 5. Children call Ident.watch() - 6. One of the children calls Ident.unwatch() - 7. Children terminate - 8. 
Assert that children got different ids from each other and from main, - and assert watched child's callback was executed, and that unwatched - child's callback was not - """ - def _test_ident(self, use_greenlets): - if 'java' in sys.platform: - raise SkipTest("Can't rely on weakref callbacks in Jython") - - ident = thread_util.create_ident(use_greenlets) - - ids = set([ident.get()]) - unwatched_id = [] - done = set([ident.get()]) # Start with main thread's / greenlet's id. - died = set() - - class Watched(object): - def __init__(self, ident): - self._my_ident = ident - - def before_rendezvous(self): - self.my_id = self._my_ident.get() - ids.add(self.my_id) - - def after_rendezvous(self): - assert not self._my_ident.watching() - self._my_ident.watch(lambda ref: died.add(self.my_id)) - assert self._my_ident.watching() - done.add(self.my_id) - - class Unwatched(Watched): - def before_rendezvous(self): - Watched.before_rendezvous(self) - unwatched_id.append(self.my_id) - - def after_rendezvous(self): - Watched.after_rendezvous(self) - self._my_ident.unwatch(self.my_id) - assert not self._my_ident.watching() - - if use_greenlets: - class WatchedGreenlet(Watched): - def run(self): - self.before_rendezvous() - self.after_rendezvous() - - class UnwatchedGreenlet(Unwatched): - def run(self): - self.before_rendezvous() - self.after_rendezvous() - - t_watched = greenlet.greenlet(WatchedGreenlet(ident).run) - t_unwatched = greenlet.greenlet(UnwatchedGreenlet(ident).run) - looplet([t_watched, t_unwatched]) - else: - class WatchedThread(Watched, RendezvousThread): - def __init__(self, ident, state): - Watched.__init__(self, ident) - RendezvousThread.__init__(self, state) - - class UnwatchedThread(Unwatched, RendezvousThread): - def __init__(self, ident, state): - Unwatched.__init__(self, ident) - RendezvousThread.__init__(self, state) - - state = RendezvousThread.create_shared_state(2) - t_watched = WatchedThread(ident, state) - t_watched.start() - - t_unwatched = UnwatchedThread(ident, state) - t_unwatched.start() - - RendezvousThread.wait_for_rendezvous(state) - RendezvousThread.resume_after_rendezvous(state) - - t_watched.join() - t_unwatched.join() - - self.assertTrue(t_watched.passed) - self.assertTrue(t_unwatched.passed) - - # Remove references, let weakref callbacks run - del t_watched - del t_unwatched - - # Trigger final cleanup in Python <= 2.7.0. - # http://bugs.python.org/issue1868 - ident.get() - self.assertEqual(3, len(ids)) - self.assertEqual(3, len(done)) - - # Make sure thread is really gone - slept = 0 - while not died and slept < 10: - time.sleep(1) - gc.collect() - slept += 1 - - self.assertEqual(1, len(died)) - self.assertFalse(unwatched_id[0] in died) - - def test_thread_ident(self): - self._test_ident(False) - - def test_greenlet_ident(self): - if not thread_util.have_gevent: - raise SkipTest('greenlet not installed') - - self._test_ident(True) - - -class TestGreenletIdent(unittest.TestCase): - def setUp(self): - if not thread_util.have_gevent: - raise SkipTest("need Gevent") - - def test_unwatch_cleans_up(self): - # GreenletIdent.unwatch() should remove the on_thread_died callback - # from an enhanced Gevent Greenlet's list of links. 
- callback_ran = [False] - - def on_greenlet_died(_): - callback_ran[0] = True - - ident = thread_util.create_ident(use_greenlets=True) - - def watch_and_unwatch(): - ident.watch(on_greenlet_died) - ident.unwatch(ident.get()) - - g = gevent.greenlet.Greenlet(run=watch_and_unwatch) - g.start() - g.join(10) - the_hub = gevent.hub.get_hub() - if hasattr(the_hub, 'join'): - # Gevent 1.0 - the_hub.join() - else: - # Gevent 0.13 and less - the_hub.shutdown() - - self.assertTrue(g.successful()) - - # unwatch() canceled the callback. - self.assertFalse(callback_ran[0]) - - -class TestCounter(unittest.TestCase): - def _test_counter(self, use_greenlets): - counter = thread_util.Counter(use_greenlets) - - self.assertEqual(0, counter.dec()) - self.assertEqual(0, counter.get()) - self.assertEqual(0, counter.dec()) - self.assertEqual(0, counter.get()) - - done = set() - - def f(n): - for i in xrange(n): - self.assertEqual(i, counter.get()) - self.assertEqual(i + 1, counter.inc()) - - for i in xrange(n, 0, -1): - self.assertEqual(i, counter.get()) - self.assertEqual(i - 1, counter.dec()) - - self.assertEqual(0, counter.get()) - - # Extra decrements have no effect - self.assertEqual(0, counter.dec()) - self.assertEqual(0, counter.get()) - self.assertEqual(0, counter.dec()) - self.assertEqual(0, counter.get()) - - done.add(n) - - if use_greenlets: - greenlets = [ - greenlet.greenlet(my_partial(f, i)) for i in xrange(10)] - looplet(greenlets) - else: - threads = [ - threading.Thread(target=my_partial(f, i)) for i in xrange(10)] - for t in threads: - t.start() - for t in threads: - t.join() - - self.assertEqual(10, len(done)) - - def test_thread_counter(self): - self._test_counter(False) - - def test_greenlet_counter(self): - if not thread_util.have_gevent: - raise SkipTest('greenlet not installed') - - self._test_counter(True) - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_threads.py b/test/test_threads.py index c21f9fbf9a..3e469e28fe 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,69 +13,64 @@ # limitations under the License. 
"""Test that pymongo is thread safe.""" +from __future__ import annotations -import unittest import threading -import traceback +from test import IntegrationTest, client_context, unittest +from test.utils import joinall -from nose.plugins.skip import SkipTest -from test.utils import (joinall, remove_all_users, - server_started_with_auth, RendezvousThread) -from test.test_client import get_client -from test.utils import get_pool -from pymongo.pool import SocketInfo, _closed -from pymongo.errors import AutoReconnect, OperationFailure +@client_context.require_connection +def setUpModule(): + pass class AutoAuthenticateThreads(threading.Thread): - def __init__(self, collection, num): threading.Thread.__init__(self) self.coll = collection self.num = num - self.success = True - self.setDaemon(True) + self.success = False + self.daemon = True def run(self): - try: - for i in xrange(self.num): - self.coll.insert({'num':i}) - self.coll.find_one({'num':i}) - except Exception: - traceback.print_exc() - self.success = False + for i in range(self.num): + self.coll.insert_one({"num": i}) + self.coll.find_one({"num": i}) + self.success = True -class SaveAndFind(threading.Thread): +class SaveAndFind(threading.Thread): def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection - self.setDaemon(True) + self.daemon = True + self.passed = False def run(self): sum = 0 for document in self.collection.find(): sum += document["x"] + assert sum == 499500, "sum was %d not 499500" % sum + self.passed = True class Insert(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): - for _ in xrange(self.n): + for _ in range(self.n): error = True try: - self.collection.insert({"test": "insert"}) + self.collection.insert_one({"test": "insert"}) error = False except: if not self.expect_exception: @@ -86,21 +81,19 @@ def run(self): class Update(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): - for _ in xrange(self.n): + for _ in range(self.n): error = True try: - self.collection.update({"test": "unique"}, - {"$set": {"test": "update"}}) + self.collection.update_one({"test": "unique"}, {"$set": {"test": "update"}}) error = False except: if not self.expect_exception: @@ -110,86 +103,17 @@ def run(self): assert error -class IgnoreAutoReconnect(threading.Thread): - - def __init__(self, collection, n): - threading.Thread.__init__(self) - self.c = collection - self.n = n - self.setDaemon(True) - - def run(self): - for _ in range(self.n): - try: - self.c.find_one() - except AutoReconnect: - pass - - -class FindPauseFind(RendezvousThread): - """See test_server_disconnect() for details""" - def __init__(self, collection, state): - """Params: - `collection`: A collection for testing - `state`: A shared state object from RendezvousThread.shared_state() - """ - super(FindPauseFind, self).__init__(state) - self.collection = collection - - def before_rendezvous(self): - # acquire a socket - list(self.collection.find()) - - pool = get_pool(self.collection.database.connection) - socket_info = pool._get_request_state() - assert isinstance(socket_info, SocketInfo) - self.request_sock = socket_info.sock - assert not 
_closed(self.request_sock) - - def after_rendezvous(self): - # test_server_disconnect() has closed this socket, but that's ok - # because it's not our request socket anymore - assert _closed(self.request_sock) - - # if disconnect() properly replaced the pool, then this won't raise - # AutoReconnect because it will acquire a new socket - list(self.collection.find()) - assert self.collection.database.connection.in_request() - pool = get_pool(self.collection.database.connection) - assert self.request_sock != pool._get_request_state().sock - - -class BaseTestThreads(object): - """ - Base test class for TestThreads and TestThreadsReplicaSet. (This is not - itself a unittest.TestCase, otherwise it'd be run twice -- once when nose - imports this module, and once when nose imports - test_threads_replica_set_connection.py, which imports this module.) - """ +class TestThreads(IntegrationTest): def setUp(self): - self.db = self._get_client().pymongo_test - - def tearDown(self): - # Clear client reference so that RSC's monitor thread - # dies. - self.db = None - - def _get_client(self): - """ - Intended for overriding in TestThreadsReplicaSet. This method - returns a MongoClient here, and a MongoReplicaSetClient in - test_threads_replica_set_connection.py. - """ - # Regular test client - return get_client() + super().setUp() + self.db = self.client.pymongo_test def test_threading(self): self.db.drop_collection("test") - for i in xrange(1000): - self.db.test.save({"x": i}) + self.db.test.insert_many([{"x": i} for i in range(1000)]) threads = [] - for i in range(10): + for _i in range(10): t = SaveAndFind(self.db.test) t.start() threads.append(t) @@ -198,9 +122,9 @@ def test_threading(self): def test_safe_insert(self): self.db.drop_collection("test1") - self.db.test1.insert({"test": "insert"}) + self.db.test1.insert_one({"test": "insert"}) self.db.drop_collection("test2") - self.db.test2.insert({"test": "insert"}) + self.db.test2.insert_one({"test": "insert"}) self.db.test2.create_index("test", unique=True) self.db.test2.find_one() @@ -216,11 +140,11 @@ def test_safe_insert(self): def test_safe_update(self): self.db.drop_collection("test1") - self.db.test1.insert({"test": "update"}) - self.db.test1.insert({"test": "unique"}) + self.db.test1.insert_one({"test": "update"}) + self.db.test1.insert_one({"test": "unique"}) self.db.drop_collection("test2") - self.db.test2.insert({"test": "update"}) - self.db.test2.insert({"test": "unique"}) + self.db.test2.insert_one({"test": "update"}) + self.db.test2.insert_one({"test": "unique"}) self.db.test2.create_index("test", unique=True) self.db.test2.find_one() @@ -234,152 +158,6 @@ def test_safe_update(self): error.join() okay.join() - def test_server_disconnect(self): - # PYTHON-345, we need to make sure that threads' request sockets are - # closed by disconnect(). - # - # 1. Create a client with auto_start_request=True - # 2. Start N threads and do a find() in each to get a request socket - # 3. Pause all threads - # 4. In the main thread close all sockets, including threads' request - # sockets - # 5. In main thread, do a find(), which raises AutoReconnect and resets - # pool - # 6. Resume all threads, do a find() in them - # - # If we've fixed PYTHON-345, then only one AutoReconnect is raised, - # and all the threads get new request sockets. 
- cx = get_client(auto_start_request=True) - collection = cx.db.pymongo_test - - # acquire a request socket for the main thread - collection.find_one() - pool = get_pool(collection.database.connection) - socket_info = pool._get_request_state() - assert isinstance(socket_info, SocketInfo) - request_sock = socket_info.sock - - state = FindPauseFind.create_shared_state(nthreads=10) - - threads = [ - FindPauseFind(collection, state) - for _ in range(state.nthreads) - ] - - # Each thread does a find(), thus acquiring a request socket - for t in threads: - t.start() - - # Wait for the threads to reach the rendezvous - FindPauseFind.wait_for_rendezvous(state) - - try: - # Simulate an event that closes all sockets, e.g. primary stepdown - for t in threads: - t.request_sock.close() - - # Finally, ensure the main thread's socket's last_checkout is - # updated: - collection.find_one() - - # ... and close it: - request_sock.close() - - # Doing an operation on the client raises an AutoReconnect and - # resets the pool behind the scenes - self.assertRaises(AutoReconnect, collection.find_one) - - finally: - # Let threads do a second find() - FindPauseFind.resume_after_rendezvous(state) - - joinall(threads) - - for t in threads: - self.assertTrue(t.passed, "%s threw exception" % t) - - -class BaseTestThreadsAuth(object): - """ - Base test class for TestThreadsAuth and TestThreadsAuthReplicaSet. (This is - not itself a unittest.TestCase, otherwise it'd be run twice -- once when - nose imports this module, and once when nose imports - test_threads_replica_set_connection.py, which imports this module.) - """ - def _get_client(self): - """ - Intended for overriding in TestThreadsAuthReplicaSet. This method - returns a MongoClient here, and a MongoReplicaSetClient in - test_threads_replica_set_connection.py. - """ - # Regular test client - return get_client() - - def setUp(self): - client = self._get_client() - if not server_started_with_auth(client): - raise SkipTest("Authentication is not enabled on server") - self.client = client - self.client.admin.add_user('admin-user', 'password', - roles=['clusterAdmin', - 'dbAdminAnyDatabase', - 'readWriteAnyDatabase', - 'userAdminAnyDatabase']) - self.client.admin.authenticate("admin-user", "password") - self.client.auth_test.add_user("test-user", "password", - roles=['readWrite']) - - def tearDown(self): - # Remove auth users from databases - self.client.admin.authenticate("admin-user", "password") - remove_all_users(self.client.auth_test) - self.client.drop_database('auth_test') - remove_all_users(self.client.admin) - # Clear client reference so that RSC's monitor thread - # dies. 
- self.client = None - - def test_auto_auth_login(self): - client = self._get_client() - self.assertRaises(OperationFailure, client.auth_test.test.find_one) - - # Admin auth - client = self._get_client() - client.admin.authenticate("admin-user", "password") - - nthreads = 10 - threads = [] - for _ in xrange(nthreads): - t = AutoAuthenticateThreads(client.auth_test.test, 100) - t.start() - threads.append(t) - - joinall(threads) - - for t in threads: - self.assertTrue(t.success) - - # Database-specific auth - client = self._get_client() - client.auth_test.authenticate("test-user", "password") - - threads = [] - for _ in xrange(nthreads): - t = AutoAuthenticateThreads(client.auth_test.test, 100) - t.start() - threads.append(t) - - joinall(threads) - - for t in threads: - self.assertTrue(t.success) - -class TestThreads(BaseTestThreads, unittest.TestCase): - pass - -class TestThreadsAuth(BaseTestThreadsAuth, unittest.TestCase): - pass - if __name__ == "__main__": unittest.main() diff --git a/test/test_threads_replica_set_client.py b/test/test_threads_replica_set_client.py deleted file mode 100644 index 23b5176702..0000000000 --- a/test/test_threads_replica_set_client.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2011-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test that pymongo is thread safe.""" - -import unittest - -from pymongo.mongo_replica_set_client import MongoReplicaSetClient - -from test.test_threads import BaseTestThreads, BaseTestThreadsAuth -from test.test_replica_set_client import TestReplicaSetClientBase, pair - - -class TestThreadsReplicaSet(TestReplicaSetClientBase, BaseTestThreads): - def setUp(self): - """ - Prepare to test all the same things that TestThreads tests, but do it - with a replica-set client - """ - TestReplicaSetClientBase.setUp(self) - BaseTestThreads.setUp(self) - - def tearDown(self): - TestReplicaSetClientBase.tearDown(self) - BaseTestThreads.tearDown(self) - - def _get_client(self, **kwargs): - return TestReplicaSetClientBase._get_client(self, **kwargs) - - -class TestThreadsAuthReplicaSet(TestReplicaSetClientBase, BaseTestThreadsAuth): - - def setUp(self): - """ - Prepare to test all the same things that TestThreads tests, but do it - with a replica-set client - """ - TestReplicaSetClientBase.setUp(self) - BaseTestThreadsAuth.setUp(self) - - def tearDown(self): - TestReplicaSetClientBase.tearDown(self) - BaseTestThreadsAuth.tearDown(self) - - def _get_client(self): - """ - Override TestThreadsAuth, so its tests run on a MongoReplicaSetClient - instead of a regular MongoClient. 
- """ - return MongoReplicaSetClient(pair, replicaSet=self.name) - - -if __name__ == "__main__": - suite = unittest.TestSuite([ - unittest.makeSuite(TestThreadsReplicaSet), - unittest.makeSuite(TestThreadsAuthReplicaSet) - ]) - unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/test/test_timestamp.py b/test/test_timestamp.py index 5a4930c3ec..ef7d8bde15 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,14 +13,17 @@ # limitations under the License. """Tests for the Timestamp class.""" +from __future__ import annotations -import datetime -import unittest -import sys import copy +import datetime import pickle +import sys + sys.path[0:0] = [""] +from test import unittest + from bson.timestamp import Timestamp from bson.tz_util import utc @@ -30,7 +33,7 @@ def test_timestamp(self): t = Timestamp(123, 456) self.assertEqual(t.time, 123) self.assertEqual(t.inc, 456) - self.assertTrue(isinstance(t, Timestamp)) + self.assertIsInstance(t, Timestamp) def test_datetime(self): d = datetime.datetime(2010, 5, 5, tzinfo=utc) @@ -69,9 +72,15 @@ def test_equality(self): # Explicitly test inequality self.assertFalse(t != Timestamp(1, 1)) + def test_hash(self): + self.assertEqual(hash(Timestamp(1, 2)), hash(Timestamp(1, 2))) + self.assertNotEqual(hash(Timestamp(1, 2)), hash(Timestamp(1, 3))) + self.assertNotEqual(hash(Timestamp(1, 2)), hash(Timestamp(2, 2))) + def test_repr(self): t = Timestamp(0, 0) self.assertEqual(repr(t), "Timestamp(0, 0)") + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py new file mode 100644 index 0000000000..d3bbcd9060 --- /dev/null +++ b/test/test_topology.py @@ -0,0 +1,924 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import sys + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test import client_knobs, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, flaky +from test.utils_shared import wait_until + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure +from pymongo.hello import Hello, HelloCompat +from pymongo.read_preferences import Primary, ReadPreference, Secondary +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import PoolOptions +from pymongo.synchronous.server import Server +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext, _filter_servers +from pymongo.topology_description import TOPOLOGY_TYPE + + +class SetNameDiscoverySettings(TopologySettings): + def get_topology_type(self): + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + + +address = ("a", 27017) + + +def create_mock_topology( + seeds=None, + replica_set_name=None, + monitor_class=DummyMonitor, + direct_connection=False, +): + partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) + topology_settings = TopologySettings( + partitioned_seeds, + replica_set_name=replica_set_name, + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=monitor_class, + direct_connection=direct_connection, + ) + + t = Topology(topology_settings) + t.open() + return t + + +def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) + + topology.on_change(server_description) + + +def disconnected(topology, server_address): + # Create new description of server type Unknown. + topology.on_change(ServerDescription(server_address)) + + +def get_server(topology, hostname): + return topology.get_server_by_address((hostname, 27017)) + + +def get_type(topology, hostname): + return get_server(topology, hostname).description.server_type + + +def get_monitor(topology, hostname): + return get_server(topology, hostname)._monitor + + +class TopologyTest(unittest.TestCase): + """Disables periodic monitoring, to make tests deterministic.""" + + def setUp(self): + super().setUp() + self.client_knobs = client_knobs(heartbeat_frequency=999999) + self.client_knobs.enable() + self.addCleanup(self.client_knobs.disable) + + +class TestTopologyConfiguration(TopologyTest): + def test_timeout_configuration(self): + pool_options = PoolOptions(connect_timeout=1, socket_timeout=2) + topology_settings = TopologySettings(pool_options=pool_options) + t = Topology(topology_settings=topology_settings) + t.open() + + # Get the default server. + server = t.get_server_by_address(("localhost", 27017)) + + # The pool for application operations obeys our settings. + self.assertEqual(1, server._pool.opts.connect_timeout) + self.assertEqual(2, server._pool.opts.socket_timeout) + + # The pool for monitoring operations uses our connect_timeout as both + # its connect_timeout and its socket_timeout. 
+ monitor = server._monitor + self.assertEqual(1, monitor._pool.opts.connect_timeout) + self.assertEqual(1, monitor._pool.opts.socket_timeout) + + # The monitor, not its pool, is responsible for calling hello. + self.assertTrue(monitor._pool.is_sdam) + + def test_selector_fast_path(self): + topology = create_mock_topology(seeds=["a", "b:27018"], replica_set_name="foo") + description = topology.description + description._topology_type = TOPOLOGY_TYPE.ReplicaSetWithPrimary + + # There is no primary yet, so it should give an empty list. + self.assertEqual(description.apply_selector(Primary()), []) + + # If we set a primary server, we should get it back. + sd = list(description._server_descriptions.values())[0] + sd._server_type = SERVER_TYPE.RSPrimary + self.assertEqual(description.apply_selector(Primary()), [sd]) + + # If there is a custom selector, it should be applied. + def custom_selector(servers): + return [] + + self.assertEqual(description.apply_selector(Primary(), custom_selector=custom_selector), []) + + +class TestSingleServerTopology(TopologyTest): + def test_direct_connection(self): + for server_type, hello_response in [ + ( + SERVER_TYPE.RSPrimary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ( + SERVER_TYPE.RSSecondary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ( + SERVER_TYPE.Mongos, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "msg": "isdbgrid", + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ( + SERVER_TYPE.RSArbiter, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "arbiterOnly": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ( + SERVER_TYPE.Standalone, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + # A "slave" in a master-slave deployment. + # This replication type was removed in MongoDB + # 4.0. + ( + SERVER_TYPE.Standalone, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ]: + t = create_mock_topology(direct_connection=True) + + # Can't select a server while the only server is of type Unknown. + with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): + t.select_servers(any_server_selector, _Op.TEST, server_selection_timeout=0) + + got_hello(t, address, hello_response) + + # Topology type never changes. + self.assertEqual(TOPOLOGY_TYPE.Single, t.description.topology_type) + + # No matter whether the server is writable, + # select_servers() returns it. + s = t.select_server(writable_server_selector, _Op.TEST) + self.assertEqual(server_type, s.description.server_type) + + # Topology type single is always readable and writable regardless + # of server type or state. + self.assertEqual(t.description.topology_type_name, "Single") + self.assertTrue(t.description.has_writable_server()) + self.assertTrue(t.description.has_readable_server()) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertTrue( + t.description.has_readable_server(Secondary(tag_sets=[{"tag": "does-not-exist"}])) + ) + + def test_reopen(self): + t = create_mock_topology() + + # Additional calls are permitted. 
+ t.open() + t.open() + + def test_unavailable_seed(self): + t = create_mock_topology() + disconnected(t, address) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + + def test_round_trip_time(self): + round_trip_time = 125 + available = True + + class TestMonitor(Monitor): + def _check_with_socket(self, *args, **kwargs): + if available: + return ( + Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), + round_trip_time, + ) + else: + raise AutoReconnect("mock monitor error") + + t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) + s = t.select_server(writable_server_selector, _Op.TEST) + self.assertEqual(125, s.description.round_trip_time) + + round_trip_time = 25 + t.request_check_all() + + # Exponential weighted average: .8 * 125 + .2 * 25 = 105. + self.assertAlmostEqual(105, s.description.round_trip_time) + + # The server is temporarily down. + available = False + t.request_check_all() + + def raises_err(): + try: + t.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) + except ConnectionFailure: + return True + else: + return False + + wait_until(raises_err, "discover server is down") + self.assertIsNone(s.description.round_trip_time) + + # Bring it back, RTT is now 20 milliseconds. + available = True + round_trip_time = 20 + + def new_average(): + # We reset the average to the most recent measurement. + description = s.description + return ( + description.round_trip_time is not None + and round(abs(20 - description.round_trip_time), 7) == 0 + ) + + tries = 0 + while not new_average(): + t.request_check_all() + tries += 1 + if tries > 10: + self.fail("Didn't ever calculate correct new average") + + +class TestMultiServerTopology(TopologyTest): + def test_readable_writable(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") + self.assertTrue(t.description.has_writable_server()) + self.assertTrue(t.description.has_readable_server()) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": False, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetNoPrimary") + self.assertFalse(t.description.has_writable_server()) + self.assertFalse(t.description.has_readable_server()) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "tags": {"tag": "exists"}, + }, + ) + + 
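+        # With a primary and a matching tagged secondary, every read
+        # preference checked below can be satisfied.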
self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") + self.assertTrue(t.description.has_writable_server()) + self.assertTrue(t.description.has_readable_server()) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertTrue(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + def test_close(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertTrue(get_monitor(t, "a").opened) + self.assertTrue(get_monitor(t, "b").opened) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + t.close() + self.assertEqual(2, len(t.description.server_descriptions())) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + # A closed topology should not be updated when receiving a hello. + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b", "c"]}, + ) + + self.assertEqual(2, len(t.description.server_descriptions())) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) + # Server c should not have been added. 
+ self.assertEqual(None, get_server(t, "c")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + def test_handle_error(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("a", 27017), errctx) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + t.handle_error(("b", 27017), errctx) + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + def test_handle_error_removed_server(self): + t = create_mock_topology(replica_set_name="rs") + + # No error resetting a server not in the TopologyDescription. + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("b", 27017), errctx) + + # Server was *not* added as type Unknown. + self.assertFalse(t.has_server(("b", 27017))) + + def test_discover_set_name_from_primary(self): + # Discovering a replica set without the setName supplied by the user + # is not yet supported by MongoClient, but Topology can do it. + topology_settings = SetNameDiscoverySettings( + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] + ) + + t = Topology(topology_settings) + self.assertEqual(t.description.replica_set_name, None) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) + t.open() + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) + + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) + + # Another response from the primary. Tests the code that processes + # primary response when topology type is already ReplicaSetWithPrimary. + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) + + # No change. + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) + + def test_discover_set_name_from_secondary(self): + # Discovering a replica set without the setName supplied by the user + # is not yet supported by MongoClient, but Topology can do it. 
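+        # Unlike the primary case above, a secondary response can establish
+        # the set name but cannot move the topology to ReplicaSetWithPrimary.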
+ topology_settings = SetNameDiscoverySettings( + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] + ) + + t = Topology(topology_settings) + self.assertEqual(t.description.replica_set_name, None) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) + t.open() + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) + + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) + + def test_wire_version(self): + t = create_mock_topology(replica_set_name="rs") + t.description.check_compatible() # No error. + + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) + + # Use defaults. + server = t.get_server_by_address(address) + self.assertEqual(server.description.min_wire_version, 0) + self.assertEqual(server.description.max_wire_version, 0) + + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 1, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ) + + self.assertEqual(server.description.min_wire_version, 1) + self.assertEqual(server.description.max_wire_version, 8) + t.select_servers(any_server_selector, _Op.TEST) + + # Incompatible. + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 26, + "maxWireVersion": 27, + }, + ) + + try: + t.select_servers(any_server_selector, _Op.TEST) + except ConfigurationError as e: + # Error message should say which server failed and why. + self.assertEqual( + str(e), + "Server at a:27017 requires wire version 26, but this version " + "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), + ) + else: + self.fail("No error with incompatible wire version") + + # Incompatible. + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 0, + "maxWireVersion": 0, + }, + ) + + try: + t.select_servers(any_server_selector, _Op.TEST) + except ConfigurationError as e: + # Error message should say which server failed and why. + self.assertEqual( + str(e), + "Server at a:27017 reports wire version 0, but this version " + "of PyMongo requires at least %d (MongoDB %s)." + % (common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION), + ) + else: + self.fail("No error with incompatible wire version") + + def test_max_write_batch_size(self): + t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") + + def write_batch_size(): + s = t.select_server(writable_server_selector, _Op.TEST) + return s.description.max_write_batch_size + + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "maxWriteBatchSize": 1, + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "maxWriteBatchSize": 2, + }, + ) + + # Uses primary's max batch size. + self.assertEqual(1, write_batch_size()) + + # b becomes primary. 
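+        # Once b reports itself primary, writable-server selection returns b,
+        # so its maxWriteBatchSize takes effect.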
+        got_hello(
+            t,
+            ("b", 27017),
+            {
+                "ok": 1,
+                HelloCompat.LEGACY_CMD: True,
+                "setName": "rs",
+                "hosts": ["a", "b"],
+                "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION,
+                "maxWriteBatchSize": 2,
+            },
+        )
+
+        self.assertEqual(2, write_batch_size())
+
+    def test_topology_repr(self):
+        t = create_mock_topology(replica_set_name="rs")
+        self.addCleanup(t.close)
+        got_hello(
+            t,
+            ("a", 27017),
+            {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "c", "b"]},
+        )
+        self.assertEqual(
+            repr(t.description),
+            f"<TopologyDescription id: {t._topology_id}, "
+            "topology_type: ReplicaSetWithPrimary, servers: ["
+            "<ServerDescription ('a', 27017) server_type: RSPrimary, rtt: 0>, "
+            "<ServerDescription ('b', 27017) server_type: Unknown, rtt: None>, "
+            "<ServerDescription ('c', 27017) server_type: Unknown, rtt: None>]>",
+        )
+
+    def test_unexpected_load_balancer(self):
+        # Note: This behavior should not be reachable in practice but we
+        # should handle it gracefully nonetheless. See PYTHON-2791.
+        # Load balancers are included in topology with a single seed.
+        t = create_mock_topology(seeds=["a"])
+        mock_lb_response = {
+            "ok": 1,
+            "msg": "isdbgrid",
+            "serviceId": ObjectId(),
+            "maxWireVersion": 13,
+        }
+        got_hello(t, ("a", 27017), mock_lb_response)
+        sds = t.description.server_descriptions()
+        self.assertIn(("a", 27017), sds)
+        self.assertEqual(sds[("a", 27017)].server_type_name, "LoadBalancer")
+        self.assertEqual(t.description.topology_type_name, "Single")
+        self.assertTrue(t.description.has_writable_server())
+
+        # Load balancers are removed from a topology with multiple seeds.
+        t = create_mock_topology(seeds=["a", "b"])
+        got_hello(t, ("a", 27017), mock_lb_response)
+        self.assertNotIn(("a", 27017), t.description.server_descriptions())
+        self.assertEqual(t.description.topology_type_name, "Unknown")
+
+    def test_filtered_server_selection(self):
+        s1 = Server(ServerDescription(("localhost", 27017)), pool=object(), monitor=object())  # type: ignore[arg-type]
+        s2 = Server(ServerDescription(("localhost2", 27017)), pool=object(), monitor=object())  # type: ignore[arg-type]
+        servers = [s1, s2]
+
+        result = _filter_servers(servers, deprioritized_servers=[s2])
+        self.assertEqual(result, [s1])
+
+        result = _filter_servers(servers, deprioritized_servers=[s1, s2])
+        self.assertEqual(result, servers)
+
+        result = _filter_servers(servers, deprioritized_servers=[])
+        self.assertEqual(result, servers)
+
+        result = _filter_servers(servers)
+        self.assertEqual(result, servers)
+
+
+def wait_for_primary(topology):
+    """Wait for a Topology to discover a writable server.
+
+    If the monitor is currently calling hello, a blocking call to
+    select_server from this thread can trigger a spurious wake of the monitor
+    thread. In applications this is harmless but it would break some tests,
+    so we pass server_selection_timeout=0 and poll instead.
+    """
+
+    def get_primary():
+        try:
+            return topology.select_server(writable_server_selector, _Op.TEST, 0)
+        except ConnectionFailure:
+            return None
+
+    return wait_until(get_primary, "find primary")
+
+
+class TestTopologyErrors(TopologyTest):
+    # Errors when calling hello.
+
+    @flaky(reason="PYTHON-5366")
+    def test_pool_reset(self):
+        # hello succeeds at first, then always raises socket error.
+        hello_count = [0]
+
+        class TestMonitor(Monitor):
+            def _check_with_socket(self, *args, **kwargs):
+                hello_count[0] += 1
+                if hello_count[0] == 1:
+                    return Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), 0
+                else:
+                    raise AutoReconnect("mock monitor error")
+
+        t = create_mock_topology(monitor_class=TestMonitor)
+        self.addCleanup(t.close)
+        server = wait_for_primary(t)
+        self.assertEqual(1, hello_count[0])
+        generation = server.pool.gen.get_overall()
+
+        # Pool is reset by hello failure.
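+        # (Resetting a pool bumps its generation counter, so comparing
+        # gen.get_overall() before and after is a cheap way to observe the
+        # reset without touching any connections.)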
+ t.request_check_all() + self.assertNotEqual(generation, server.pool.gen.get_overall()) + + def test_hello_retry(self): + # hello succeeds at first, then raises socket error, then succeeds. + hello_count = [0] + + class TestMonitor(Monitor): + def _check_with_socket(self, *args, **kwargs): + hello_count[0] += 1 + if hello_count[0] in (1, 3): + return Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), 0 + else: + raise AutoReconnect(f"mock monitor error #{hello_count[0]}") + + t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) + server = wait_for_primary(t) + self.assertEqual(1, hello_count[0]) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + # Second hello call, server is marked Unknown, then the monitor + # immediately runs a retry (third hello). + t.request_check_all() + # The third hello call (the immediate retry) happens sometime soon + # after the failed check triggered by request_check_all. Wait until + # the server becomes known again. + server = t.select_server(writable_server_selector, _Op.TEST, 0.250) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + self.assertEqual(3, hello_count[0]) + + def test_internal_monitor_error(self): + exception = AssertionError("internal error") + + class TestMonitor(Monitor): + def _check_with_socket(self, *args, **kwargs): + raise exception + + t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) + with self.assertRaisesRegex(ConnectionFailure, "internal error"): + t.select_server(any_server_selector, _Op.TEST, server_selection_timeout=0.5) + + +class TestServerSelectionErrors(TopologyTest): + def assertMessage(self, message, topology, selector=any_server_selector): + with self.assertRaises(ConnectionFailure) as context: + topology.select_server(selector, _Op.TEST, server_selection_timeout=0) + + self.assertIn(message, str(context.exception)) + + def test_no_primary(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) + + self.assertMessage( + 'No replica set members match selector "Primary()"', t, ReadPreference.PRIMARY + ) + + self.assertMessage("No primary available for writes", t, writable_server_selector) + + def test_no_secondary(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) + + self.assertMessage( + "No replica set members match selector" + ' "Secondary(tag_sets=None, max_staleness=-1, hedge=None)"', + t, + ReadPreference.SECONDARY, + ) + + self.assertMessage( + "No replica set members match selector" + " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1, " + 'hedge=None)"', + t, + Secondary(tag_sets=[{"dc": "ny"}]), + ) + + def test_bad_replica_set_name(self): + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "wrong", + "hosts": ["a"], + }, + ) + + self.assertMessage('No replica set members available for replica set name "rs"', t) + + def test_multiple_standalones(self): + # Standalones are removed from a topology with multiple seeds. 
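+        # Per the SDAM spec, a standalone is only usable as the lone seed;
+        # with multiple seeds it is dropped from the topology.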
+ t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No servers available", t) + + def test_no_mongoses(self): + # Standalones are removed from a topology with multiple seeds. + t = create_mock_topology(seeds=["a", "b"]) + + # Discover a mongos and change topology type to Sharded. + got_hello(t, ("a", 27017), {"ok": 1, "msg": "isdbgrid"}) + + # Oops, both servers are standalone now. Remove them. + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No mongoses available", t) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_transactions.py b/test/test_transactions.py new file mode 100644 index 0000000000..813d6a688d --- /dev/null +++ b/test/test_transactions.py @@ -0,0 +1,618 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import sys +from io import BytesIO +from test.utils_spec_runner import SpecRunner + +from gridfs.synchronous.grid_file import GridFS, GridFSBucket +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import PoolState + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils_shared import ( + OvertCommandListener, + wait_until, +) +from typing import List + +from bson import encode +from bson.raw_bson import RawBSONDocument +from pymongo import WriteConcern, _csot +from pymongo.errors import ( + AutoReconnect, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import client_session +from pymongo.synchronous.client_session import TransactionOptions +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor + +_IS_SYNC = True + +# Max number of operations to perform after a transaction to prove unpinning +# occurs. Chosen so that there's a low false positive rate. With 2 mongoses, +# 50 attempts yields a one in a quadrillion chance of a false positive +# (1/(0.5^50)). 
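+#
+# A minimal sketch of that arithmetic (hypothetical helper, shown only for
+# illustration; nothing below calls it):
+def _same_mongos_probability(attempts: int, mongoses: int = 2) -> float:
+    """Chance that `attempts` operations all route to one specific mongos.
+
+    _same_mongos_probability(50) is about 8.9e-16, i.e. roughly one in a
+    quadrillion.
+    """
+    return (1.0 / mongoses) ** attempts
+
+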
+UNPIN_TEST_MAX_ATTEMPTS = 50 + + +class TransactionsBase(SpecRunner): + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not client_context.is_mongos + and not client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") + + +class TestTransactions(TransactionsBase): + @client_context.require_transactions + def test_transaction_options_validation(self): + default_options = TransactionOptions() + self.assertIsNone(default_options.read_concern) + self.assertIsNone(default_options.write_concern) + self.assertIsNone(default_options.read_preference) + self.assertIsNone(default_options.max_commit_time_ms) + # No error when valid options are provided. + TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) + with self.assertRaisesRegex(TypeError, "read_concern must be "): + TransactionOptions(read_concern={}) # type: ignore + with self.assertRaisesRegex(TypeError, "write_concern must be "): + TransactionOptions(write_concern={}) # type: ignore + with self.assertRaisesRegex( + ConfigurationError, "transactions do not support unacknowledged write concern" + ): + TransactionOptions(write_concern=WriteConcern(w=0)) + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): + TransactionOptions(read_preference={}) # type: ignore + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): + TransactionOptions(max_commit_time_ms="10000") # type: ignore + + @client_context.require_transactions + def test_transaction_write_concern_override(self): + """Test txn overrides Client/Database/Collection write_concern.""" + client = self.rs_client(w=0) + db = client.test + coll = db.test + coll.insert_one({}) + with client.start_session() as s: + with s.start_transaction(write_concern=WriteConcern(w=1)): + self.assertTrue((coll.insert_one({}, session=s)).acknowledged) + self.assertTrue((coll.insert_many([{}, {}], session=s)).acknowledged) + self.assertTrue((coll.bulk_write([InsertOne({})], session=s)).acknowledged) + self.assertTrue((coll.replace_one({}, {}, session=s)).acknowledged) + self.assertTrue((coll.update_one({}, {"$set": {"a": 1}}, session=s)).acknowledged) + self.assertTrue((coll.update_many({}, {"$set": {"a": 1}}, session=s)).acknowledged) + self.assertTrue((coll.delete_one({}, session=s)).acknowledged) + self.assertTrue((coll.delete_many({}, session=s)).acknowledged) + coll.find_one_and_delete({}, session=s) + coll.find_one_and_replace({}, {}, session=s) + coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) + + unsupported_txn_writes: list = [ + (client.drop_database, [db.name], {}), + (db.drop_collection, ["collection"], {}), + (coll.drop, [], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + # Creating a collection in a transaction requires MongoDB 4.4+. 
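+        # (4.3.4 is a development build in the 4.4 release series, hence the
+        # pre-release cutoff below.)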
+ if client_context.version < (4, 3, 4): + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) + + for op in unsupported_txn_writes: + op, args, kwargs = op + with client.start_session() as s: + kwargs["session"] = s + s.start_transaction(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + op(*args, **kwargs) + s.abort_transaction() + + @client_context.require_transactions + @client_context.require_multiple_mongoses + def test_unpin_for_next_transaction(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. + coll.insert_one({}) + with client.start_session() as s: + # Session is pinned to Mongos. + with s.start_transaction(): + coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + with s.start_transaction(): + cursor = coll.find({}, session=s) + self.assertTrue(next(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @client_context.require_transactions + @client_context.require_multiple_mongoses + def test_unpin_for_non_transaction_operation(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. + coll.insert_one({}) + with client.start_session() as s: + # Session is pinned to Mongos. + with s.start_transaction(): + coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + cursor = coll.find({}, session=s) + self.assertTrue(next(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @client_context.require_transactions + @client_context.require_version_min(4, 3, 4) + def test_create_collection(self): + client = client_context.client + db = client.pymongo_test + coll = db.test_create_collection + self.addCleanup(coll.drop) + + # Use with_transaction to avoid StaleConfig errors on sharded clusters. + def create_and_insert(session): + coll2 = db.create_collection(coll.name, session=session) + self.assertEqual(coll, coll2) + coll.insert_one({}, session=session) + + with client.start_session() as s: + s.with_transaction(create_and_insert) + + # Outside a transaction we raise CollectionInvalid on existing colls. + with self.assertRaises(CollectionInvalid): + db.create_collection(coll.name) + + # Inside a transaction we raise the OperationFailure from create. 
+ with client.start_session() as s: + s.start_transaction() + with self.assertRaises(OperationFailure) as ctx: + db.create_collection(coll.name, session=s) + self.assertEqual(ctx.exception.code, 48) # NamespaceExists + + @client_context.require_transactions + def test_gridfs_does_not_support_transactions(self): + client = client_context.client + db = client.pymongo_test + gfs = GridFS(db) + bucket = GridFSBucket(db) + + def gridfs_find(*args, **kwargs): + return gfs.find(*args, **kwargs).next() + + def gridfs_open_upload_stream(*args, **kwargs): + (bucket.open_upload_stream(*args, **kwargs)).write(b"1") + + gridfs_ops = [ + (gfs.put, (b"123",)), + (gfs.get, (1,)), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), + (gfs.list, ()), + (gfs.find_one, ()), + (gridfs_find, ()), + (gfs.exists, ()), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), + (bucket.delete, (1,)), + (bucket.find, ()), + (bucket.open_download_stream, (1,)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), + ] + + with client.start_session() as s, s.start_transaction(): + for op, args in gridfs_ops: + with self.assertRaisesRegex( + InvalidOperation, + "GridFS does not support multi-document transactions", + ): + op(*args, session=s) # type: ignore + + # Require 4.2+ for large (16MB+) transactions. + @client_context.require_version_min(4, 2) + @client_context.require_transactions + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") + def test_transaction_starts_with_batched_write(self): + if "PyPy" in sys.version and client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) + # Start a transaction with a batch of operations that needs to be + # split. + listener = OvertCommandListener() + client = self.rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + coll.delete_many({}) + listener.reset() + self.addCleanup(coll.drop) + large_str = "\0" * (1 * 1024 * 1024) + ops: List[InsertOne[RawBSONDocument]] = [ + InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48) + ] + with client.start_session() as session: + with session.start_transaction(): + coll.bulk_write(ops, session=session) # type: ignore[arg-type] + # Assert commands were constructed properly. + self.assertEqual( + ["insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.started_events[0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.started_events[1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) + self.assertEqual(48, coll.count_documents({})) + + @client_context.require_transactions + def test_transaction_direct_connection(self): + client = self.single_client() + coll = client.pymongo_test.test + + # Make sure the collection exists. 
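+        # Implicitly creating a collection inside a transaction is not
+        # supported on all server versions, so insert a document up front.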
+ coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + + def find(*args, **kwargs): + return coll.find(*args, **kwargs) + + def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + + ops = [ + (coll.bulk_write, [[InsertOne[dict]({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + with client.start_session() as s, s.start_transaction(): + res = f(*args, session=s) # type:ignore[operator] + if isinstance(res, (CommandCursor, Cursor)): + res.to_list() + + @client_context.require_transactions + def test_transaction_pool_cleared_error_labelled_transient(self): + c = self.single_client() + + with self.assertRaises(AutoReconnect) as context: + with c.start_session() as session: + with session.start_transaction(): + server = c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + with c._checkout(server, session): + pass + + # Verify that the TransientTransactionError label is present in the error. + self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + + +class PatchSessionTimeout: + """Patches the client_session's with_transaction timeout for testing.""" + + def __init__(self, mock_timeout): + self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT + self.mock_timeout = mock_timeout + + def __enter__(self): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout + + +class TestTransactionsConvenientAPI(TransactionsBase): + def setUp(self) -> None: + super().setUp() + self.mongos_clients = [] + if client_context.supports_transactions(): + for address in client_context.mongoses: + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) + + def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + self.configure_fail_point(client, command_args) + + @client_context.require_transactions + def test_callback_raises_custom_error(self): + class _MyException(Exception): + pass + + def raise_error(_): + raise _MyException + + with self.client.start_session() as s: + with self.assertRaises(_MyException): + s.with_transaction(raise_error) + + @client_context.require_transactions + def test_callback_returns_value(self): + def callback(_): + return "Foo" + + with self.client.start_session() as s: + self.assertEqual(s.with_transaction(callback), "Foo") + + self.db.test.insert_one({}) + + def callback2(session): + self.db.test.insert_one({}, session=session) + return "Foo" + + with self.client.start_session() as s: + self.assertEqual(s.with_transaction(callback2), "Foo") + + @client_context.require_transactions + def 
test_callback_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = self.rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + def callback(session): + coll.insert_one({}, session=session) + err: dict = { + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], + } + raise OperationFailure(err["errmsg"], err["code"], err) + + # Create the collection. + coll.insert_one({}) + listener.reset() + with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) + + @client_context.require_test_commands + @client_context.require_transactions + def test_callback_not_retried_after_commit_timeout(self): + listener = OvertCommandListener() + client = self.rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + def callback(session): + coll.insert_one({}, session=session) + + # Create the collection. + coll.insert_one({}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) + listener.reset() + + with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) + + @client_context.require_test_commands + @client_context.require_transactions + def test_commit_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = self.rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + def callback(session): + coll.insert_one({}, session=session) + + # Create the collection. + coll.insert_one({}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) + listener.reset() + + with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(ConnectionFailure): + s.with_transaction(callback) + + # One insert for the callback and two commits (includes the automatic + # retry). + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) + + # Tested here because this supports Motor's convenient transactions API. 
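
The three timeout tests above pin down `with_transaction`'s retry matrix: the callback is rerun only on TransientTransactionError, the commit is retried only on UnknownTransactionCommitResult, and both stop once the overall time limit (patched to 0 here via `PatchSessionTimeout`) expires. A simplified model of that loop, assuming the 120-second default of `_WITH_TRANSACTION_RETRY_TIME_LIMIT` and omitting edge cases such as the callback committing on its own:

```python
import time

from pymongo.errors import PyMongoError

RETRY_LIMIT_SECONDS = 120  # mirrors client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT

def with_transaction_model(session, callback):
    """Simplified model of ClientSession.with_transaction's retry rules."""
    deadline = time.monotonic() + RETRY_LIMIT_SECONDS
    while True:
        session.start_transaction()
        try:
            result = callback(session)
        except PyMongoError as exc:
            if session.in_transaction:
                session.abort_transaction()
            if exc.has_error_label("TransientTransactionError") and time.monotonic() < deadline:
                continue  # rerun the whole callback
            raise
        while True:
            try:
                session.commit_transaction()
                return result
            except PyMongoError as exc:
                if exc.has_error_label("UnknownTransactionCommitResult") and time.monotonic() < deadline:
                    continue  # retry only the commit
                if exc.has_error_label("TransientTransactionError") and time.monotonic() < deadline:
                    break  # rerun the whole transaction
                raise
```
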
+ @client_context.require_transactions + def test_in_transaction_property(self): + client = client_context.client + coll = client.test.testcollection + coll.insert_one({}) + self.addCleanup(coll.drop) + + with client.start_session() as s: + self.assertFalse(s.in_transaction) + s.start_transaction() + self.assertTrue(s.in_transaction) + coll.insert_one({}, session=s) + self.assertTrue(s.in_transaction) + s.commit_transaction() + self.assertFalse(s.in_transaction) + + with client.start_session() as s: + s.start_transaction() + # commit empty transaction + s.commit_transaction() + self.assertFalse(s.in_transaction) + + with client.start_session() as s: + s.start_transaction() + s.abort_transaction() + self.assertFalse(s.in_transaction) + + # Using a callback + def callback(session): + self.assertTrue(session.in_transaction) + + with client.start_session() as s: + self.assertFalse(s.in_transaction) + s.with_transaction(callback) + self.assertFalse(s.in_transaction) + + +class TestOptionsInsideTransactionProse(TransactionsBase): + @client_context.require_transactions + @client_context.require_no_standalone + def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = client_context.client + coll = client[self.db.name].test + coll.delete_many({}) + # Start a new session on the client. + with client.start_session() as s: + # Start a transaction on the session. + s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. + inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py new file mode 100644 index 0000000000..4ab4885e2a --- /dev/null +++ b/test/test_transactions_unified.py @@ -0,0 +1,55 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Transactions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + + +def setUpModule(): + pass + + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") + +# Generate unified tests. 
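
`generate_test_classes` builds one `TestCase` subclass per JSON spec file, and the `globals().update(...)` call below is what makes those dynamically created classes visible to unittest discovery. A minimal self-contained sketch of the pattern (`generate_classes_sketch` is an invented stand-in):

```python
import unittest

def generate_classes_sketch(spec_names):
    """Invented stand-in for generate_test_classes: build one TestCase
    subclass per spec name and return them keyed by class name."""
    classes = {}
    for name in spec_names:
        def test_runs(self, _name=name):
            self.assertTrue(_name)
        cls = type("Test" + name.title(), (unittest.TestCase,), {"test_runs": test_runs})
        classes[cls.__name__] = cls
    return classes

# Publishing into the module namespace lets unittest discover the classes.
globals().update(generate_classes_sketch(["insert", "commit"]))
```
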
+globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +# Location of JSON test specifications for transactions-convenient-api. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_typing.py b/test/test_typing.py new file mode 100644 index 0000000000..17dc21b4e0 --- /dev/null +++ b/test/test_typing.py @@ -0,0 +1,640 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that each file in mypy_fails/ actually fails mypy, and test some +sample client code that uses PyMongo typings. +""" + +from __future__ import annotations + +import os +import sys +import tempfile +import unittest +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + TypeVar, + Union, + cast, +) + +if TYPE_CHECKING: + from typing_extensions import NotRequired, TypedDict + + from bson import Binary, ObjectId + from bson.binary import BinaryVector, BinaryVectorDtype + + class Movie(TypedDict): + name: str + year: int + + class MovieWithId(TypedDict): + _id: ObjectId + name: str + year: int + + class ImplicitMovie(TypedDict): + _id: NotRequired[ObjectId] + name: str + year: int +else: + Movie = dict + ImplicitMovie = dict + NotRequired = None + + +try: + from mypy import api +except ImportError: + api = None # type: ignore[assignment] + +sys.path[0:0] = [""] + +from test import IntegrationTest, PyMongoTestCase, client_context + +from bson import CodecOptions, ObjectId, decode, decode_all, decode_file_iter, decode_iter, encode +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo import ASCENDING, MongoClient +from pymongo.operations import DeleteOne, InsertOne, ReplaceOne +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collection import Collection + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") + + +def get_tests() -> Iterable[str]: + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + yield os.path.join(dirpath, filename) + + +FuncT = TypeVar("FuncT", bound=Callable[..., None]) + + +def only_type_check(func: FuncT) -> FuncT: + def inner(*args, **kwargs): + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + func(*args, **kwargs) + + return cast(FuncT, inner) + + +class TestMypyFails(unittest.TestCase): + def ensure_mypy_fails(self, filename: str) -> None: + if api is None: + raise unittest.SkipTest("Mypy is not installed") + stdout, stderr, exit_status = api.run([filename]) + self.assertTrue(exit_status, msg=stdout) + + def test_mypy_failures(self) -> None: + for filename in get_tests(): + if filename == "typeddict_client.py" 
and TypedDict is None: + continue + with self.subTest(filename=filename): + self.ensure_mypy_fails(filename) + + +class TestPymongo(IntegrationTest): + coll: Collection + + def setUp(self): + super().setUp() + self.coll = self.client.test.test + + def test_insert_find(self) -> None: + doc = {"my": "doc"} + coll2 = self.client.test.test2 + result = self.coll.insert_one(doc) + self.assertEqual(result.inserted_id, doc["_id"]) + retrieved = self.coll.find_one({"_id": doc["_id"]}) + if retrieved: + # Documents returned from find are mutable. + retrieved["new_field"] = 1 + result2 = coll2.insert_one(retrieved) + self.assertEqual(result2.inserted_id, result.inserted_id) + + def test_cursor_iterable(self) -> None: + def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: + return list(iterable) + + self.coll.insert_one({}) + cursor = self.coll.find() + docs = to_list(cursor) + self.assertTrue(docs) + + def test_distinct(self) -> None: + self.coll.delete_many({}) + self.coll.insert_many( + [ + {"_id": None}, + {"_id": 0}, + {"_id": ""}, + {"_id": ObjectId()}, + {"_id": True}, + ] + ) + + def collection_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + return collection.distinct("_id") + + def cursor_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + cursor = collection.find() + return cursor.distinct("_id") + + collection_distinct(self.coll) + cursor_distinct(self.coll) + + @only_type_check + def test_bulk_write(self) -> None: + self.coll.insert_one({}) + coll: Collection[Movie] = self.coll + requests: List[InsertOne[Movie]] = [InsertOne(Movie(name="American Graffiti", year=1973))] + self.assertTrue(coll.bulk_write(requests).acknowledged) + new_requests: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [] + input_list: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, Movie(name="American Graffiti", year=1973)), + ] + for i in input_list: + new_requests.append(i) + self.assertTrue(coll.bulk_write(new_requests).acknowledged) + + # Because ReplaceOne is not generic, type checking is not enforced for ReplaceOne in the first example. 
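
The comment above comes down to parameterization: pymongo's write models are generic, so a bare `ReplaceOne` behaves like `ReplaceOne[Any]` and a checker cannot flag a mistyped replacement document, while a parameterized `InsertOne[Movie]` is checked. A self-contained illustration (`Movie` is redefined locally):

```python
from typing import TypedDict

from pymongo.operations import InsertOne, ReplaceOne

class Movie(TypedDict):
    name: str
    year: int

# Parameterized: mypy flags a wrong value type in the document.
checked = InsertOne[Movie]({"name": "Jaws", "year": 1975})

# Unparameterized (treated as ReplaceOne[Any]): the bad "year" value
# below passes the type checker even though it violates Movie.
unchecked = ReplaceOne({}, {"name": "Jaws", "year": "1975"})
```
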
+ @only_type_check + def test_bulk_write_heterogeneous(self): + coll: Collection[Movie] = self.coll + requests: List[Union[InsertOne[Movie], ReplaceOne, DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, {"name": "American Graffiti", "year": "WRONG_TYPE"}), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests).acknowledged) + requests_two: List[Union[InsertOne[Movie], ReplaceOne[Movie], DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne( + {}, + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[arg-type] + ), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests_two).acknowledged) + + def test_command(self) -> None: + result: Dict = self.client.admin.command("ping") + result.items() + + def test_list_collections(self) -> None: + cursor = self.client.test.list_collections() + value = cursor.next() + value.items() + + def test_list_databases(self) -> None: + cursor = self.client.list_databases() + value = cursor.next() + value.items() + + def test_default_document_type(self) -> None: + client = self.rs_or_single_client() + self.addCleanup(client.close) + coll = client.test.test + doc = {"my": "doc"} + coll.insert_one(doc) + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + def test_with_transaction(self) -> None: + def execute_transaction(session): + pass + + with self.client.start_session() as session: + return session.with_transaction( + execute_transaction, read_preference=ReadPreference.PRIMARY + ) + + def test_with_options(self) -> None: + coll: Collection[Dict[str, Any]] = self.coll + coll.drop() + doc = {"name": "foo", "year": 1982, "other": 1} + coll.insert_one(doc) + + coll2 = coll.with_options(codec_options=CodecOptions(document_class=Movie)) + retrieved = coll2.find_one() + assert retrieved is not None + assert retrieved["name"] == "foo" + # We expect a type error here. 
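
The `type:ignore[misc]` on the next line suppresses exactly this effect: once the result is typed as the `Movie` TypedDict, keys outside its definition are rejected statically even though the runtime document still carries them. In isolation (guarded with `TYPE_CHECKING` so the snippet also runs cleanly):

```python
from typing import TYPE_CHECKING, TypedDict

class Movie(TypedDict):
    name: str
    year: int

movie: Movie = {"name": "foo", "year": 1982}
print(movie["name"])  # fine: a declared key

if TYPE_CHECKING:
    # mypy: TypedDict "Movie" has no key "other" -- the runtime document
    # may still contain it, hence the test's type:ignore.
    movie["other"]
```
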
+ assert retrieved["other"] == 1 # type:ignore[misc] + + +class TestDecode(unittest.TestCase): + def test_bson_decode(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + rt_document: Dict[str, Any] = decode(bsonbytes) + assert rt_document["_id"] == 1 + rt_document["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options = CodecOptions(document_class=MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options) + rt_document2 = decode(bsonbytes2, codec_options=codec_options) + assert rt_document2.foo() == "bar" + + codec_options2 = CodecOptions(document_class=RawBSONDocument) + encode(doc, codec_options=codec_options2) + rt_document3 = decode(bsonbytes2, codec_options=codec_options2) + assert rt_document3.raw + + def test_bson_decode_no_codec_option(self) -> None: + doc = decode_all(encode({"a": 1})) + assert doc + doc[0]["a"] = 2 + + def test_bson_decode_all(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) + assert rt_documents[0]["_id"] == 1 + rt_documents[0]["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_all(bsonbytes2, codec_options2) + assert rt_documents2[0].foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_all(bsonbytes3, codec_options3) + assert rt_documents3[0].raw + + def test_bson_decode_all_no_codec_option(self) -> None: + docs = decode_all(b"") + docs.append({"new": 1}) + + docs = decode_all(encode({"a": 1})) + assert docs + docs[0]["a"] = 2 + docs.append({"new": 1}) + + def test_bson_decode_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_iter(bsonbytes2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_iter(bsonbytes3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_iter_no_codec_option(self) -> None: + doc = next(decode_iter(encode({"a": 1}))) + assert doc + doc["a"] = 2 + + def make_tempfile(self, content: bytes) -> Any: + fileobj = tempfile.TemporaryFile() + fileobj.write(content) + fileobj.seek(0) + self.addCleanup(fileobj.close) + return fileobj + + def test_bson_decode_file_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + fileobj = self.make_tempfile(bsonbytes) + rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, 
codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + fileobj2 = self.make_tempfile(bsonbytes2) + rt_documents2 = decode_file_iter(fileobj2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + fileobj3 = self.make_tempfile(bsonbytes3) + rt_documents3 = decode_file_iter(fileobj3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_file_iter_none_codec_option(self) -> None: + fileobj = self.make_tempfile(encode({"new": 1})) + doc = next(decode_file_iter(fileobj)) + assert doc + doc["a"] = 2 + + +class TestDocumentType(PyMongoTestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient[Dict[str, Any]] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert retrieved["year"] == 1 + assert retrieved["name"] == "a" + + @only_type_check + def test_typeddict_document_type_insertion(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + mov = {"name": "THX-1138", "year": 1971} + movie = Movie(name="THX-1138", year=1971) + coll.insert_one(mov) # type: ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": 1971}) # This will work because it is in-line. + coll.insert_one(movie) + coll.insert_many([mov]) # type: ignore[list-item] + coll.insert_many([movie]) + bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} + bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] + coll.insert_one(bad_mov) # type:ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[arg-type] + coll.insert_one(bad_movie) + coll.insert_many([bad_mov]) # type: ignore[list-item] + coll.insert_many( + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[list-item] + ) + coll.insert_many([bad_movie]) + + @only_type_check + def test_bulk_write_document_type_insertion(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [InsertOne(Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [InsertOne(mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore + ] # No error because it is in-line. 
+ ) + + @only_type_check + def test_bulk_write_document_type_replacement(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [ReplaceOne({}, Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [ReplaceOne({}, mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore + ] # No error because it is in-line. + ) + + @only_type_check + def test_typeddict_explicit_document_type(self) -> None: + out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + # This should work the same as the test above, but this time using NotRequired to allow + # automatic insertion of the _id field by insert_one. + @only_type_check + def test_typeddict_not_required_document_type(self) -> None: + out = ImplicitMovie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore[unused-ignore] + + @only_type_check + def test_typeddict_empty_document_type(self) -> None: + out = Movie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # This should fail because _id is not included in our TypedDict definition. + assert out["_id"] # type:ignore[typeddict-item] + + @client_context.require_connection + def test_typeddict_find_notrequired(self): + if NotRequired is None or ImplicitMovie is None: + raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") + client: MongoClient[ImplicitMovie] = self.rs_or_single_client() + coll = client.test.test + coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) + out = coll.find_one({}) + assert out is not None + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore[unused-ignore] + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client = MongoClient(document_class=RawBSONDocument) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert len(retrieved.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_son_document_type_runtime(self) -> None: + MongoClient(document_class=SON[str, Any], connect=False) + + @only_type_check + def test_create_index(self) -> None: + client: MongoClient[Dict[str, str]] = MongoClient("test") + db = client.test + with client.start_session() as session: + index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) + assert isinstance(index, str) + + +class TestCommandDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + result: Dict = client.admin.command("ping") + result["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient = MongoClient() + 
codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Movie] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + assert result["year"] == 1 + assert result["name"] == "a" + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options = CodecOptions(RawBSONDocument) + result = client.admin.command("ping", codec_options=codec_options) + assert len(result.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + codec_options = CodecOptions(SON[str, Any]) + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + +class TestCodecOptionsDocumentType(unittest.TestCase): + def test_default(self) -> None: + options: CodecOptions = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_explicit_document_type(self) -> None: + options: CodecOptions[Dict[str, Any]] = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_typeddict_document_type(self) -> None: + options: CodecOptions[Movie] = CodecOptions() + # Suppress: Cannot instantiate type "Type[Movie]". + obj = options.document_class(name="a", year=1) + assert obj["year"] == 1 + assert obj["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + options = CodecOptions(RawBSONDocument) + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + obj = options.document_class(doc_bson) + assert len(obj.raw) > 0 + + def test_son_document_type(self) -> None: + options = CodecOptions(SON[str, Any]) + obj = options.document_class() + obj["a"] = 1 + + +class TestBSONFromVectorType(unittest.TestCase): + @only_type_check + def test_from_vector_binaryvector(self): + list_vector = BinaryVector([127, 7], BinaryVectorDtype.INT8) + Binary.from_vector(list_vector) + + @only_type_check + def test_from_vector_list_int(self): + list_vector = [127, 7] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + @only_type_check + def test_from_vector_list_float(self): + list_vector = [127.0, 7.0] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py new file mode 100644 index 0000000000..32e9fcfcca --- /dev/null +++ b/test/test_typing_strict.py @@ -0,0 +1,40 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
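
The command and `CodecOptions` tests above all turn on one rule: the class passed to (or parameterized on) `CodecOptions` decides both the static and the runtime type of every decoded document. A compact recap using only bson's public `encode`/`decode`:

```python
from typing import Any, Dict

from bson import CodecOptions, decode, encode
from bson.raw_bson import RawBSONDocument

# Parameterized options yield plain, mutable dicts.
typed_opts: CodecOptions[Dict[str, Any]] = CodecOptions()
plain = decode(encode({"a": 1}), codec_options=typed_opts)
plain["b"] = 2  # statically a Dict[str, Any]

# Passing a document class swaps in that type instead.
raw_opts = CodecOptions(RawBSONDocument)
raw = decode(encode({"a": 1}), codec_options=raw_opts)
print(len(raw.raw))  # RawBSONDocument exposes the undecoded bytes
```
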
+ +"""Test typings in strict mode.""" +from __future__ import annotations + +import unittest +from typing import TYPE_CHECKING, Any, Dict + +import pymongo +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database + + +def test_generic_arguments() -> None: + """Ensure known usages of generic arguments pass strict typing""" + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + mongo_client: pymongo.MongoClient[Dict[str, Any]] = pymongo.MongoClient() + mongo_client.drop_database("foo") + mongo_client.get_default_database() + db = mongo_client.get_database("test_db") + db = Database(mongo_client, "test_db") + db.with_options() + db.validate_collection("py_test") + col = db.get_collection("py_test") + col.insert_one({"abc": 123}) + col = Collection(db, "py_test") + col.with_options() diff --git a/test/test_unified_format.py b/test/test_unified_format.py new file mode 100644 index 0000000000..f1cfd0139b --- /dev/null +++ b/test/test_unified_format.py @@ -0,0 +1,97 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from typing import Any + +sys.path[0:0] = [""] + +from test import UnitTest, unittest +from test.unified_format import MatchEvaluatorUtil, generate_test_classes + +from bson import ObjectId + +_IS_SYNC = True + +# Location of JSON test specifications. 
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + ) +) + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + ) +) + + +class TestMatchEvaluatorUtil(UnitTest): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: + self.match_evaluator.match_result(spec, actual) + + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + + def test_type(self): + self.match_evaluator.match_result( + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index c70b89837c..502faf82b0 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -1,4 +1,4 @@ -# Copyright 2011-2014 MongoDB, Inc. +# Copyright 2011-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,380 +13,551 @@ # limitations under the License. 
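
The `$$unsetOrMatches` assertions in `TestMatchEvaluatorUtil` above treat a field as passing when it is either absent or matches the nested spec. A toy evaluator for just that operator, far smaller than the real `MatchEvaluatorUtil`:

```python
from collections.abc import Mapping
from typing import Any

_UNSET = object()

def matches(spec: Any, actual: Any) -> bool:
    """Toy evaluator for a slice of the unified match language: plain
    equality plus the $$unsetOrMatches operator exercised above."""
    if isinstance(spec, Mapping) and "$$unsetOrMatches" in spec:
        return actual is _UNSET or matches(spec["$$unsetOrMatches"], actual)
    if isinstance(spec, Mapping) and isinstance(actual, Mapping):
        return all(matches(v, actual.get(k, _UNSET)) for k, v in spec.items())
    return spec == actual

assert matches({"y": {"$$unsetOrMatches": 2}}, {})
assert matches({"y": {"$$unsetOrMatches": 2}}, {"y": 2})
assert not matches({"y": {"$$unsetOrMatches": 2}}, {"y": 3})
```
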
"""Test the pymongo uri_parser module.""" +from __future__ import annotations import copy -import unittest import sys +import warnings +from typing import Any +from urllib.parse import quote_plus sys.path[0:0] = [""] -from pymongo.uri_parser import (_partition, - _rpartition, - parse_userinfo, - split_hosts, - split_options, - parse_uri) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo import ReadPreference +from test import unittest +from unittest.mock import patch + from bson.binary import JAVA_LEGACY +from pymongo import ReadPreference +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import ( + parse_userinfo, + split_hosts, + split_options, +) class TestURI(unittest.TestCase): - - def test_partition(self): - self.assertEqual(('foo', ':', 'bar'), _partition('foo:bar', ':')) - self.assertEqual(('foobar', '', ''), _partition('foobar', ':')) - - def test_rpartition(self): - self.assertEqual(('fo:o:', ':', 'bar'), _rpartition('fo:o::bar', ':')) - self.assertEqual(('', '', 'foobar'), _rpartition('foobar', ':')) - def test_validate_userinfo(self): - self.assertRaises(InvalidURI, parse_userinfo, - 'foo@') - self.assertRaises(InvalidURI, parse_userinfo, - ':password') - self.assertRaises(InvalidURI, parse_userinfo, - 'fo::o:p@ssword') - self.assertRaises(InvalidURI, parse_userinfo, ':') - self.assertTrue(parse_userinfo('user:password')) - self.assertEqual(('us:r', 'p@ssword'), - parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us+er:p+ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us%20er:p%20ssword')) - self.assertEqual(('us+er', 'p+ssword'), - parse_userinfo('us%2Ber:p%2Bssword')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM:')) + self.assertRaises(InvalidURI, parse_userinfo, "foo@") + self.assertRaises(InvalidURI, parse_userinfo, ":password") + self.assertRaises(InvalidURI, parse_userinfo, "fo::o:p@ssword") + self.assertRaises(InvalidURI, parse_userinfo, ":") + self.assertTrue(parse_userinfo("user:password")) + self.assertEqual(("us:r", "p@ssword"), parse_userinfo("us%3Ar:p%40ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us+er:p+ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us%20er:p%20ssword")) + self.assertEqual(("us+er", "p+ssword"), parse_userinfo("us%2Ber:p%2Bssword")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM:")) def test_split_hosts(self): - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,') - self.assertRaises(ConfigurationError, split_hosts, - ',localhost:27017') - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,,localhost:27018') - self.assertEqual([('localhost', 27017), ('example.com', 27017)], - split_hosts('localhost,example.com')) - self.assertEqual([('localhost', 27018), ('example.com', 27019)], - split_hosts('localhost:27018,example.com:27019')) - self.assertEqual([('/tmp/mongodb-27017.sock', None)], - split_hosts('/tmp/mongodb-27017.sock')) - self.assertEqual([('/tmp/mongodb-27017.sock', None), - ('example.com', 27017)], - split_hosts('/tmp/mongodb-27017.sock,' - 'example.com:27017')) - self.assertEqual([('example.com', 27017), - ('/tmp/mongodb-27017.sock', None)], - 
split_hosts('example.com:27017,' - '/tmp/mongodb-27017.sock')) - self.assertRaises(ConfigurationError, split_hosts, '::1', 27017) - self.assertRaises(ConfigurationError, split_hosts, '[::1:27017') - self.assertRaises(ConfigurationError, split_hosts, '::1') - self.assertRaises(ConfigurationError, split_hosts, '::1]:27017') - self.assertEqual([('::1', 27017)], split_hosts('[::1]:27017')) - self.assertEqual([('::1', 27017)], split_hosts('[::1]')) + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,") + self.assertRaises(ConfigurationError, split_hosts, ",localhost:27017") + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,,localhost:27018") + self.assertEqual( + [("localhost", 27017), ("example.com", 27017)], split_hosts("localhost,example.com") + ) + self.assertEqual( + [("localhost", 27018), ("example.com", 27019)], + split_hosts("localhost:27018,example.com:27019"), + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None)], split_hosts("/tmp/mongodb-27017.sock") + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], + split_hosts("/tmp/mongodb-27017.sock,example.com:27017"), + ) + self.assertEqual( + [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], + split_hosts("example.com:27017,/tmp/mongodb-27017.sock"), + ) + self.assertRaises(ValueError, split_hosts, "::1", 27017) + self.assertRaises(ValueError, split_hosts, "[::1:27017") + self.assertRaises(ValueError, split_hosts, "::1") + self.assertRaises(ValueError, split_hosts, "::1]:27017") + self.assertEqual([("::1", 27017)], split_hosts("[::1]:27017")) + self.assertEqual([("::1", 27017)], split_hosts("[::1]")) def test_split_options(self): - self.assertRaises(ConfigurationError, split_options, 'foo') - self.assertRaises(ConfigurationError, split_options, 'foo=bar') - self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo') - self.assertRaises(ConfigurationError, split_options, 'socketTimeoutMS=foo') - self.assertRaises(ConfigurationError, split_options, 'socketTimeoutMS=0.0') - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=foo') - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=0.0') - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=1e100000') - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=-1e100000') - - # On most platforms float('inf') and float('-inf') represent - # +/- infinity, although on Python 2.4 and 2.5 on Windows those - # expressions are invalid - if not (sys.platform == "win32" and sys.version_info <= (2, 5)): - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=inf') - self.assertRaises(ConfigurationError, split_options, 'connectTimeoutMS=-inf') - - self.assertTrue(split_options('socketTimeoutMS=300')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.3}, split_options('socketTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.0001}, split_options('socketTimeoutMS=0.1')) - self.assertEqual({'connecttimeoutms': 0.3}, split_options('connectTimeoutMS=300')) - self.assertEqual({'connecttimeoutms': 0.0001}, split_options('connectTimeoutMS=0.1')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertTrue(isinstance(split_options('w=5')['w'], int)) - self.assertTrue(isinstance(split_options('w=5.5')['w'], basestring)) - self.assertTrue(split_options('w=foo')) - self.assertTrue(split_options('w=majority')) - self.assertRaises(ConfigurationError, 
split_options, 'wtimeoutms=foo') - self.assertRaises(ConfigurationError, split_options, 'wtimeoutms=5.5') - self.assertTrue(split_options('wtimeoutms=500')) - self.assertRaises(ConfigurationError, split_options, 'fsync=foo') - self.assertRaises(ConfigurationError, split_options, 'fsync=5.5') - self.assertEqual({'fsync': True}, split_options('fsync=true')) - self.assertEqual({'fsync': False}, split_options('fsync=false')) - self.assertEqual({'authmechanism': 'GSSAPI'}, - split_options('authMechanism=GSSAPI')) - self.assertEqual({'authmechanism': 'MONGODB-CR'}, - split_options('authMechanism=MONGODB-CR')) - self.assertEqual({'authsource': 'foobar'}, split_options('authSource=foobar')) - # maxPoolSize isn't yet a documented URI option. - self.assertRaises(ConfigurationError, split_options, 'maxpoolsize=50') + self.assertRaises(ConfigurationError, split_options, "foo") + self.assertRaises(ConfigurationError, split_options, "foo=bar;foo") + self.assertTrue(split_options("ssl=true")) + self.assertTrue(split_options("connect=true")) + self.assertTrue(split_options("tlsAllowInvalidHostnames=false")) + + # Test Invalid URI options that should throw warnings. + with warnings.catch_warnings(): + warnings.filterwarnings("error") + self.assertRaises(Warning, split_options, "foo=bar", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=1e100000", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-1e100000", warn=True) + self.assertRaises(Warning, split_options, "ssl=foo", warn=True) + self.assertRaises(Warning, split_options, "connect=foo", warn=True) + self.assertRaises(Warning, split_options, "tlsAllowInvalidHostnames=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=inf", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-inf", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=foo", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=5.5", warn=True) + self.assertRaises(Warning, split_options, "fsync=foo", warn=True) + self.assertRaises(Warning, split_options, "fsync=5.5", warn=True) + self.assertRaises(Warning, split_options, "authMechanism=foo", warn=True) + + # Test invalid options with warn=False. 
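
The two blocks above differ only in the `warn` flag: `warn=True` reports an invalid option value as a Warning (which `filterwarnings("error")` escalates so `assertRaises` can see it), while the default raises immediately. A compact demonstration of both modes:

```python
import warnings

from pymongo.uri_parser_shared import split_options

# warn=True reports a bad value as a Warning, escalated to an exception
# here the same way the tests above do it.
with warnings.catch_warnings():
    warnings.filterwarnings("error")
    try:
        split_options("socketTimeoutMS=foo", warn=True)
    except Warning as exc:
        print("warned:", exc)

# The default warn=False raises immediately instead.
try:
    split_options("socketTimeoutMS=foo")
except ValueError as exc:
    print("raised:", exc)
```
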
+ self.assertRaises(ConfigurationError, split_options, "foo=bar") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=1e100000") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-1e100000") + self.assertRaises(ValueError, split_options, "ssl=foo") + self.assertRaises(ValueError, split_options, "connect=foo") + self.assertRaises(ValueError, split_options, "tlsAllowInvalidHostnames=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=inf") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-inf") + self.assertRaises(ValueError, split_options, "wtimeoutms=foo") + self.assertRaises(ValueError, split_options, "wtimeoutms=5.5") + self.assertRaises(ValueError, split_options, "fsync=foo") + self.assertRaises(ValueError, split_options, "fsync=5.5") + self.assertRaises(ValueError, split_options, "authMechanism=foo") + + # Test splitting options works when valid. + self.assertTrue(split_options("socketTimeoutMS=300")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.3}, split_options("socketTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.0001}, split_options("socketTimeoutMS=0.1")) + self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) + self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertIsInstance(split_options("w=5")["w"], int) + self.assertIsInstance(split_options("w=5.5")["w"], str) + self.assertTrue(split_options("w=foo")) + self.assertTrue(split_options("w=majority")) + self.assertTrue(split_options("wtimeoutms=500")) + self.assertEqual({"fsync": True}, split_options("fsync=true")) + self.assertEqual({"fsync": False}, split_options("fsync=false")) + self.assertEqual({"authMechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) + self.assertEqual( + {"authMechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + ) + self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) + self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) + + # Test suggestions given when invalid kwarg passed + + expected = r"Unknown option: auth. Did you mean one of \(authsource, authmechanism, timeoutms\) or maybe a camelCase version of one\? Refer to docstring." 
+ with self.assertRaisesRegex(ConfigurationError, expected): + split_options("auth=GSSAPI") def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") - self.assertRaises(ConfigurationError, - parse_uri, "mongodb://::1", 27017) - - orig = { - 'nodelist': [("localhost", 27017)], - 'username': None, - 'password': None, - 'database': None, - 'collection': None, - 'options': {} + self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) + + orig: dict = { + "nodelist": [("localhost", 27017)], + "username": None, + "password": None, + "database": None, + "collection": None, + "options": {}, + "fqdn": None, } - res = copy.deepcopy(orig) + res: dict = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost")) - res.update({'username': 'fred', 'password': 'foobar'}) + res.update({"username": "fred", "password": "foobar"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost")) - res.update({'database': 'baz'}) + res.update({"database": "baz"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/baz")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017," - "example2.com:27017")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + self.assertEqual(res, parse_uri("mongodb://example1.com:27017,example2.com:27017")) res = copy.deepcopy(orig) - res['nodelist'] = [("localhost", 27017), - ("localhost", 27018), - ("localhost", 27019)] - self.assertEqual(res, - parse_uri("mongodb://localhost," - "localhost:27018,localhost:27019")) + res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] + self.assertEqual(res, parse_uri("mongodb://localhost,localhost:27018,localhost:27019")) res = copy.deepcopy(orig) - res['database'] = 'foo' + res["database"] = "foo" self.assertEqual(res, parse_uri("mongodb://localhost/foo")) res = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost/")) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, parse_uri("mongodb://" - "localhost/test.yield_historical.in")) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://localhost/test.yield_historical.in")) - res.update({'username': 'fred', 'password': 'foobar'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017,example2.com" - ":27017/test.yield_historical.in")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri("mongodb://example1.com:27017,example2.com:27017/test.yield_historical.in"), + ) - res = copy.deepcopy(orig) - res['nodelist'] = [("::1", 27017)] - res['options'] = {'slaveok': True} - self.assertEqual(res, parse_uri("mongodb://[::1]:27017/?slaveOk=true")) - - res = copy.deepcopy(orig) - res['nodelist'] = 
[("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27017)] - res['options'] = {'slaveok': True} - self.assertEqual(res, parse_uri( - "mongodb://[2001:0db8:85a3:0000:0000" - ":8a2e:0370:7334]:27017/?slaveOk=true")) + # Test socket path without escaped characters. + self.assertRaises(InvalidURI, parse_uri, "mongodb:///tmp/mongodb-27017.sock") + # Test with escaped characters. res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, parse_uri("mongodb:///tmp/mongodb-27017.sock")) + res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] + self.assertEqual(res, parse_uri("mongodb://example2.com,%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017), - ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, - parse_uri("mongodb://example2.com," - "/tmp/mongodb-27017.sock")) + res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] + res["database"] = "nethers_db" + self.assertEqual( + res, + parse_uri("mongodb://shoe.sock.pants.co.uk,%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("shoe.sock.pants.co.uk", 27017), - ("/tmp/mongodb-27017.sock", None)] - res['database'] = "nethers_db" - self.assertEqual(res, - parse_uri("mongodb://shoe.sock.pants.co.uk," - "/tmp/mongodb-27017.sock/nethers_db")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017" + "/test.yield_historical.in" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb:///tmp/mongodb-27017.sock," - "example2.com:27017" - "/test.yield_historical.in")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017/test.yield_historical" + ".sock" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb:///tmp/mongodb-27017.sock," - "example2.com:27017" - "/test.yield_historical.sock")) + res["nodelist"] = [("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual(res, parse_uri("mongodb://example2.com:27017/test.yield_historical.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://example2.com:27017" - "/test.yield_historical.sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] + res.update({"database": "test", "collection": "mongodb-27017.sock"}) + self.assertEqual( + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock/test.mongodb-27017.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None)] - res.update({'database': 'test', 'collection': 'mongodb-27017.sock'}) - self.assertEqual(res, - parse_uri("mongodb:///tmp/mongodb-27017.sock" - 
"/test.mongodb-27017.sock")) + res["nodelist"] = [ + ("/tmp/mongodb-27020.sock", None), + ("::1", 27017), + ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), + ("192.168.0.212", 27019), + ("localhost", 27018), + ] + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27020.sock" + ",[::1]:27017,[2001:0db8:" + "85a3:0000:0000:8a2e:0370:7334]," + "192.168.0.212:27019,localhost", + 27018, + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [('/tmp/mongodb-27020.sock', None), - ("::1", 27017), - ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), - ("192.168.0.212", 27019), - ("localhost", 27018)] - self.assertEqual(res, parse_uri("mongodb:///tmp/mongodb-27020.sock," - "[::1]:27017,[2001:0db8:" - "85a3:0000:0000:8a2e:0370:7334]," - "192.168.0.212:27019,localhost", - 27018)) + res.update({"username": "fred", "password": "foobar"}) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) - res.update({'username': 'fred', 'password': 'foobar'}) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - res['options'] = {'slaveok': True} - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in?slaveok=true")) + res["database"] = "test" + res["collection"] = 'name/with "delimiters' + self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res['options'] = {'readpreference': ReadPreference.SECONDARY} - self.assertEqual(res, - parse_uri("mongodb://localhost/?readPreference=secondary")) + res["options"] = {"readPreference": ReadPreference.SECONDARY.mongos_mode} + self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = 'password' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authMechanism": "SCRAM-SHA-256"} + res["username"] = "user" + res["password"] = "password" + self.assertEqual( + res, parse_uri("mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256") + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR', 'authsource': 'bar'} - res['username'] = 'user' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/foo" - "?authSource=bar;authMechanism=MONGODB-CR")) + res["options"] = {"authMechanism": "SCRAM-SHA-256", "authSource": "bar"} + res["username"] = "user" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=SCRAM-SHA-256" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = '' - self.assertEqual(res, - parse_uri("mongodb://user:@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authMechanism": "SCRAM-SHA-256"} + res["username"] = "user" + res["password"] = "" + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=SCRAM-SHA-256")) res = copy.deepcopy(orig) - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo")) + 
res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authMechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri("mongodb://user%40domain.com:password@localhost/foo?authMechanism=GSSAPI"), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = '' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authMechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "" + res["database"] = "foo" + self.assertEqual( + res, parse_uri("mongodb://user%40domain.com@localhost/foo?authMechanism=GSSAPI") + ) res = copy.deepcopy(orig) - res['options'] = {'readpreference': ReadPreference.SECONDARY, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'}]} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website")) + res["options"] = { + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + ], + } + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'readpreference': ReadPreference.SECONDARY, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'}, - {}]} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website&" - "readpreferencetags=")) + res["options"] = { + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + {}, + ], + } + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website&" + "readpreferencetags=" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'uuidrepresentation': JAVA_LEGACY} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - 
"@localhost/foo?uuidrepresentation=" - "javaLegacy")) - - self.assertRaises(ConfigurationError, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption") - - def test_parse_uri_unicode(self): - # Ensure parsing a unicode returns option names that can be passed - # as kwargs. In Python 2.4, keyword argument names must be ASCII. - # In all Pythons, str is the type of valid keyword arg names. - res = parse_uri(unicode("mongodb://localhost/?fsync=true")) - for key in res['options']: - self.assertTrue(isinstance(key, str)) + res["options"] = {"uuidrepresentation": JAVA_LEGACY} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=" + "javaLegacy" + ), + ) + + with warnings.catch_warnings(): + warnings.filterwarnings("error") + self.assertRaises( + Warning, + parse_uri, + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=notAnOption", + warn=True, + ) + self.assertRaises( + ValueError, + parse_uri, + "mongodb://user%40domain.com:password@localhost/foo?uuidrepresentation=notAnOption", + ) + + def test_parse_ssl_paths(self): + # Turn off "validate" since these paths don't exist on filesystem. + self.assertEqual( + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "/a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, + parse_uri( + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b", + validate=False, + ), + ) + + self.assertEqual( + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, + parse_uri( + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b", + validate=False, + ), + ) + + def test_tlsinsecure_simple(self): + # check that tlsInsecure is expanded correctly. + self.maxDiff = None + uri = "mongodb://example.com/?tlsInsecure=true" + res = { + "tlsAllowInvalidHostnames": True, + "tlsAllowInvalidCertificates": True, + "tlsInsecure": True, + "tlsDisableOCSPEndpointCheck": True, + } + print(parse_uri(uri)["options"]) + self.assertEqual(res, parse_uri(uri)["options"]) + + def test_normalize_options(self): + # check that options are converted to their internal names correctly. 
+ uri = "mongodb://example.com/?ssl=true&appname=myapp" + res = {"tls": True, "appname": "myapp"} + self.assertEqual(res, parse_uri(uri)["options"]) + + def test_unquote_during_parsing(self): + quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%3A+etc" + unquoted_val = "val!@#$%^&*()_+: etc" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val + ) + res = parse_uri(uri) + options: dict[str, Any] = { + "authMechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": unquoted_val}, + } + self.assertEqual(options, res["options"]) + + uri = ( + "mongodb://localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west," + quoted_val + ":" + quoted_val + "&" + "readpreferencetags=dc:east,use:" + quoted_val + ) + res = parse_uri(uri) + options = { + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ + {"dc": "west", unquoted_val: unquoted_val}, + {"dc": "east", "use": unquoted_val}, + ], + } + self.assertEqual(options, res["options"]) + + def test_redact_AWS_SESSION_TOKEN(self): + token = "token" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN-" + token + ) + with self.assertRaisesRegex( + ValueError, + "Malformed auth mechanism properties", + ): + parse_uri(uri) + + def test_handle_colon(self): + token = "token:foo" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + token + ) + res = parse_uri(uri) + options = { + "authMechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": token}, + } + self.assertEqual(options, res["options"]) + + def test_special_chars(self): + user = "user@ /9+:?~!$&'()*+,;=" + pwd = "pwd@ /9+:?~!$&'()*+,;=" + uri = f"mongodb://{quote_plus(user)}:{quote_plus(pwd)}@localhost" + res = parse_uri(uri) + self.assertEqual(user, res["username"]) + self.assertEqual(pwd, res["password"]) + + def test_do_not_include_password_in_port_message(self): + with self.assertRaisesRegex(ValueError, "Port must be an integer between 0 and 65535"): + parse_uri("mongodb://localhost:65536") + with self.assertRaisesRegex( + ValueError, "Port contains non-digit characters. Hint: username " + ) as ctx: + parse_uri("mongodb://user:PASS /@localhost:27017") + self.assertNotIn("PASS", str(ctx.exception)) + + # This "invalid" case is technically a valid URI: + res = parse_uri("mongodb://user:1234/@localhost:27017") + self.assertEqual([("user", 1234)], res["nodelist"]) + self.assertEqual("@localhost:27017", res["database"]) + + def test_port_with_whitespace(self): + with self.assertRaisesRegex(ValueError, "Port contains whitespace character: ' '"): + parse_uri("mongodb://localhost:27017 ") + with self.assertRaisesRegex(ValueError, "Port contains whitespace character: ' '"): + parse_uri("mongodb://localhost: 27017") + with self.assertRaisesRegex(ValueError, r"Port contains whitespace character: '\\n'"): + parse_uri("mongodb://localhost:27\n017") + + def test_parse_uri_options_type(self): + opts = parse_uri("mongodb://localhost:27017")["options"] + self.assertIsInstance(opts, dict) if __name__ == "__main__": diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py new file mode 100644 index 0000000000..3d8f7b2b75 --- /dev/null +++ b/test/test_uri_spec.py @@ -0,0 +1,233 @@ +# Copyright 2011-2015 MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that the pymongo.uri_parser module is compliant with the connection +string and uri options specifications. +""" +from __future__ import annotations + +import json +import os +import sys +import warnings + +sys.path[0:0] = [""] + +from test import unittest +from test.helpers_shared import clear_warning_registry + +from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, _CaseInsensitiveDictionary, validate +from pymongo.compression_support import _have_snappy +from pymongo.synchronous.uri_parser import parse_uri + +CONN_STRING_TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") +) + +URI_OPTIONS_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "uri_options") + +TEST_DESC_SKIP_LIST = [ + "Valid options specific to single-threaded drivers are parsed correctly", + "Invalid serverSelectionTryOnce causes a warning", + "tlsDisableCertificateRevocationCheck can be set to true", + "tlsDisableCertificateRevocationCheck can be set to false", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "tlsDisableCertificateRevocationCheck and 
tlsDisableOCSPEndpointCheck both present (and false) raises an error",
+    "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error",
+    "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error",
+    "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error",
+    "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error",
+]
+
+
+class TestAllScenarios(unittest.TestCase):
+    def setUp(self):
+        clear_warning_registry()
+
+
+def get_error_message_template(expected, artifact):
+    return "{} {} for test '{}'".format("Expected" if expected else "Unexpected", artifact, "%s")
+
+
+def run_scenario_in_dir(target_workdir):
+    def workdir_context_decorator(func):
+        def modified_test_scenario(*args, **kwargs):
+            original_workdir = os.getcwd()
+            os.chdir(target_workdir)
+            try:
+                with warnings.catch_warnings():
+                    warnings.simplefilter("default")
+                    func(*args, **kwargs)
+            finally:
+                # Restore the working directory even when the test fails.
+                os.chdir(original_workdir)
+
+        return modified_test_scenario
+
+    return workdir_context_decorator
+
+
+def create_test(test, test_workdir):
+    def run_scenario(self):
+        compressors = (test.get("options") or {}).get("compressors", [])
+        if "snappy" in compressors and not _have_snappy():
+            self.skipTest("This test needs the snappy module.")
+        valid = True
+        warning = False
+        expected_warning = test.get("warning", False)
+        expected_valid = test.get("valid", True)
+
+        with warnings.catch_warnings(record=True) as ctx:
+            warnings.simplefilter("ignore", category=ResourceWarning)
+            try:
+                options = parse_uri(test["uri"], warn=True)
+            except Exception:
+                valid = False
+            else:
+                warning = len(ctx) > 0
+                if expected_valid and warning and not expected_warning:
+                    raise ValueError(f"Got unexpected warning(s): {[str(i) for i in ctx]}")
+
+        self.assertEqual(
+            valid,
+            expected_valid,
+            get_error_message_template(not expected_valid, "error") % test["description"],
+        )
+
+        if expected_valid:
+            self.assertEqual(
+                warning,
+                expected_warning,
+                get_error_message_template(expected_warning, "warning") % test["description"],
+            )
+
+        # Compare hosts and port.
+        if test["hosts"] is not None:
+            self.assertEqual(
+                len(test["hosts"]),
+                len(options["nodelist"]),
+                "Incorrect number of hosts parsed from URI",
+            )
+
+            for exp, actual in zip(test["hosts"], options["nodelist"]):
+                self.assertEqual(
+                    exp["host"],
+                    actual[0],
+                    "Expected host {} but got {}".format(exp["host"], actual[0]),
+                )
+                if exp["port"] is not None:
+                    self.assertEqual(
+                        exp["port"],
+                        actual[1],
+                        "Expected port {} but got {}".format(exp["port"], actual[1]),
+                    )
+
+        # Compare auth options.
+        auth = test["auth"]
+        if auth is not None:
+            auth["database"] = auth.pop("db")  # db == database
+            # Special case for PyMongo's collection parsing.
+            if options.get("collection") is not None:
+                options["database"] += "." + options["collection"]
+            for elm in auth:
+                if auth[elm] is not None:
+                    # We have to do this because while the spec requires
+                    # "+"->"+", unquote_plus does "+"->" ".
+                    options[elm] = options[elm].replace(" ", "+")
+                    self.assertEqual(
+                        auth[elm],
+                        options[elm],
+                        f"Expected {auth[elm]} but got {options[elm]}",
+                    )
+
+        # Compare URI options.
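+        # Option names are matched case-insensitively, and deprecated
+        # spellings are mapped to internal names via INTERNAL_URI_OPTION_NAME_MAP.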
+ err_msg = "For option %s expected %s but got %s" + if test["options"]: + opts = _CaseInsensitiveDictionary() + opts.update(options["options"]) + for opt in test["options"]: + lopt = opt.lower() + optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) + if opts.get(optname) is not None: + if opts[optname] == test["options"][opt]: + expected_value = test["options"][opt] + else: + expected_value = validate(lopt, test["options"][opt])[1] + self.assertEqual( + opts[optname], + expected_value, + err_msg + % ( + opt, + expected_value, + opts[optname], + ), + ) + else: + self.fail(f"Missing expected option {opt}") + + return run_scenario_in_dir(test_workdir)(run_scenario) + + +def create_tests(test_path): + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath) + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] + + for filename in filenames: + if not filename.endswith(".json"): + # skip everything that is not a test specification + continue + json_path = os.path.join(dirpath, filename) + with open(json_path, encoding="utf-8") as scenario_stream: + scenario_def = json.load(scenario_stream) + + for testcase in scenario_def["tests"]: + dsc = testcase["description"] + + if dsc in TEST_DESC_SKIP_LIST: + print("Skipping test '%s'" % dsc) + continue + + testmethod = create_test(testcase, dirpath) + testname = "test_{}_{}_{}".format( + dirname, + os.path.splitext(filename)[0], + str(dsc).replace(" ", "_"), + ) + testmethod.__name__ = testname + setattr(TestAllScenarios, testmethod.__name__, testmethod) + + +for test_path in [CONN_STRING_TEST_PATH, URI_OPTIONS_TEST_PATH]: + create_tests(test_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py new file mode 100644 index 0000000000..19b125770f --- /dev/null +++ b/test/test_versioned_api.py @@ -0,0 +1,69 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import sys +from test import UnitTest + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi, ServerApiVersion + + +class TestServerApi(UnitTest): + def test_server_api_defaults(self): + api = ServerApi(ServerApiVersion.V1) + self.assertEqual(api.version, "1") + self.assertIsNone(api.strict) + self.assertIsNone(api.deprecation_errors) + + def test_server_api_explicit_false(self): + api = ServerApi("1", strict=False, deprecation_errors=False) + self.assertEqual(api.version, "1") + self.assertFalse(api.strict) + self.assertFalse(api.deprecation_errors) + + def test_server_api_strict(self): + api = ServerApi("1", strict=True, deprecation_errors=True) + self.assertEqual(api.version, "1") + self.assertTrue(api.strict) + self.assertTrue(api.deprecation_errors) + + def test_server_api_validation(self): + with self.assertRaises(ValueError): + ServerApi("2") + with self.assertRaises(TypeError): + ServerApi("1", strict="not-a-bool") # type: ignore[arg-type] + with self.assertRaises(TypeError): + ServerApi("1", deprecation_errors="not-a-bool") # type: ignore[arg-type] + with self.assertRaises(TypeError): + MongoClient(server_api="not-a-ServerApi") + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertNoServerApi(self, event): + self.assertNotIn("apiVersion", event.command) + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_versioned_api_integration.py b/test/test_versioned_api_integration.py new file mode 100644 index 0000000000..066a1935ca --- /dev/null +++ b/test/test_versioned_api_integration.py @@ -0,0 +1,81 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils_shared import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. 
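+# generate_test_classes() builds TestCase classes from the JSON files under
+# TEST_PATH; merging them into globals() lets unittest discover and run them.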
+globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @client_context.require_version_min(4, 7) + def test_command_options(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + coll.find(batch_size=25).to_list() + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @client_context.require_version_min(4, 7) + @client_context.require_transactions + def test_command_options_txn(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + coll.insert_many([{} for _ in range(100)], session=s) + coll.find(batch_size=25, session=s).to_list() + client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_write_concern.py b/test/test_write_concern.py new file mode 100644 index 0000000000..02a7cb6e5c --- /dev/null +++ b/test/test_write_concern.py @@ -0,0 +1,85 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the unit tests for WriteConcern.""" +from __future__ import annotations + +import collections +import unittest + +from pymongo.errors import ConfigurationError +from pymongo.write_concern import WriteConcern + + +class TestWriteConcern(unittest.TestCase): + def test_invalid(self): + # Can't use fsync and j options together + self.assertRaises(ConfigurationError, WriteConcern, j=True, fsync=True) + # Can't use w=0 and j options together + self.assertRaises(ConfigurationError, WriteConcern, w=0, j=True) + + def test_equality(self): + concern = WriteConcern(j=True, wtimeout=3000) + self.assertEqual(concern, WriteConcern(j=True, wtimeout=3000)) + self.assertNotEqual(concern, WriteConcern()) + + def test_equality_to_none(self): + concern = WriteConcern() + self.assertNotEqual(concern, None) + # Explicitly use the != operator. 
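+        # (the bare comparison exercises WriteConcern.__ne__ with a None
+        # operand instead of going through unittest's helper)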
+ self.assertTrue(concern != None) # noqa: E711 + + def test_equality_compatible_type(self): + class _FakeWriteConcern: + def __init__(self, **document): + self.document = document + + def __eq__(self, other): + try: + return self.document == other.document + except AttributeError: + return NotImplemented + + def __ne__(self, other): + try: + return self.document != other.document + except AttributeError: + return NotImplemented + + self.assertEqual(WriteConcern(j=True), _FakeWriteConcern(j=True)) + self.assertEqual(_FakeWriteConcern(j=True), WriteConcern(j=True)) + self.assertEqual(WriteConcern(j=True), _FakeWriteConcern(j=True)) + self.assertEqual(WriteConcern(wtimeout=42), _FakeWriteConcern(wtimeout=42)) + self.assertNotEqual(WriteConcern(wtimeout=42), _FakeWriteConcern(wtimeout=2000)) + + def test_equality_incompatible_type(self): + _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore + self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) + + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_repr(self): + concern = WriteConcern(j=True, wtimeout=3000, w="majority", fsync=False) + self.assertRepr(concern) + self.assertEqual( + repr(concern), + "WriteConcern(wtimeout=3000, j=True, fsync=False, w='majority')", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/transactions-convenient-api/unified/callback-aborts.json b/test/transactions-convenient-api/unified/callback-aborts.json new file mode 100644 index 0000000000..206428715c --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-aborts.json @@ -0,0 +1,344 @@ +{ + "description": "callback-aborts", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback aborts", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + 
}, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction succeeds if callback aborts with no ops", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction still succeeds if callback aborts and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/callback-commits.json b/test/transactions-convenient-api/unified/callback-commits.json new file mode 100644 index 0000000000..06f791e9ae --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-commits.json @@ -0,0 +1,423 @@ +{ + "description": 
"callback-commits", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback commits", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction still succeeds if callback commits and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + 
"object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/callback-retry.json b/test/transactions-convenient-api/unified/callback-retry.json new file mode 100644 index 0000000000..277dfa18ed --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-retry.json @@ -0,0 +1,472 @@ +{ + "description": "callback-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + 
"useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "callback succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "callback is not retried after non-transient error (DuplicateKeyError)", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-retry-errorLabels.json b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json new file mode 100644 index 0000000000..c6a4e44d62 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json @@ -0,0 +1,231 @@ +{ + "description": "commit-retry-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + 
"databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-retry.json b/test/transactions-convenient-api/unified/commit-retry.json new file mode 100644 index 0000000000..928f0167e4 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry.json @@ -0,0 +1,552 @@ +{ + "description": "commit-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + 
"minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction retry only overwrites write concern w option", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "maxCommitTimeMS": 60000 + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json new file mode 100644 index 0000000000..0f5a782452 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json @@ -0,0 +1,294 @@ +{ + "description": "commit-transienttransactionerror-4.2", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.6", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "transaction is retried after commitTransaction TransientTransactionError (PreparedTransactionInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 267, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": 
"withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json new file mode 100644 index 0000000000..dd5158d813 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json @@ -0,0 +1,996 @@ +{ + "description": "commit-transienttransactionerror", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + 
"description": "transaction is retried after commitTransaction TransientTransactionError (LockTimeout)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + 
"description": "transaction is retried after commitTransaction TransientTransactionError (WriteConflict)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + 
{ + "description": "transaction is retried after commitTransaction TransientTransactionError (SnapshotUnavailable)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 246, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + 
] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (NoSuchTransaction)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 251, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + 
] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json new file mode 100644 index 0000000000..568f7ede42 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -0,0 +1,812 @@ +{ + "description": "commit-writeconcernerror", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction is retried after WriteConcernTimeout timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 
"majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is retried after WriteConcernTimeout non-timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnknownReplWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "codeName": "UnknownReplWriteConcern", + "errmsg": "No write concern mode named 'foo' found in replica set 
configuration" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnknownReplWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnsatisfiableWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": 
"test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "codeName": "MaxTimeMSExpired", + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit.json b/test/transactions-convenient-api/unified/commit.json new file mode 100644 index 0000000000..5684d5ee89 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit.json @@ -0,0 +1,398 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction commits after callback returns", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": 
{ + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction commits after callback returns (second transaction)", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/transaction-options.json b/test/transactions-convenient-api/unified/transaction-options.json new file mode 100644 index 0000000000..b1a74c5fd1 --- /dev/null +++ b/test/transactions-convenient-api/unified/transaction-options.json @@ -0,0 +1,819 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": 
"commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, 
+ { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": "majority" + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "readConcernLevel": "local", + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/abort.json b/test/transactions/unified/abort.json new file mode 100644 index 0000000000..c151a7d0c6 --- /dev/null +++ b/test/transactions/unified/abort.json @@ -0,0 +1,828 @@ +{ + "description": "abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + 
"initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "implicit abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "endSession" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + 
"databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "two aborts", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "cannot call abortTransaction twice" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort without start", + "operations": [ + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort directly after no-op commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort directly after commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call 
abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort ignores TransactionAborted", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "NoSuchTransaction", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + 
"command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort does not apply writeConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/bulk.json b/test/transactions/unified/bulk.json new file mode 100644 index 0000000000..ece162518f --- /dev/null +++ b/test/transactions/unified/bulk.json @@ -0,0 +1,652 @@ +{ + "description": "bulk", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "bulk", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + }, + "upsert": true + } + }, + { + "insertOne": { + "document": { + "_id": 3 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4 + } + } + }, + { + "insertOne": { + "document": { + "_id": 5 + } + } + }, + { + "insertOne": { + "document": { + "_id": 6 + } + } + }, + { + "insertOne": { + "document": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 2 + }, + "replacement": { + "y": 2 + } + } + }, + { + "deleteOne": { + "filter": { + 
"_id": 3 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 4 + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "update": { + "$set": { + "z": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 6 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 4, + "insertedCount": 6, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "3": 3, + "4": 4, + "5": 5, + "6": 6, + "7": 7 + } + }, + "matchedCount": 7, + "modifiedCount": 7, + "upsertedCount": 1, + "upsertedIds": { + "2": 2 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "y": 2 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": 
true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + }, + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 2 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gte": 6 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "y": 1 + }, + { + "_id": 2, + "y": 2, + "z": 1 + }, + { + "_id": 5, + "z": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/causal-consistency.json b/test/transactions/unified/causal-consistency.json new file mode 100644 index 0000000000..52a6cb8180 --- /dev/null +++ b/test/transactions/unified/causal-consistency.json @@ -0,0 +1,426 @@ +{ + "description": "causal-consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session_no_cc", + "client": "client0", + "sessionOptions": { + "causalConsistency": false + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 0 + } + ] + } + ], + "tests": [ + { + "description": "causal consistency", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 2 + } + ] + } + ] + }, + { + "description": "causal consistency disabled", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session_no_cc", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session_no_cc", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session_no_cc", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session_no_cc", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + 
"ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/client-bulkWrite.json b/test/transactions/unified/client-bulkWrite.json new file mode 100644 index 0000000000..4a8d013f8d --- /dev/null +++ b/test/transactions/unified/client-bulkWrite.json @@ -0,0 +1,593 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": 
"transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + 
"document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/commit.json b/test/transactions/unified/commit.json new file mode 100644 index 0000000000..ab778d8df2 --- /dev/null +++ b/test/transactions/unified/commit.json @@ -0,0 +1,1234 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": 
"database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "rerun commit after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": 
[ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "multiple commits in a row", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "write concern error on commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { 
+ "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit without start", + "operations": [ + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after no-op abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "multiple commits after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "reset session state commit", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "reset session state abort", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/count.json b/test/transactions/unified/count.json new file mode 100644 index 0000000000..404b06beb6 --- /dev/null +++ b/test/transactions/unified/count.json @@ -0,0 +1,177 @@ +{ + "description": "count", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0.2", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "count", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "OperationNotSupportedInTransaction", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "query": { + "_id": 1 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "count", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-collection.json b/test/transactions/unified/create-collection.json new file mode 100644 index 0000000000..e190088b3b --- /dev/null +++ b/test/transactions/unified/create-collection.json @@ -0,0 +1,282 @@ +{ + "description": "create-collection", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "explicitly create collection using create command", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": 
"testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "implicitly create collection using insert", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-index.json b/test/transactions/unified/create-index.json new file mode 100644 index 0000000000..98d6e11547 --- /dev/null +++ b/test/transactions/unified/create-index.json @@ -0,0 +1,313 @@ +{ + "description": "create-index", + "schemaVersion": "1.3", + 
"runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "create index on a non-existing collection", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a collection created within the same transaction", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": 
[ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/delete.json b/test/transactions/unified/delete.json new file mode 100644 index 0000000000..4c1cae0a4e --- /dev/null +++ b/test/transactions/unified/delete.json @@ -0,0 +1,425 @@ +{ + "description": "delete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + 
"$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for delete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection_wc_majority", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + 
}, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/do-not-retry-read-in-transaction.json b/test/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/test/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-blockConnection.json b/test/transactions/unified/error-labels-blockConnection.json new file mode 100644 index 0000000000..8da04d1005 --- /dev/null +++ b/test/transactions/unified/error-labels-blockConnection.json @@ -0,0 +1,235 @@ +{ + "description": "error-labels-blockConnection", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "socketTimeoutMS": 100 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-errorLabels.json b/test/transactions/unified/error-labels-errorLabels.json new file mode 100644 index 0000000000..1f95ad3419 --- /dev/null +++ b/test/transactions/unified/error-labels-errorLabels.json @@ -0,0 +1,423 @@ +{ + "description": "error-labels-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": 
"client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": 
"startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels.json b/test/transactions/unified/error-labels.json new file mode 100644 index 0000000000..74ed750b07 --- /dev/null +++ b/test/transactions/unified/error-labels.json @@ -0,0 +1,2263 @@ +{ + "description": "error-labels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "DuplicateKey errors do not contain transient label", + 
"operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NotWritablePrimary errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "WriteConflict errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 
112 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors on commit contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + 
"errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert", + "find", + "aggregate", + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": 
true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + 
"w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout with wtimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "errmsg": "No write concern mode named 'blah' found in replica set configuration" + } + } + } + } + }, + { + "object": "session0", + "name": 
"startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteConcern", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "maxTimeMS": 60000, + "session": "session0" + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000 + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + 
"command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + 
{ + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/errors-client.json b/test/transactions/unified/errors-client.json new file mode 100644 index 0000000000..00f1497c2d --- /dev/null +++ b/test/transactions/unified/errors-client.json @@ -0,0 +1,142 @@ +{ + "description": "errors-client", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + 
"description": "Client side error when transaction is in progress", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "in_progress" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/errors.json b/test/transactions/unified/errors.json new file mode 100644 index 0000000000..94a9cac207 --- /dev/null +++ b/test/transactions/unified/errors.json @@ -0,0 +1,285 @@ +{ + "description": "errors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "start insert start", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ] + }, + { + "description": "start twice", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + } + ] + }, + { + "description": "commit and start twice", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + } + ] + }, + { + "description": "write conflict commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "WriteConflict", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorCodeName": "NoSuchTransaction", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + } + ] + }, + { + "description": "write conflict abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "WriteConflict", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "abortTransaction" + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndDelete.json b/test/transactions/unified/findOneAndDelete.json new file mode 100644 index 0000000000..7db9c872af --- /dev/null +++ b/test/transactions/unified/findOneAndDelete.json @@ -0,0 +1,317 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndDelete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndReplace.json b/test/transactions/unified/findOneAndReplace.json new file mode 100644 index 0000000000..f0742f0c60 --- /dev/null +++ b/test/transactions/unified/findOneAndReplace.json @@ -0,0 +1,356 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + 
"minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 1 + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "x": 1 + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 1 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndReplace", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + 
"arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndUpdate.json b/test/transactions/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..f5308efef3 --- /dev/null +++ b/test/transactions/unified/findOneAndUpdate.json @@ -0,0 +1,546 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 1 + } + }, + { + "object": 
"session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 2 + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 2 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndUpdate", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/insert.json b/test/transactions/unified/insert.json new file mode 100644 index 0000000000..9a80d8bf4b --- /dev/null +++ b/test/transactions/unified/insert.json @@ -0,0 +1,895 @@ +{ + "description": "insert", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + 
"name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 5 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "insert with session1", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session1" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "collection writeConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection_wc_majority", + "database": "database1", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "session": { + "id": "session2", + "client": "client1" + } + } + ] + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for insert", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/isolation.json b/test/transactions/unified/isolation.json new file mode 100644 index 0000000000..5d0a0139fb --- /dev/null +++ b/test/transactions/unified/isolation.json @@ -0,0 +1,281 @@ +{ + "description": "isolation", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "one transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "two transactions", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": 
"session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-pin-auto.json b/test/transactions/unified/mongos-pin-auto.json new file mode 100644 index 0000000000..27db520401 --- /dev/null +++ b/test/transactions/unified/mongos-pin-auto.json @@ -0,0 +1,5474 @@ +{ + "description": "mongos-pin-auto", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "remain pinned after non-transient Interrupted error on insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorCodeName": "Interrupted" + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": 
true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": 
"session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after 
non-transient Interrupted error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + 
"TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } 
+ } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + 
"minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient connection error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } 
+ }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + 
"filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + 
"arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after 
transient connection error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": 
"assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { 
+ "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + 
"outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + 
"object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + 
"arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 
1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + 
"document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-recovery-token-errorLabels.json b/test/transactions/unified/mongos-recovery-token-errorLabels.json new file mode 100644 index 0000000000..13345c6a29 --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token-errorLabels.json @@ -0,0 +1,211 @@ +{ + "description": "mongos-recovery-token-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction retry succeeds on new mongos", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-recovery-token.json b/test/transactions/unified/mongos-recovery-token.json new file mode 100644 index 0000000000..bb88aa16bd --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token.json @@ -0,0 +1,568 @@ +{ + "description": "mongos-recovery-token", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + 
"description": "commitTransaction explicit retries include recoveryToken", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction retry fails on new mongos", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": 
"testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 7 + }, + "data": { + "failCommands": [ + "commitTransaction", + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "transactionsClient" + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ], + "errorCodeName": "NoSuchTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction sends recoveryToken", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json new file mode 100644 index 0000000000..4d1ebc87bc --- /dev/null +++ b/test/transactions/unified/mongos-unpin.json @@ -0,0 +1,450 @@ +{ + "description": "mongos-unpin", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "mongos-unpin-db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "mongos-unpin-db", + "documents": [] + } + ], + "_yamlAnchors": { + "anchors": 24 + }, + "tests": [ + { + "description": "unpin after TransientTransactionError error on commit", + "runOnRequirements": [ + { + "serverless": "forbid", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin after non-transient error on abort", + "runOnRequirements": [ + { + "serverless": "forbid", + "topologies": [ + 
"sharded" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin after TransientTransactionError error on abort", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + }, + { + "description": "unpin when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction write operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction read operation uses a session", + "operations": [ + { + "name": "startTransaction", 
+ "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/pin-mongos.json b/test/transactions/unified/pin-mongos.json new file mode 100644 index 0000000000..c96f3f341f --- /dev/null +++ b/test/transactions/unified/pin-mongos.json @@ -0,0 +1,1466 @@ +{ + "description": "pin-mongos", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + 
}, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "collection0", + 
"name": "insertOne", + "arguments": { + "document": { + "_id": 4 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 5 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 6 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 7 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 8 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 8 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 9 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 9 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 10 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 10 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + }, + { + "_id": 9 + }, + { + "_id": 10 + } + ] + } + ] + }, + { + "description": "mixed read write operations", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 5 + }, + "session": "session0" + }, + "expectResult": { 
+ "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 6 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 7 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + } + ] + } + ] + }, + { + "description": "multiple commits", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient error on commit", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ], + "errorCode": 51 + } + }, + { + "object": 
"testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction and commit", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" + }, + "observeEvents": [ + "commandStartedEvent" + 
] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 7 + }, + "data": { + "failCommands": [ + "insert", + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "transactionsClient" + } + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ], + "errorCodeName": "NoSuchTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/read-concern.json b/test/transactions/unified/read-concern.json new file mode 100644 index 0000000000..b3bd967c09 --- /dev/null +++ b/test/transactions/unified/read-concern.json @@ -0,0 +1,1924 @@ +{ + "description": "read-concern", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + 
"id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database_rc_majority", + "client": "client0", + "databaseName": "transaction-tests", + "databaseOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_rc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "only first countDocuments includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first find includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + 
"session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first aggregate includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" 
+ } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first distinct includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + 
{ + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first runCommand includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "countDocuments ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": 
"session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "find ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { 
+ "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + 
"$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "runCommand ignores database readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database_rc_majority", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": 
{ + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/read-pref.json b/test/transactions/unified/read-pref.json new file mode 100644 index 0000000000..eda00bd10d --- /dev/null +++ b/test/transactions/unified/read-pref.json @@ -0,0 +1,728 @@ +{ + "description": "read-pref", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_primary", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_secondary", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "default readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectResult": [ + { + "count": 1 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + 
"_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "primary readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primary" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectResult": [ + { + "count": 1 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "secondary readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference 
in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "primaryPreferred readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primaryPreferred" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "nearest readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "nearest" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "secondary write only", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/reads.json b/test/transactions/unified/reads.json new file mode 100644 index 0000000000..52e8457634 --- /dev/null +++ b/test/transactions/unified/reads.json @@ -0,0 +1,706 @@ +{ + "description": "reads", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "collection readConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "level": "majority" + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": 
"test", + "batchSize": 3, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort-errorLabels.json b/test/transactions/unified/retryable-abort-errorLabels.json new file mode 100644 index 0000000000..77a1b03eb0 --- /dev/null +++ b/test/transactions/unified/retryable-abort-errorLabels.json @@ -0,0 +1,2436 @@ +{ + "description": "retryable-abort-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" 
+ }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + 
"object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + 
"description": "abortTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + 
"insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + 
"documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + 
"name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort-handshake.json b/test/transactions/unified/retryable-abort-handshake.json new file mode 100644 index 0000000000..4ad56e2f2f --- /dev/null +++ b/test/transactions/unified/retryable-abort-handshake.json @@ -0,0 +1,204 @@ +{ + "description": "retryable abortTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "AbortTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + 
"expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort.json b/test/transactions/unified/retryable-abort.json new file mode 100644 index 0000000000..381cfa91f8 --- /dev/null +++ b/test/transactions/unified/retryable-abort.json @@ -0,0 +1,600 @@ +{ + "description": "retryable-abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only performs a single retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + 
"readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after WriteConcernError Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + 
"document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after connection error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-errorLabels.json b/test/transactions/unified/retryable-commit-errorLabels.json new file mode 100644 index 0000000000..d3ce8b148e --- /dev/null +++ b/test/transactions/unified/retryable-commit-errorLabels.json @@ -0,0 +1,2564 @@ +{ + "description": 
"retryable-commit-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction does not retry error without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session1", + 
"name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, 
+ "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { 
+ "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + 
} + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { 
+ "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError 
ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedAtShutdown", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": 
"insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after ShutdownInProgress", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-handshake.json b/test/transactions/unified/retryable-commit-handshake.json new file mode 100644 index 0000000000..d9315a8fc6 --- /dev/null +++ b/test/transactions/unified/retryable-commit-handshake.json @@ -0,0 +1,211 @@ +{ + "description": "retryable commitTransaction on handshake errors", + 
"schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "CommitTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit.json b/test/transactions/unified/retryable-commit.json new file mode 100644 index 0000000000..b794c1c55c --- /dev/null +++ b/test/transactions/unified/retryable-commit.json @@ -0,0 +1,868 @@ +{ + "description": "retryable-commit", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + 
"sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction fails after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorCodeName": "Interrupted", + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + 
} + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction fails after two errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction applies majority write concern on retries", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after connection error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-writes.json b/test/transactions/unified/retryable-writes.json new file mode 100644 index 0000000000..c196e68622 --- /dev/null +++ b/test/transactions/unified/retryable-writes.json @@ -0,0 +1,468 @@ +{ + "description": "retryable-writes", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "increment txnNumber", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": 
"session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 4, + "1": 5 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "4" + }, + "startTransaction": { + 
"$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "writes are not retried", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/run-command.json b/test/transactions/unified/run-command.json new file mode 100644 index 0000000000..7bd420ef74 --- /dev/null +++ b/test/transactions/unified/run-command.json @@ -0,0 +1,421 @@ +{ + "description": "run-command", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "run command with default read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + 
"name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with secondary read preference in client option and primary read preference in transaction options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primary" + } + } + }, + { + "object": "database1", + "name": "runCommand", + "arguments": { + "session": "session1", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with explicit primary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "readPreference": { + "mode": "primary" + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command fails with explicit secondary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "find" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + } + ] + }, + { + "description": "run command fails with secondary read preference from transaction options", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options-repl.json b/test/transactions/unified/transaction-options-repl.json new file mode 100644 index 0000000000..dc2cb77582 --- /dev/null +++ b/test/transactions/unified/transaction-options-repl.json @@ -0,0 +1,267 @@ +{ + "description": "transaction-options-repl", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "readConcern snapshot in startTransaction options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "snapshot" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "snapshot" + } + } + }, 
+ { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options.json b/test/transactions/unified/transaction-options.json new file mode 100644 index 0000000000..78e4c8207b --- /dev/null +++ b/test/transactions/unified/transaction-options.json @@ -0,0 +1,2081 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "no transaction options set", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": 
"session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { 
+ "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + 
}, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction options override defaults", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 30000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" 
+ }, + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "defaultTransactionOptions override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readConcern local in defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "local" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for bulk", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "collection1", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ], + "session": "session1" + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session1", + 
"name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readPreference inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readPreference inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + 
"useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "secondary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction overrides readPreference", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "primary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + 
"object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/update.json b/test/transactions/unified/update.json new file mode 100644 index 0000000000..8090fc9087 --- /dev/null +++ b/test/transactions/unified/update.json @@ -0,0 +1,565 @@ +{ + "description": "update", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + 
"readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "z": 1 + }, + { + "_id": 4, + "y": 1, + "z": 1 + } + ] + } + ] + }, + { + "description": "collections writeConcern ignored for update", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection1", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection1", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + 
}, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/write-concern.json b/test/transactions/unified/write-concern.json new file mode 100644 index 0000000000..29d1977a82 --- /dev/null +++ b/test/transactions/unified/write-concern.json @@ -0,0 +1,1588 @@ +{ + "description": "write-concern", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_w0", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ], + "tests": [ + { + "description": "commit with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + 
}, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "abort with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "start with unacknowledged write concern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "start with implicit unacknowledged write concern", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 0 + } + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "unacknowledged write concern coll insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": 
"1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll insertMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertMany", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll updateOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + 
"expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll updateMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": true + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndDelete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + 
}, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": 
false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py new file mode 100644 index 0000000000..578d98bffb --- /dev/null +++ b/test/unicode/test_utf8.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from bson import encode +from bson.errors import InvalidStringData + + +class TestUTF8(unittest.TestCase): + # Verify that python and bson have the same understanding of + # legal utf-8 if the first byte is 0xf4 (244) + def _assert_same_utf8_validation(self, data): + try: + data.decode("utf-8") + py_is_legal = True + except UnicodeDecodeError: + py_is_legal = False + + try: + encode({"x": data}) + bson_is_legal = True + except InvalidStringData: + bson_is_legal = False + + self.assertEqual(py_is_legal, bson_is_legal, data) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json new file mode 100644 index 0000000000..26d14051a7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "invalid": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json new file mode 100644 index 0000000000..c43a2a9125 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json @@ -0,0 +1,23 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json new file mode 100644 index 0000000000..1be9167a40 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": 0, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json 
b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json new file mode 100644 index 0000000000..3f54d89aa7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json @@ -0,0 +1,28 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json new file mode 100644 index 0000000000..53f2f5f086 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": 0, + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json new file mode 100644 index 0000000000..cfd979e2b2 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "invalid": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json new file mode 100644 index 0000000000..59b273487d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json new file mode 100644 index 0000000000..ffcc85bfcf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-type", + 
"schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json new file mode 100644 index 0000000000..1664b79097 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json new file mode 100644 index 0000000000..5bd50c8078 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json new file mode 100644 index 0000000000..120c088b00 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json new file mode 100644 index 0000000000..1dd1c8a2a3 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": 
"keyvault.datakeys", + "kmsProviders": { + "gcp": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json new file mode 100644 index 0000000000..9c659c8f76 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-invalidName", + "schemaVersion": "1.18", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name_with_invalid_character*": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json new file mode 100644 index 0000000000..22ded20440 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json new file mode 100644 index 0000000000..9b9e74be37 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json new file mode 100644 index 0000000000..b93cfe00d1 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git 
a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json new file mode 100644 index 0000000000..526ea24831 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json new file mode 100644 index 0000000000..b823a67baf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json @@ -0,0 +1,26 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys" + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json new file mode 100644 index 0000000000..e7a6190b68 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json @@ -0,0 +1,27 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": 0 + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json new file mode 100644 index 0000000000..3b4972f23d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-tlsOptions_not_supported", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "tlsOptions": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-additionalProperties.json b/test/unified-test-format/invalid/collectionData-additionalProperties.json new file mode 100644 index 0000000000..1f4ed4c154 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-additionalProperties.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + 
"id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [], + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-required.json b/test/unified-test-format/invalid/collectionData-collectionName-required.json new file mode 100644 index 0000000000..5426418c88 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-type.json b/test/unified-test-format/invalid/collectionData-collectionName-type.json new file mode 100644 index 0000000000..2a922de13e --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": 0, + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-createOptions-type.json b/test/unified-test-format/invalid/collectionData-createOptions-type.json new file mode 100644 index 0000000000..5b78bbcbb6 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-createOptions-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-createOptions-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "createOptions": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-required.json b/test/unified-test-format/invalid/collectionData-databaseName-required.json new file mode 100644 index 0000000000..8417801390 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + 
}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-type.json b/test/unified-test-format/invalid/collectionData-databaseName-type.json new file mode 100644 index 0000000000..d3480e8034 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-items.json b/test/unified-test-format/invalid/collectionData-documents-items.json new file mode 100644 index 0000000000..beb5af61c4 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-items.json @@ -0,0 +1,40 @@ +{ + "description": "collectionData-documents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-required.json b/test/unified-test-format/invalid/collectionData-documents-required.json new file mode 100644 index 0000000000..4aadf9b159 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-required.json @@ -0,0 +1,37 @@ +{ + "description": "collectionData-documents-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-type.json b/test/unified-test-format/invalid/collectionData-documents-type.json new file mode 100644 index 0000000000..9cbd3c164c --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-type.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-documents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": 0 + } + ], + "tests": [ + { 
+ "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json new file mode 100644 index 0000000000..beef260eed --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "foo": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json new file mode 100644 index 0000000000..1b9f4bcbea --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json new file mode 100644 index 0000000000..988b594d13 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readPreference-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readPreference": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json new file mode 100644 index 0000000000..088e9d1eb2 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-timeoutMS-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "timeoutMS": 4.5 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json new file mode 100644 index 0000000000..bd2157c5cb --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-writeConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + 
"client": "client0", + "databaseName": "foo", + "databaseOptions": { + "writeConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-items.json b/test/unified-test-format/invalid/createEntities-items.json new file mode 100644 index 0000000000..8e9d6ff702 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-items.json @@ -0,0 +1,13 @@ +{ + "description": "createEntities-items", + "schemaVersion": "1.0", + "createEntities": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-minItems.json b/test/unified-test-format/invalid/createEntities-minItems.json new file mode 100644 index 0000000000..3654923d28 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-minItems", + "schemaVersion": "1.0", + "createEntities": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-type.json b/test/unified-test-format/invalid/createEntities-type.json new file mode 100644 index 0000000000..ce3c382c93 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-type.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-type", + "schemaVersion": "1.0", + "createEntities": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/description-required.json b/test/unified-test-format/invalid/description-required.json new file mode 100644 index 0000000000..e4e0d0efdf --- /dev/null +++ b/test/unified-test-format/invalid/description-required.json @@ -0,0 +1,9 @@ +{ + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-additionalProperties.json b/test/unified-test-format/invalid/entity-additionalProperties.json new file mode 100644 index 0000000000..38b8898787 --- /dev/null +++ b/test/unified-test-format/invalid/entity-additionalProperties.json @@ -0,0 +1,15 @@ +{ + "description": "entity-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-additionalProperties.json b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json new file mode 100644 index 0000000000..46f9b4038e --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json new file mode 100644 index 0000000000..c3d7423e65 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-bucketOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + 
"client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "bucketOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-required.json b/test/unified-test-format/invalid/entity-bucket-database-required.json new file mode 100644 index 0000000000..1fde5a96c9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-type.json b/test/unified-test-format/invalid/entity-bucket-database-type.json new file mode 100644 index 0000000000..798d273fb0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-type.json @@ -0,0 +1,30 @@ +{ + "description": "entity-bucket-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-required.json b/test/unified-test-format/invalid/entity-bucket-id-required.json new file mode 100644 index 0000000000..c547d8ea3c --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-type.json b/test/unified-test-format/invalid/entity-bucket-id-type.json new file mode 100644 index 0000000000..f4e10ee630 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-type.json @@ -0,0 +1,30 @@ +{ + "description": "entity-bucket-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": 0, + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-additionalProperties.json b/test/unified-test-format/invalid/entity-client-additionalProperties.json new file mode 100644 index 0000000000..467e1d6ae1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git 
a/test/unified-test-format/invalid/entity-client-id-required.json b/test/unified-test-format/invalid/entity-client-id-required.json new file mode 100644 index 0000000000..4be2fbf8e8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-client-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-id-type.json b/test/unified-test-format/invalid/entity-client-id-type.json new file mode 100644 index 0000000000..cdc7cbc0e7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-client-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json new file mode 100644 index 0000000000..1252ac82d7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json new file mode 100644 index 0000000000..e78068a442 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json new file mode 100644 index 0000000000..5ac2b340c5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-enum.json b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json new file mode 100644 index 0000000000..c39c94eee2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-enum", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "foo" + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git 
a/test/unified-test-format/invalid/entity-client-observeEvents-items.json b/test/unified-test-format/invalid/entity-client-observeEvents-items.json new file mode 100644 index 0000000000..3aee11e3d5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json new file mode 100644 index 0000000000..e70d90c0a7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-type.json b/test/unified-test-format/invalid/entity-client-observeEvents-type.json new file mode 100644 index 0000000000..c144e32369 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json new file mode 100644 index 0000000000..87cbd21125 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-minProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json new file mode 100644 index 0000000000..fed0accd6e --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": {} + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json new file mode 100644 index 0000000000..f14b18d6de --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-value", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": "notALogLevel" + } + } + } + ],
+ "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json new file mode 100644 index 0000000000..8a277034e2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json new file mode 100644 index 0000000000..c5572f1fbe --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeSensitiveCommands-type", + "schemaVersion": "1.5", + "createEntities": [ + { + "client": { + "id": "client0", + "observeSensitiveCommands": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json new file mode 100644 index 0000000000..b688dae631 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "deprecationErrors": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json new file mode 100644 index 0000000000..0b2fdc4849 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "strict": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-type.json b/test/unified-test-format/invalid/entity-client-serverApi-type.json new file mode 100644 index 0000000000..20c9d1dce3 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-version-required.json b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json new file mode 100644 index 0000000000..8bef92b06f --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-version-required", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": 
"client0", + "serverApi": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-version-type.json b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json new file mode 100644 index 0000000000..2c36ff57ed --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-uriOptions-type.json b/test/unified-test-format/invalid/entity-client-uriOptions-type.json new file mode 100644 index 0000000000..4252480e98 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-uriOptions-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-uriOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json new file mode 100644 index 0000000000..e429cd71f8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-useMultipleMongoses-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json new file mode 100644 index 0000000000..77c0a91434 --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "entity-clientEncryption-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + }, + "invalid": {} + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json new file mode 100644 index 0000000000..88e852342a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0" + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json new file mode 100644 index 0000000000..77fb6a362a --- /dev/null +++ 
b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": 0 + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-required.json b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json new file mode 100644 index 0000000000..464ba7159a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json @@ -0,0 +1,28 @@ +{ + "description": "entity-clientEncryption-id-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-type.json b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json new file mode 100644 index 0000000000..a7746657fc --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json @@ -0,0 +1,29 @@ +{ + "description": "entity-clientEncryption-id-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": 0, + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-additionalProperties.json b/test/unified-test-format/invalid/entity-collection-additionalProperties.json new file mode 100644 index 0000000000..90ee2b1ca0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-additionalProperties.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-required.json b/test/unified-test-format/invalid/entity-collection-collectionName-required.json new file mode 100644 index 0000000000..2446722e5e --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-type.json b/test/unified-test-format/invalid/entity-collection-collectionName-type.json new file 
mode 100644 index 0000000000..ccad66aac9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json new file mode 100644 index 0000000000..52220c1cd1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-collectionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "collectionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-required.json b/test/unified-test-format/invalid/entity-collection-database-required.json new file mode 100644 index 0000000000..ba96b43f76 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-type.json b/test/unified-test-format/invalid/entity-collection-database-type.json new file mode 100644 index 0000000000..b87134498d --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": 0, + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-id-required.json b/test/unified-test-format/invalid/entity-collection-id-required.json new file mode 100644 index 0000000000..84e5352ead --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff 
--git a/test/unified-test-format/invalid/entity-collection-id-type.json b/test/unified-test-format/invalid/entity-collection-id-type.json new file mode 100644 index 0000000000..f0821e5250 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": 0, + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-additionalProperties.json b/test/unified-test-format/invalid/entity-database-additionalProperties.json new file mode 100644 index 0000000000..964cd27966 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-required.json b/test/unified-test-format/invalid/entity-database-client-required.json new file mode 100644 index 0000000000..54f99cf13e --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-type.json b/test/unified-test-format/invalid/entity-database-client-type.json new file mode 100644 index 0000000000..ff4584c405 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": 0, + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-required.json b/test/unified-test-format/invalid/entity-database-databaseName-required.json new file mode 100644 index 0000000000..64cca95c49 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-type.json b/test/unified-test-format/invalid/entity-database-databaseName-type.json new file mode 100644 index 0000000000..bd01aef781 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-type.json @@ -0,0 +1,24 @@ +{ + "description": 
"entity-database-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseOptions-type.json b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json new file mode 100644 index 0000000000..bc22ad3129 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-databaseOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-required.json b/test/unified-test-format/invalid/entity-database-id-required.json new file mode 100644 index 0000000000..0b65cf1159 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-type.json b/test/unified-test-format/invalid/entity-database-id-type.json new file mode 100644 index 0000000000..98b5789d04 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": 0, + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-maxProperties.json b/test/unified-test-format/invalid/entity-maxProperties.json new file mode 100644 index 0000000000..f4a6b7c914 --- /dev/null +++ b/test/unified-test-format/invalid/entity-maxProperties.json @@ -0,0 +1,22 @@ +{ + "description": "entity-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + }, + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-minProperties.json b/test/unified-test-format/invalid/entity-minProperties.json new file mode 100644 index 0000000000..d89949ce30 --- /dev/null +++ b/test/unified-test-format/invalid/entity-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "entity-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-additionalProperties.json b/test/unified-test-format/invalid/entity-session-additionalProperties.json new file mode 100644 index 0000000000..ab4cd2014f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": 
"entity-session-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-required.json b/test/unified-test-format/invalid/entity-session-client-required.json new file mode 100644 index 0000000000..8c9ed72e99 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-type.json b/test/unified-test-format/invalid/entity-session-client-type.json new file mode 100644 index 0000000000..b5ccc3f60f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-required.json b/test/unified-test-format/invalid/entity-session-id-required.json new file mode 100644 index 0000000000..3e5d5c5439 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-type.json b/test/unified-test-format/invalid/entity-session-id-type.json new file mode 100644 index 0000000000..dcd46e5be7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": 0, + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-sessionOptions-type.json b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json new file mode 100644 index 0000000000..0ee15891eb --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-session-sessionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-additionalProperties.json b/test/unified-test-format/invalid/entity-stream-additionalProperties.json new file mode 100644 index 0000000000..c8e76e9985 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-additionalProperties.json @@ -0,0 +1,19 @@ +{ + "description": 
"entity-stream-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FF", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json new file mode 100644 index 0000000000..7381893b55 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-pattern", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FFF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-required.json b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json new file mode 100644 index 0000000000..cc3bf09b20 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-hexBytes-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-type.json b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json new file mode 100644 index 0000000000..e6e2299eac --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-required.json b/test/unified-test-format/invalid/entity-stream-id-required.json new file mode 100644 index 0000000000..ff814d4e9c --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-type.json b/test/unified-test-format/invalid/entity-stream-id-type.json new file mode 100644 index 0000000000..5fc654d97e --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": 0, + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-additionalProperties.json b/test/unified-test-format/invalid/entity-thread-additionalProperties.json new file mode 100644 index 0000000000..b296719f13 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-thread-additionalProperties", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": "thread0", + "foo": "bar" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-required.json 
b/test/unified-test-format/invalid/entity-thread-id-required.json new file mode 100644 index 0000000000..3b197e3d6b --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-thread-id-required", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-type.json b/test/unified-test-format/invalid/entity-thread-id-type.json new file mode 100644 index 0000000000..8f281ef6f4 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-thread-id-type", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json new file mode 100644 index 0000000000..110ce7869e --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutFailedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutFailedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..f84e208d6a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json new file mode 100644 index 0000000000..56ffcdee72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedInEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedInEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json new file mode 100644 index 0000000000..9b804aad0a --- /dev/null +++ 
b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedOutEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json new file mode 100644 index 0000000000..053cd0b413 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionClosedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionClosedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..c2edc3f6aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..994fb63314 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5a1a25d463 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json 
new file mode 100644 index 0000000000..de59318822 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "interruptInUseConnections": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json new file mode 100644 index 0000000000..c181707f4a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClosedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClosedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..6aaa59a600 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..66c803a5d8 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json new file mode 100644 index 0000000000..9e45cbadda --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "expectedCommandEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "foo": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json new file mode 100644 
index 0000000000..a571d8e0c0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json new file mode 100644 index 0000000000..f6a305b89a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..7787ea6516 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5314dc9f80 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..996332d27d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "foo": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json new file mode 100644 index 0000000000..8f89460617 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-command-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json new file mode 100644 index 0000000000..121947b06f --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json new file mode 100644 index 0000000000..97d2b84f68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..a913f00ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..39ab925efb --- /dev/null +++ 
b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json new file mode 100644 index 0000000000..bde2f4817b --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json new file mode 100644 index 0000000000..47b8c8bb9d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..0712c33694 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json new file mode 100644 index 0000000000..edc9d3cd72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + 
"expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json new file mode 100644 index 0000000000..9df04acd29 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-reply-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "reply": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json new file mode 100644 index 0000000000..dd8b0e7e7c --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json @@ -0,0 +1,28 @@ +{ + "description": "expectedCommandEvent-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": {}, + "commandSucceededEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json new file mode 100644 index 0000000000..0f3e711a18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedCommandEvent-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + {} + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-additionalProperties.json b/test/unified-test-format/invalid/expectedError-additionalProperties.json new file mode 100644 index 0000000000..3a79df8e34 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "foo": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCode-type.json b/test/unified-test-format/invalid/expectedError-errorCode-type.json new file mode 100644 index 0000000000..b6b6f5d05a --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCode-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCode-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCode": "foo" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCodeName-type.json 
b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json new file mode 100644 index 0000000000..3ac5e43045 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCodeName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCodeName": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorContains-type.json b/test/unified-test-format/invalid/expectedError-errorContains-type.json new file mode 100644 index 0000000000..847a987dff --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorContains-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorContains-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorContains": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json new file mode 100644 index 0000000000..4eab56ad18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json @@ -0,0 +1,27 @@ +{ + "description": "expectedError-errorLabelsContain-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json new file mode 100644 index 0000000000..48162110aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json new file mode 100644 index 0000000000..a0aba918b5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json new file mode 100644 index 0000000000..6c94d07135 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json @@ -0,0 +1,27 @@ +{ + "description": 
"expectedError-errorLabelsOmit-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json new file mode 100644 index 0000000000..88c6582028 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsOmit-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json new file mode 100644 index 0000000000..5f57114fea --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsOmit-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorResponse-type.json b/test/unified-test-format/invalid/expectedError-errorResponse-type.json new file mode 100644 index 0000000000..6eb66d9b0b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorResponse-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorResponse-type", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorResponse": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isClientError-type.json b/test/unified-test-format/invalid/expectedError-isClientError-type.json new file mode 100644 index 0000000000..bfcc06679b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isClientError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isClientError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isClientError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isError-const.json b/test/unified-test-format/invalid/expectedError-isError-const.json new file mode 100644 index 0000000000..6a398bbf22 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-const.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-const", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": false + } + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedError-isError-type.json b/test/unified-test-format/invalid/expectedError-isError-type.json new file mode 100644 index 0000000000..354aff31f4 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json new file mode 100644 index 0000000000..5683911d0d --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isTimeoutError-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isTimeoutError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-minProperties.json b/test/unified-test-format/invalid/expectedError-minProperties.json new file mode 100644 index 0000000000..10e0b89ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-minProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedError-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json new file mode 100644 index 0000000000..90ed9c3273 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-required.json b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json new file mode 100644 index 0000000000..24b6330de7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedEventsForClient-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-type.json b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json new file mode 100644 index 0000000000..6e66857ee6 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": 
{ + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": 0, + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json new file mode 100644 index 0000000000..6e26cfaa7e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-enum", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType value", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "foo", + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json new file mode 100644 index 0000000000..105bb001e5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType type", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": 10, + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-items.json b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json new file mode 100644 index 0000000000..c1fcd4a6c3 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json @@ -0,0 +1,25 @@ +{ + "description": "expectedEventsForClient-events-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-required.json b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json new file mode 100644 index 0000000000..39c1e9e12d --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedEventsForClient-events-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-type.json b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json new file mode 100644 index 0000000000..4199d042b0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-events-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json 
b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json new file mode 100644 index 0000000000..b380219912 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_cmap_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is cmap", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "commandStartedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json new file mode 100644 index 0000000000..08446fe180 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_command_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is command", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json new file mode 100644 index 0000000000..c31efbb8b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json @@ -0,0 +1,27 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_default_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is unset", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json new file mode 100644 index 0000000000..965190664e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents-type", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "ignoreExtraEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json new file mode 100644 index 0000000000..cd7cf8726c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessage-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + 
"description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-enum.json b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json new file mode 100644 index 0000000000..2283e9b243 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-component-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "foo", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-required.json b/test/unified-test-format/invalid/expectedLogMessage-component-required.json new file mode 100644 index 0000000000..f3a157787f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-type.json b/test/unified-test-format/invalid/expectedLogMessage-component-type.json new file mode 100644 index 0000000000..af8f711573 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-required.json b/test/unified-test-format/invalid/expectedLogMessage-data-required.json new file mode 100644 index 0000000000..7e8152dddd --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-data-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-data-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-type.json b/test/unified-test-format/invalid/expectedLogMessage-data-type.json new file mode 100644 index 0000000000..4f81fb6272 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-data-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-data-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "data": 0 + } + ] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json new file mode 100644 index 0000000000..190748a185 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json @@ -0,0 +1,30 @@ +{ + "description": "expectedLogMessage-failureIsRedacted-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "failureIsRedacted": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-enum.json b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json new file mode 100644 index 0000000000..f4c886bb68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "foo", + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-required.json b/test/unified-test-format/invalid/expectedLogMessage-level-required.json new file mode 100644 index 0000000000..27c9c7a6cd --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-level-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-type.json b/test/unified-test-format/invalid/expectedLogMessage-level-type.json new file mode 100644 index 0000000000..180d7afcd6 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": 0, + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json new file mode 100644 index 0000000000..306b78b446 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json 
b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json new file mode 100644 index 0000000000..d8e1100bea --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-client-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json new file mode 100644 index 0000000000..5399cac029 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedLogMessagesForClient-client-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": 0, + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json new file mode 100644 index 0000000000..a9f2da9bce --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreExtraMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "ignoreExtraMessages": "true", + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json new file mode 100644 index 0000000000..345faf41f5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json @@ -0,0 +1,26 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-items", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json new file mode 100644 index 0000000000..4bc2d41dbf --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json new file mode 100644 index
0000000000..9788d8fe5c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json @@ -0,0 +1,25 @@ +{ + "description": "expectedLogMessagesForClient-messages-items", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json new file mode 100644 index 0000000000..85d070672f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-messages-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json new file mode 100644 index 0000000000..27531667c5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedLogMessagesForClient-messages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..1c6ec460b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json new file mode 100644 index 0000000000..58f686739a --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "foo": "bar" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json 
b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json new file mode 100644 index 0000000000..1b4a7e2e70 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": "not a server type" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json new file mode 100644 index 0000000000..c7ea9cc9be --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": 12 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..ef2686e93f --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.14", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "topologyDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-items.json b/test/unified-test-format/invalid/initialData-items.json new file mode 100644 index 0000000000..9c27d554f9 --- /dev/null +++ b/test/unified-test-format/invalid/initialData-items.json @@ -0,0 +1,13 @@ +{ + "description": "initialData-items", + "schemaVersion": "1.0", + "initialData": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-minItems.json b/test/unified-test-format/invalid/initialData-minItems.json new file mode 100644 index 0000000000..984100a2be --- /dev/null +++ b/test/unified-test-format/invalid/initialData-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-minItems", + "schemaVersion": "1.0", + "initialData": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-type.json b/test/unified-test-format/invalid/initialData-type.json new file mode 100644 index 0000000000..c33585e03a --- /dev/null +++ b/test/unified-test-format/invalid/initialData-type.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-type", + "schemaVersion": "1.0", + 
"initialData": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-additionalProperties.json b/test/unified-test-format/invalid/operation-additionalProperties.json new file mode 100644 index 0000000000..8f2f1434ec --- /dev/null +++ b/test/unified-test-format/invalid/operation-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "operation-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-arguments-type.json b/test/unified-test-format/invalid/operation-arguments-type.json new file mode 100644 index 0000000000..a22f3921c3 --- /dev/null +++ b/test/unified-test-format/invalid/operation-arguments-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-arguments-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "arguments": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json new file mode 100644 index 0000000000..bc15fbac76 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_expectResult", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "expectResult": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..dead4a3b9d --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "saveResultAsEntity": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-type.json b/test/unified-test-format/invalid/operation-expectError-type.json new file mode 100644 index 0000000000..b224ba3535 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectEvents-type.json b/test/unified-test-format/invalid/operation-expectEvents-type.json new file mode 100644 index 0000000000..ecd4c011a9 --- /dev/null +++ 
b/test/unified-test-format/invalid/operation-expectEvents-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json new file mode 100644 index 0000000000..b47e6be2a1 --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json @@ -0,0 +1,19 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_expectError", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectError", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json new file mode 100644 index 0000000000..03c5a1dbbc --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json @@ -0,0 +1,17 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_expectResult", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectResult", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..6745dff2eb --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,17 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with saveResultAsEntity", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "saveResultAsEntity": "entity0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-required.json b/test/unified-test-format/invalid/operation-name-required.json new file mode 100644 index 0000000000..42fcb3a308 --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-name-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-type.json b/test/unified-test-format/invalid/operation-name-type.json new file mode 100644 index 0000000000..2f91da078a --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-name-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": 
"foo", + "operations": [ + { + "name": 0, + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-required.json b/test/unified-test-format/invalid/operation-object-required.json new file mode 100644 index 0000000000..c0410ce3fd --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-object-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-type.json b/test/unified-test-format/invalid/operation-object-type.json new file mode 100644 index 0000000000..edb0a0b51a --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-object-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json new file mode 100644 index 0000000000..65ead94c7a --- /dev/null +++ b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-saveResultAsEntity-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "saveResultAsEntity": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json new file mode 100644 index 0000000000..79fa687e45 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json @@ -0,0 +1,16 @@ +{ + "description": "runOnRequirement-additionalProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-auth-type.json b/test/unified-test-format/invalid/runOnRequirement-auth-type.json new file mode 100644 index 0000000000..e5475d079d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-auth-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-auth-type", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "auth": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json new file mode 100644 index 0000000000..007f3f304c --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-authMechanism-type", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "authMechanism": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json 
b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json new file mode 100644 index 0000000000..1db023bf68 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-pattern", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": "1.2.3.4" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json new file mode 100644 index 0000000000..8de7b293f1 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-type", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json new file mode 100644 index 0000000000..b48c850d14 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-csfle-type", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json new file mode 100644 index 0000000000..78766eb925 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json new file mode 100644 index 0000000000..ffc9118ba2 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minProperties.json b/test/unified-test-format/invalid/runOnRequirement-minProperties.json new file mode 100644 index 0000000000..c2bfed3be7 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirement-minProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json new file mode 100644 index 0000000000..19abc1755f --- /dev/null +++ 
b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json new file mode 100644 index 0000000000..688d1c67ee --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json new file mode 100644 index 0000000000..031fa539df --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-enum", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-type.json b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json new file mode 100644 index 0000000000..1aa41712f9 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-type", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": 1234 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json new file mode 100644 index 0000000000..f62e5040d4 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-enum", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + "foo" + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-items.json b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json new file mode 100644 index 0000000000..a205b3293d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json new file mode 100644 index 0000000000..16f29b3f4b --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} 
diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-type.json b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json new file mode 100644 index 0000000000..f6d147cd6f --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-items.json b/test/unified-test-format/invalid/runOnRequirements-items.json new file mode 100644 index 0000000000..40ec84a3f3 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirements-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-minItems.json b/test/unified-test-format/invalid/runOnRequirements-minItems.json new file mode 100644 index 0000000000..4ca9f99b5d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-type.json b/test/unified-test-format/invalid/runOnRequirements-type.json new file mode 100644 index 0000000000..98b859f3ea --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-type", + "schemaVersion": "1.0", + "runOnRequirements": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-pattern.json b/test/unified-test-format/invalid/schemaVersion-pattern.json new file mode 100644 index 0000000000..bcb8980516 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-pattern.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-pattern", + "schemaVersion": "1.2.3.4", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-required.json b/test/unified-test-format/invalid/schemaVersion-required.json new file mode 100644 index 0000000000..7388ff0bf1 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-required.json @@ -0,0 +1,9 @@ +{ + "description": "schemaVersion-required", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-type.json b/test/unified-test-format/invalid/schemaVersion-type.json new file mode 100644 index 0000000000..646473a209 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-type.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-type", + "schemaVersion": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-additionalProperties.json b/test/unified-test-format/invalid/test-additionalProperties.json new file mode 100644 index 0000000000..a699319c30 --- /dev/null +++ b/test/unified-test-format/invalid/test-additionalProperties.json @@ -0,0 +1,11 @@ +{ + "description": "test-additionalProperties", + "schemaVersion": "1.0", + "tests": [ + { + "description": 
"foo", + "operations": [], + "foo": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-required.json b/test/unified-test-format/invalid/test-description-required.json new file mode 100644 index 0000000000..8bf23014d4 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-description-required", + "schemaVersion": "1.0", + "tests": [ + { + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-type.json b/test/unified-test-format/invalid/test-description-type.json new file mode 100644 index 0000000000..bba3690449 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-description-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": 0, + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-items.json b/test/unified-test-format/invalid/test-expectEvents-items.json new file mode 100644 index 0000000000..394f74746c --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectEvents-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-minItems.json b/test/unified-test-format/invalid/test-expectEvents-minItems.json new file mode 100644 index 0000000000..0da3a56f79 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-type.json b/test/unified-test-format/invalid/test-expectEvents-type.json new file mode 100644 index 0000000000..1569f0a0d7 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-items.json b/test/unified-test-format/invalid/test-expectLogMessages-items.json new file mode 100644 index 0000000000..be4a609c56 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectLogMessages-items", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-minItems.json b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json new file mode 100644 index 0000000000..d7a07c2e77 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectLogMessages-minItems", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-type.json b/test/unified-test-format/invalid/test-expectLogMessages-type.json new file mode 100644 index 0000000000..9a8d6fcdfb --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-type.json @@ -0,0 +1,11 @@ +{ + 
"description": "test-expectLogMessages-type", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-items.json b/test/unified-test-format/invalid/test-operations-items.json new file mode 100644 index 0000000000..00af8e7453 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-items.json @@ -0,0 +1,12 @@ +{ + "description": "test-operations-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-required.json b/test/unified-test-format/invalid/test-operations-required.json new file mode 100644 index 0000000000..67c6f83044 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-operations-required", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo" + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-type.json b/test/unified-test-format/invalid/test-operations-type.json new file mode 100644 index 0000000000..1e8b5b2496 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-operations-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-items.json b/test/unified-test-format/invalid/test-outcome-items.json new file mode 100644 index 0000000000..cf6bb54f87 --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-outcome-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-minItems.json b/test/unified-test-format/invalid/test-outcome-minItems.json new file mode 100644 index 0000000000..aadf8e514a --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-type.json b/test/unified-test-format/invalid/test-outcome-type.json new file mode 100644 index 0000000000..e60c119d7e --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-items.json b/test/unified-test-format/invalid/test-runOnRequirements-items.json new file mode 100644 index 0000000000..866bebb51f --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-runOnRequirements-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-minItems.json b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json new file mode 100644 index 0000000000..d61f063849 --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": 
"test-runOnRequirements-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-type.json b/test/unified-test-format/invalid/test-runOnRequirements-type.json new file mode 100644 index 0000000000..5b25b1005d --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-runOnRequirements-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-skipReason-type.json b/test/unified-test-format/invalid/test-skipReason-type.json new file mode 100644 index 0000000000..0408e76834 --- /dev/null +++ b/test/unified-test-format/invalid/test-skipReason-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-skipReason-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "skipReason": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/tests-items.json b/test/unified-test-format/invalid/tests-items.json new file mode 100644 index 0000000000..11f37469e4 --- /dev/null +++ b/test/unified-test-format/invalid/tests-items.json @@ -0,0 +1,7 @@ +{ + "description": "tests-items", + "schemaVersion": "1.0", + "tests": [ + 0 + ] +} diff --git a/test/unified-test-format/invalid/tests-minItems.json b/test/unified-test-format/invalid/tests-minItems.json new file mode 100644 index 0000000000..3f74f94af7 --- /dev/null +++ b/test/unified-test-format/invalid/tests-minItems.json @@ -0,0 +1,5 @@ +{ + "description": "tests-minItems", + "schemaVersion": "1.0", + "tests": [] +} diff --git a/test/unified-test-format/invalid/tests-required.json b/test/unified-test-format/invalid/tests-required.json new file mode 100644 index 0000000000..de4b2fd063 --- /dev/null +++ b/test/unified-test-format/invalid/tests-required.json @@ -0,0 +1,4 @@ +{ + "description": "tests-required", + "schemaVersion": "1.0" +} diff --git a/test/unified-test-format/invalid/tests-type.json b/test/unified-test-format/invalid/tests-type.json new file mode 100644 index 0000000000..62d8194a41 --- /dev/null +++ b/test/unified-test-format/invalid/tests-type.json @@ -0,0 +1,5 @@ +{ + "description": "tests-type", + "schemaVersion": "1.0", + "tests": 0 +} diff --git a/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..9799bb2f65 --- /dev/null +++ b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,63 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "operation fails if client field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "connections": 1 + } + } + ] + }, + { + "description": "operation fails if connections field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "operation fails if client entity does not exist", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + 
"arguments": { + "client": "client1" + } + } + ] + }, + { + "description": "operation fails if number of connections is incorrect", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json new file mode 100644 index 0000000000..7f7f1978c3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-bucket-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "bucket": { + "id": "bucket0", + "database": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json new file mode 100644 index 0000000000..d92d23dcaf --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-apiVersion-unsupported", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "server_will_never_support_this_api_version" + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-collection-database-undefined.json b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json new file mode 100644 index 0000000000..20b0733e34 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-collection-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "collection": { + "id": "collection0", + "database": "foo", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-database-client-undefined.json b/test/unified-test-format/valid-fail/entity-database-client-undefined.json new file mode 100644 index 0000000000..0f8110e6d3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-database-client-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-database-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "database": { + "id": "database0", + "client": "foo", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-findCursor-malformed.json b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json new file mode 100644 index 0000000000..0956efa4c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json @@ -0,0 +1,44 @@ +{ + "description": "entity-findCursor-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + 
"description": "createFindCursor fails if filter is not specified", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "saveResultAsEntity": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-findCursor.json b/test/unified-test-format/valid-fail/entity-findCursor.json new file mode 100644 index 0000000000..389e448c06 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-findCursor.json @@ -0,0 +1,52 @@ +{ + "description": "entity-findCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "iterateUntilDocumentOrError fails if it references a nonexistent entity", + "operations": [ + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + } + ] + }, + { + "description": "close fails if it references a nonexistent entity", + "operations": [ + { + "name": "close", + "object": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-session-client-undefined.json b/test/unified-test-format/valid-fail/entity-session-client-undefined.json new file mode 100644 index 0000000000..260356436a --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-session-client-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-session-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "session": { + "id": "session0", + "client": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json new file mode 100644 index 0000000000..b64779c723 --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json @@ -0,0 +1,48 @@ +{ + "description": "ignoreResultAndError-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "malformed operation fails if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "foo": "bar" + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError.json b/test/unified-test-format/valid-fail/ignoreResultAndError.json new file mode 100644 index 0000000000..01b2421a9f --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError.json @@ -0,0 +1,59 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are not ignored if ignoreResultAndError is false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": false + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json new file mode 100644 index 0000000000..e62de80033 --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_aws_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json new file mode 100644 index 0000000000..8ef805d0fa --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_azure_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": "tenantId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json new file mode 100644 index 0000000000..c6da1ce58c --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_gcp_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": "email" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-no_kms.json b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json new file mode 100644 index 0000000000..57499b4eaf --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json @@ -0,0 +1,32 @@ +{ + "description": "clientEncryptionOpts-no_kms", + 
"schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-failure.json b/test/unified-test-format/valid-fail/operation-failure.json new file mode 100644 index 0000000000..8f6cae1521 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-failure.json @@ -0,0 +1,56 @@ +{ + "description": "operation-failure", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "operation-failure" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-unsupported.json b/test/unified-test-format/valid-fail/operation-unsupported.json new file mode 100644 index 0000000000..d8ef5ab1c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-unsupported.json @@ -0,0 +1,22 @@ +{ + "description": "operation-unsupported", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "Unsupported operation", + "operations": [ + { + "name": "unsupportedOperation", + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operator-matchAsDocument.json b/test/unified-test-format/valid-fail/operator-matchAsDocument.json new file mode 100644 index 0000000000..24f6be9cb8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsDocument.json @@ -0,0 +1,205 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2 }" + }, + { + "_id": 2, + "json": "1" + }, + { + "_id": 3, + "json": "[ \"foo\" ]" + }, + { + "_id": 4, + "json": "{ \"x\" }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument with non-matching filter", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": "two" + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + 
"limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument does not permit extra fields", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given scalar", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given array", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 3 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 3, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument fails to decode Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 4 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 4, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operator-matchAsRoot.json b/test/unified-test-format/valid-fail/operator-matchAsRoot.json new file mode 100644 index 0000000000..ec6309418c --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsRoot.json @@ -0,0 +1,67 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document does not match", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 3 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json new file mode 100644 index 0000000000..ea425fb568 --- /dev/null +++ b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json @@ -0,0 +1,66 @@ +{ + "description": "returnDocument-enum-invalid", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "tests": [ + { + "description": "FindOneAndReplace returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "invalid" + } 
+ } + ] + }, + { + "description": "FindOneAndUpdate returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "invalid" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/schemaVersion-unsupported.json b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json new file mode 100644 index 0000000000..ceb5532917 --- /dev/null +++ b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-unsupported", + "schemaVersion": "0.1", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..a9fc063f33 --- /dev/null +++ b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,27 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "basic assertion succeeds", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json new file mode 100644 index 0000000000..19edc2247b --- /dev/null +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -0,0 +1,79 @@ +{ + "description": "collectionData-createOptions", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0", + "createOptions": { + "capped": true, + "size": 4096 + }, + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "collection is created with the correct options", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$collStats": { + "storageStats": {} + } + }, + { + "$project": { + "capped": "$storageStats.capped", + "maxSize": "$storageStats.maxSize" + } + } + ] + }, + "expectResult": [ + { + "capped": true, + "maxSize": 4096 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/createEntities-operation.json b/test/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 0000000000..3fde42919d --- /dev/null +++ b/test/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + 
}, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-client-cmap-events.json b/test/unified-test-format/valid-pass/entity-client-cmap-events.json new file mode 100644 index 0000000000..3209033def --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-client-cmap-events.json @@ -0,0 +1,71 @@ +{ + "description": "entity-client-cmap-events", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "events are captured during an operation", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-commandCursor.json b/test/unified-test-format/valid-pass/entity-commandCursor.json new file mode 100644 index 0000000000..72b74b4a9a --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-commandCursor.json @@ -0,0 +1,278 @@ +{ + "description": "entity-commandCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "runCursorCommand creates and exhausts cursor by running getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "createCommandCursor creates a cursor and stores it as an entity that can be iterated one document at a time", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 5, + "x": 55 + } + } + ] + }, + { + "description": "createCommandCursor's cursor can be closed and will perform a killCursors operation", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "close", + "object": "myRunCommandCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 0000000000..b17ae78b94 --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + 
} + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-find-cursor.json b/test/unified-test-format/valid-pass/entity-find-cursor.json new file mode 100644 index 0000000000..6f955d81f4 --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-find-cursor.json @@ -0,0 +1,191 @@ +{ + "description": "entity-find-cursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "cursors can be created, iterated, and closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": { + "$$type": "string" + }, + "firstBatch": { + "$$type": "array" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll0", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + }, + { + 
"commandSucceededEvent": { + "reply": { + "cursorsKilled": { + "$$unsetOrMatches": { + "$$type": "array" + } + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedError-errorResponse.json b/test/unified-test-format/valid-pass/expectedError-errorResponse.json new file mode 100644 index 0000000000..177b1baf56 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-errorResponse.json @@ -0,0 +1,70 @@ +{ + "description": "expectedError-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedError-isClientError.json b/test/unified-test-format/valid-pass/expectedError-isClientError.json new file mode 100644 index 0000000000..9c6beda588 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-isClientError.json @@ -0,0 +1,74 @@ +{ + "description": "expectedError-isClientError", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "isClientError considers network errors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json new file mode 100644 index 0000000000..fe308df965 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json @@ -0,0 +1,126 @@ +{ + "description": "expectedEventsForClient-eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "eventType can be set to command and cmap", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + }, + { + "description": "eventType defaults to command if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json new file mode 100644 index 0000000000..178b756c2c --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json @@ -0,0 +1,151 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "ignoreExtraEvents can be set to false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": false, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents can be set to true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents defaults to false if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "insert": "coll0", + "documents": [ + { + "_id": 4 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json new file mode 100644 index 0000000000..cf7bd60826 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json @@ -0,0 +1,68 @@ +{ + "description": "expectedEventsForClient-topologyDescriptionChangedEvent", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "can assert on values of newDescription and previousDescription fields", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "directConnection": true + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/ignoreResultAndError.json b/test/unified-test-format/valid-pass/ignoreResultAndError.json new file mode 100644 index 0000000000..2e9b1c58ab --- /dev/null +++ b/test/unified-test-format/valid-pass/ignoreResultAndError.json @@ -0,0 +1,59 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are ignored if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json new file mode 100644 index 0000000000..7cc74939eb --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json @@ -0,0 +1,52 @@ +{ + "description": "kmsProviders-explicit_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": 
"secretAccessKey" + }, + "azure": { + "tenantId": "tenantId", + "clientId": "clientId", + "clientSecret": "clientSecret" + }, + "gcp": { + "email": "email", + "privateKey": "cHJpdmF0ZUtleQo=" + }, + "kmip": { + "endpoint": "endpoint" + }, + "local": { + "key": "a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json new file mode 100644 index 0000000000..363f2a4576 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json @@ -0,0 +1,54 @@ +{ + "description": "kmsProviders-mixed_kms_credential_fields", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": "tenantId", + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": "email", + "privateKey": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json new file mode 100644 index 0000000000..3f7721f01d --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json @@ -0,0 +1,70 @@ +{ + "description": "kmsProviders-placeholder_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json new file mode 100644 index 0000000000..12ca580941 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json @@ -0,0 +1,39 @@ +{ + "description": "kmsProviders-unconfigured_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + 
"keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {}, + "azure": {}, + "gcp": {}, + "kmip": {}, + "local": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "skipReason": "DRIVERS-2280: waiting on driver support for on-demand credentials", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json new file mode 100644 index 0000000000..d3ae5665be --- /dev/null +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -0,0 +1,706 @@ +{ + "description": "observeSensitiveCommands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": false + } + }, + { + "client": { + "id": "client2", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database2", + "client": "client2", + "databaseName": "observeSensitiveCommands" + } + } + ], + "tests": [ + { + "description": "getnonce is observed with observeSensitiveCommands=true", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce is not observed with observeSensitiveCommands=false", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [] + } + ] + }, + { + "description": "getnonce is not observed by default", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello with speculativeAuthenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": 
"runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello without speculativeAuthenticate is always observed", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculativeAuthenticate", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + 
"command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "legacy hello without speculativeAuthenticate is always observed", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operation-empty_array.json b/test/unified-test-format/valid-pass/operation-empty_array.json new file mode 100644 index 0000000000..93b25c983c --- /dev/null +++ 
b/test/unified-test-format/valid-pass/operation-empty_array.json @@ -0,0 +1,10 @@ +{ + "description": "operation-empty_array", + "schemaVersion": "1.0", + "tests": [ + { + "description": "Empty operations array", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-lte.json b/test/unified-test-format/valid-pass/operator-lte.json new file mode 100644 index 0000000000..7a6a8057ad --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-lte.json @@ -0,0 +1,88 @@ +{ + "description": "operator-lte", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 2, + "y": 3, + "z": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 2 + }, + "x": { + "$$lte": 2.1 + }, + "y": { + "$$lte": { + "$numberLong": "3" + } + }, + "z": { + "$$lte": 4 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-matchAsDocument.json b/test/unified-test-format/valid-pass/operator-matchAsDocument.json new file mode 100644 index 0000000000..fd8b514d4a --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsDocument.json @@ -0,0 +1,124 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2.0 }" + }, + { + "_id": 2, + "json": "{ \"x\": { \"$oid\": \"57e193d7a9cc81b4027498b5\" } }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument decodes Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "x": { + "$$type": "objectId" + } + } + } + } + ] + 
} + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-matchAsRoot.json b/test/unified-test-format/valid-pass/operator-matchAsRoot.json new file mode 100644 index 0000000000..1966e3b377 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsRoot.json @@ -0,0 +1,151 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + }, + { + "_id": 2, + "json": "{ \"x\": 1, \"y\": 2 }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2, + "z": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot with matchAsDocument", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-type-number_alias.json b/test/unified-test-format/valid-pass/operator-type-number_alias.json new file mode 100644 index 0000000000..e628d0d777 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-type-number_alias.json @@ -0,0 +1,174 @@ +{ + "description": "operator-type-number_alias", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "type number alias matches int32", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberInt": "2147483647" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches int64", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + 
"$numberLong": "9223372036854775807" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches double", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDouble": "2.71828" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches decimal128", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDecimal": "3.14159" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-change-streams.json b/test/unified-test-format/valid-pass/poc-change-streams.json new file mode 100644 index 0000000000..50f0d06f08 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-change-streams.json @@ -0,0 +1,455 @@ +{ + "description": "poc-change-streams", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "change-stream-tests" + } + }, + { + "database": { + "id": "database2", + "client": "client1", + "databaseName": "change-stream-tests-2" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "test2" + } + }, + { + "collection": { + "id": "collection3", + "database": "database2", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test2", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test", + "databaseName": "change-stream-tests-2", + "documents": [] + } + ], + "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in 
notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection2", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection3", + "arguments": { + "document": { + "y": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "z": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test2" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests-2", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "y": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "z": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true, + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "batchSize": 1, + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 2 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": 
"objectId" + }, + "x": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-command-monitoring.json b/test/unified-test-format/valid-pass/poc-command-monitoring.json new file mode 100644 index 0000000000..fe0a5ae991 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-command-monitoring.json @@ -0,0 +1,223 @@ +{ + "description": "poc-command-monitoring", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + 
"id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json new file mode 100644 index 0000000000..94e4ec5682 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -0,0 +1,450 @@ +{ + "description": "poc-crud", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database0", + "collectionName": "coll2", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "collectionName": "coll2", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "BulkWrite with mixed ordered operations", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "3": 4 + } + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 1, + "upsertedIds": { + "5": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 34 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + 
{ + "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "name": "insertMany", + "object": "collection1", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "readConcern majority with out stage", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection2", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll2", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ], + "readConcern": { + "level": "majority" + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $listLocalSessions", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database1", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "dummy": "dummy field" + } + }, + { + "$project": { + "_id": 0, + "dummy": 1 + } + } + ] + }, + "expectResult": [ + { + "dummy": "dummy field" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-gridfs.json b/test/unified-test-format/valid-pass/poc-gridfs.json new file mode 100644 index 0000000000..1f07a19bf6 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-gridfs.json @@ -0,0 +1,301 @@ +{ + "description": "poc-gridfs", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + 
"database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Delete when length is 10", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ] + }, + { + "description": "Download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } + } + ] + }, + { + "description": "Download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "oid0" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {}, + "sort": { + "uploadDate": -1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "oid0" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "_id": { + "$gt": { + "$oid": "000000000000000000000007" + } + } + }, + "sort": { 
+ "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-queryable-encryption.json b/test/unified-test-format/valid-pass/poc-queryable-encryption.json new file mode 100644 index 0000000000..309d1d3b4b --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-queryable-encryption.json @@ -0,0 +1,193 @@ +{ + "description": "poc-queryable-encryption", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "csfle": true, + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "encrypted" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "encrypted" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "poc-queryable-encryption", + "collectionName": "encrypted", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + ], + "tests": [ + { + "description": "insert, replace, and find with queryable encryption", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": 11 + } + } + }, + { + "object": "encryptedColl", + "name": "replaceOne", + "arguments": { + "filter": { + "encryptedInt": 11 + }, + "replacement": { + "encryptedInt": 22 + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "encryptedInt": 22 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": 22 + } + ] + }, + { + "object": 
"unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhS16TJojgDDBtbluxBokvcotP1mQTGeYpNt8xd3MJQ=", + "subType": "00" + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-reads.json b/test/unified-test-format/valid-pass/poc-retryable-reads.json new file mode 100644 index 0000000000..2b65d501a7 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-reads.json @@ -0,0 +1,433 @@ +{ + "description": "poc-retryable-reads", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryReads": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + 
}, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection1", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "name": "listDatabases", + "object": "client0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json new file mode 100644 index 0000000000..f19aa3f9d8 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -0,0 +1,491 @@ +{ + "description": "poc-retryable-writes", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" 
+ } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate is committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is not committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is never committed", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + 
"object": "collection0", + "arguments": { + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-sessions.json b/test/unified-test-format/valid-pass/poc-sessions.json new file mode 100644 index 0000000000..117c9e7d00 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-sessions.json @@ -0,0 +1,466 @@ +{ + "description": "poc-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + 
"operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } 
+ } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json new file mode 100644 index 0000000000..9ab44a9c54 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -0,0 +1,505 @@ +{ + "description": "poc-transactions-convenient-api", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + }, + { + "session": 
{ + "id": "session2", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "name": "withTransaction", + "object": "session2", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json new file mode 100644 index 0000000000..de08edec44 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -0,0 +1,409 @@ +{ + "description": "poc-transactions-mongos-pin-auto", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ 
+ { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "remain pinned after non-transient Interrupted error on insertOne", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorCodeName": "Interrupted" + } + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$type": "object" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + 
"_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$type": "object" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json new file mode 100644 index 0000000000..2055a3b705 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -0,0 +1,323 @@ +{ + "description": "poc-transactions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "assertSessionTransactionState", + "object": "testRunner", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "explicitly create collection using create command", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + 
"sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a non-existing collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "session": "session0", + "name": "x_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} 
diff --git a/test/unified_format.py b/test/unified_format.py new file mode 100644 index 0000000000..0c5f68edd3 --- /dev/null +++ b/test/unified_format.py @@ -0,0 +1,1625 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. + +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md +""" +from __future__ import annotations + +import asyncio +import binascii +import copy +import functools +import os +import re +import sys +import time +import traceback +from collections import defaultdict +from inspect import iscoroutinefunction +from test import ( + IntegrationTest, + client_context, + client_knobs, + unittest, +) +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS +from test.unified_format_shared import ( + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, +) +from test.utils import flaky, get_pool +from test.utils_shared import ( + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + snake_to_camel, + wait_until, +) +from test.utils_spec_runner import SpecRunnerThread +from test.version import Version +from typing import Any, Dict, List, Mapping, Optional + +import pytest + +import pymongo +from bson import SON, json_util +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.objectid import ObjectId +from gridfs import GridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile +from pymongo import ASCENDING, CursorType, MongoClient, _csot +from pymongo.driver_info import DriverInfo +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + EncryptionError, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, +) +from pymongo.monitoring import ( + CommandStartedEvent, +) +from pymongo.operations import ( + SearchIndexModel, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.change_stream import ChangeStream +from pymongo.synchronous.client_session import ClientSession, TransactionOptions, _TxnState +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.encryption import ClientEncryption +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address +from pymongo.write_concern import WriteConcern + +_IS_SYNC 
= True + +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + + +def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get("topologies") + if req_topologies: + topology_satisfied = client_context.is_topology_type(req_topologies) + + server_version = Version(*client_context.version[:3]) + + min_version_satisfied = True + req_min_server_version = requirement.get("minServerVersion") + if req_min_server_version: + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version + + max_version_satisfied = True + req_max_server_version = requirement.get("maxServerVersion") + if req_max_server_version: + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version + + params_satisfied = True + params = requirement.get("serverParameters") + if params: + for param, val in params.items(): + if param not in client_context.server_parameters: + params_satisfied = False + elif client_context.server_parameters[param] != val: + params_satisfied = False + + auth_satisfied = True + req_auth = requirement.get("auth") + if req_auth is not None: + if req_auth: + auth_satisfied = client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = client_context.check_auth_type(requirement["authMechanism"]) + else: + auth_satisfied = not client_context.auth_enabled + + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True + + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and params_satisfied + and auth_satisfied + and csfle_satisfied + ) + + +class NonLazyCursor: + """A find cursor proxy that creates the remote cursor when initialized.""" + + def __init__(self, find_cursor, client): + self.client = client + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = None + + @classmethod + def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = next(cursor.find_cursor) + except StopIteration: + cursor.first_result = None + return cursor + + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + + def __next__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return next(self.find_cursor) + + # Added to support the iterateOnce operation. + try_next = __next__ + + def close(self): + self.find_cursor.close() + self.client = None + + +class EntityMapUtil: + """Utility class that implements an entity map as per the unified + test format specification. 
+ """ + + def __init__(self, test_class): + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class + + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self.test.fail(f"Could not find entity named {item} in map") + + def __setitem__(self, key, value): + if not isinstance(key, str): + self.test.fail("Expected entity name of type str, got %s" % (type(key))) + + if key in self._entities: + self.test.fail(f"Entity named {key} already in map") + + self._entities[key] = value + + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + + def _create_entity(self, entity_spec, uri=None): + if len(entity_spec) != 1: + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") + + entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") + if entity_type == "client": + kwargs: dict = {} + observe_events = spec.get("observeEvents", []) + + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent + for i in range(len(observe_events)): + if "topologyOpeningEvent" == observe_events[i]: + observe_events[i] = "topologyOpenedEvent" + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, + ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): + if client_context.load_balancer: + kwargs["h"] = client_context.MULTI_MONGOS_LB_URI + elif client_context.is_mongos: + kwargs["h"] = client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") + if "waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if "waitQueueMultiple" in kwargs: + raise 
unittest.SkipTest("PyMongo does not support waitQueueMultiple") + if server_api: + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) + if uri: + kwargs["h"] = uri + client = self.test.rs_or_single_client(**kwargs) + client._connect() + self[spec["id"]] = client + return + elif entity_type == "database": + client = self[spec["client"]] + if type(client).__name__ != "MongoClient": + self.test.fail( + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) + return + elif entity_type == "collection": + database = self[spec["database"]] + if not isinstance(database, Database): + self.test.fail( + "Expected entity {} to be of type Database, got {}".format( + spec["database"], type(database) + ) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) + return + elif entity_type == "session": + client = self[spec["client"]] + if type(client).__name__ != "MongoClient": + self.test.fail( + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts["default_transaction_options"] = txn_opts + session = client.start_session(**dict(opts)) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) + self.test.addCleanup(session.end_session) + return + elif entity_type == "bucket": + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + bucket = GridFSBucket(db, **kwargs) + + # PyMongo does not support GridFSBucket.drop(), emulate it. + @_csot.apply + def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: + self._files.drop(*args, **kwargs) + self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket + return + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". 
+ kms_tls_options = {} + for provider in opts["kms_providers"]: + provider_type = provider.split(":")[0] + if provider_type in KMS_TLS_OPTS: + kms_tls_options[provider] = KMS_TLS_OPTS[provider_type] + self[spec["id"]] = ClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", kms_tls_options), + opts.get("key_expiration_ms"), + ) + return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self.test.addCleanup(thread.join, 5) + self[name] = thread + return + + self.test.fail(f"Unable to create entity of unknown type {entity_type}") + + def create_entities_from_spec(self, entity_spec, uri=None): + for spec in entity_spec: + self._create_entity(spec, uri=uri) + + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: + client = self[client_name] + if type(client).__name__ != "MongoClient": + self.test.fail( + f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" + ) + + listener = self._listeners.get(client_name) + if not listener: + self.test.fail(f"No listeners configured for client {client_name}") + + return listener + + def get_lsid_for_session(self, session_name): + session = self[session_name] + if not isinstance(session, ClientSession): + self.test.fail( + f"Expected entity {session_name} to be of type ClientSession, got {type(session)}" + ) + + try: + return session.session_id + except InvalidOperation: + # session has been closed. + return self._session_lsids[session_name] + + def advance_cluster_times(self, cluster_time) -> None: + """Manually synchronize entities when desired""" + for entity in self._entities.values(): + if isinstance(entity, ClientSession) and cluster_time: + entity.advance_cluster_time(cluster_time) + + +class UnifiedSpecTestMixinV1(IntegrationTest): + """Mixin class to run test cases from test specification files. + + Assumes that tests conform to the `unified test format + `_. + + Specification of the test suite being currently run is available as + a class attribute ``TEST_SPEC``. + """ + + SCHEMA_VERSION = Version.from_string("1.25") + RUN_ON_LOAD_BALANCER = True + TEST_SPEC: Any + TEST_PATH = "" # This gets filled in by generate_test_classes + mongos_clients: list[MongoClient] = [] + + @staticmethod + def should_run_on(run_on_spec): + if not run_on_spec: + # Always run these tests. + return True + + for req in run_on_spec: + if is_run_on_requirement_satisfied(req): + return True + return False + + def insert_initial_data(self, initial_data): + for i, collection_data in enumerate(initial_data): + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + opts = collection_data.get("createOptions", {}) + documents = collection_data["documents"] + + # Setup the collection with as few majority writes as possible. + db = self.client[db_name] + db.drop_collection(coll_name) + # Only use majority wc only on the final write. + if i == len(initial_data) - 1: + wc = WriteConcern(w="majority") + else: + wc = WriteConcern(w=1) + + # Remove any encryption collections associated with the collection. 
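+            # Queryable Encryption creates metadata collections named
+            # "enxcol_.<collection>.esc" and "enxcol_.<collection>.ecoc";
+            # drop them too so state from an earlier encrypted test run
+            # cannot leak into this one.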
+ collections = db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + db.drop_collection(collection) + + if documents: + if opts: + db.create_collection(coll_name, **opts) + db.get_collection(coll_name, write_concern=wc).insert_many(documents) + else: + # Ensure collection exists + db.create_collection(coll_name, write_concern=wc, **opts) + + @classmethod + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + def setUp(self): + # super call creates internal client cls.client + super().setUp() + # process file-level runOnRequirements + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") + + # add any special-casing for skipping tests here + + # Handle mongos_clients for transactions tests. + self.mongos_clients = [] + if client_context.supports_transactions() and not client_context.load_balancer: + for address in client_context.mongoses: + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) + + # process schemaVersion + # note: we check major schema version during class generation + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) + self.assertLessEqual( + version, + self.SCHEMA_VERSION, + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", + ) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: + self.skipTest("Implement PYTHON-1894") + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in description + for x in [ + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") + + if "csot" in class_name: + # Skip tests that are too slow to run on a given platform. 
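+            # Each entry below is a regex; it is matched with re.match
+            # against the lower-cased test description.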
+ slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + + # Some tests need to be skipped based on the operations they try to run. + for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if not client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + + def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception + # Connection errors are considered client errors. 
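+            # NotPrimaryError is excluded because it is constructed from a
+            # server reply, not from a failure to reach the server.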
+            if isinstance(error, ConnectionFailure):
+                self.assertNotIsInstance(error, NotPrimaryError)
+            elif isinstance(error, CorruptGridFile):
+                pass
+            elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)):
+                pass
+            else:
+                self.assertNotIsInstance(error, PyMongoError)
+
+        if is_timeout_error:
+            self.assertIsInstance(exception, PyMongoError)
+            if not exception.timeout:
+                # Re-raise the exception for better diagnostics.
+                raise exception
+
+        if error_contains:
+            if isinstance(exception, BulkWriteError):
+                errmsg = str(exception.details).lower()
+            elif isinstance(exception, ClientBulkWriteException):
+                errmsg = str(exception.details).lower()
+            else:
+                errmsg = str(exception).lower()
+            self.assertIn(error_contains.lower(), errmsg)
+
+        if error_code:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code, exception.error.details.get("code"))
+            else:
+                self.assertEqual(error_code, exception.details.get("code"))
+
+        if error_code_name:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code_name, exception.error.details.get("codeName"))
+            else:
+                self.assertEqual(error_code_name, exception.details.get("codeName"))
+
+        if error_labels_contain:
+            if isinstance(exception, ClientBulkWriteException):
+                error = exception.error
+            else:
+                error = exception
+            labels = [
+                err_label for err_label in error_labels_contain if error.has_error_label(err_label)
+            ]
+            self.assertEqual(labels, error_labels_contain)
+
+        if error_labels_omit:
+            for err_label in error_labels_omit:
+                if exception.has_error_label(err_label):
+                    self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'")
+
+        if expect_result:
+            if isinstance(exception, BulkWriteError):
+                result = parse_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            elif isinstance(exception, ClientBulkWriteException):
+                result = parse_client_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            else:
+                self.fail(
+                    f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}"
+                )
+
+        return exception
+
+    def __raise_if_unsupported(self, opname, target, *target_types):
+        if not isinstance(target, target_types):
+            self.fail(f"Operation {opname} not supported for entity of type {type(target)}")
+
+    def __entityOperation_createChangeStream(self, target, *args, **kwargs):
+        self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection)
+        stream = target.watch(*args, **kwargs)
+        self.addCleanup(stream.close)
+        return stream
+
+    def _clientOperation_createChangeStream(self, target, *args, **kwargs):
+        return self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    def _databaseOperation_createChangeStream(self, target, *args, **kwargs):
+        return self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    def _collectionOperation_createChangeStream(self, target, *args, **kwargs):
+        return self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    def _databaseOperation_runCommand(self, target, **kwargs):
+        self.__raise_if_unsupported("runCommand", target, Database)
+        # Ensure the first key is the command name.
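+        # MongoDB identifies a command by the first key of the command
+        # document, so the document is built with SON to preserve key order,
+        # e.g. SON([("ping", 1)]) serializes with "ping" first.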
+ ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + return target.command(**kwargs) + + def _databaseOperation_runCursorCommand(self, target, **kwargs): + return (self._databaseOperation_createCommandCursor(target, **kwargs)).to_list() + + def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, Database) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + batch_size = 0 + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS") + + if "batch_size" in kwargs: + batch_size = kwargs.pop("batch_size") + + cursor = target.cursor_command(**kwargs) + + if batch_size > 0: + cursor.batch_size(batch_size) + + return cursor + + def _collectionOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _collectionOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _collectionOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + def _databaseOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _databaseOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _databaseOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + def kill_all_sessions(self): + if getattr(self, "client", None) is None: + return + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. 
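+                # Ignoring these errors is fine: this is best-effort cleanup
+                # of server sessions between tests.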
+ pass + + def _databaseOperation_listCollections(self, target, *args, **kwargs): + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} + cursor = target.list_collections(*args, **kwargs) + return cursor.to_list() + + def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + kwargs["check_exists"] = False + ret = target.create_collection(*args, **kwargs) + return ret + + def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported("aggregate", target, Database, Collection) + return (target.aggregate(*args, **kwargs)).to_list() + + def _databaseOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, Collection) + find_cursor = target.find(*args, **kwargs) + return find_cursor.to_list() + + def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, Collection) + if "filter" not in kwargs: + self.fail('createFindCursor requires a "filter" argument') + cursor = NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) + self.addCleanup(cursor.close) + return cursor + + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + + def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for list_indexes") + return (target.list_indexes(*args, **kwargs)).to_list() + + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + + def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return target.create_search_indexes(models) + + def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return (target.list_search_indexes(name, **agg_kwargs)).to_list() + + def _sessionOperation_withTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("withTransaction", target, ClientSession) + return target.with_transaction(*args, **kwargs) + + def _sessionOperation_startTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("startTransaction", target, ClientSession) + return target.start_transaction(*args, **kwargs) + + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) + return next(target) + + def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, CommandCursor + ) + while target.alive: + try: + return next(target) + except StopIteration: + pass + return None + + def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) + return target.close() + + def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = 
kwargs["driver_info_options"] + driver_info = DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + + return target.create_data_key(*args, **kwargs) + + def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return target.get_keys(*args, **kwargs).to_list() + + def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + data = target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return target.encrypt(*args, **kwargs) + + def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: + with target.open_download_stream(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_downloadByName( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + with target.open_download_stream_by_name(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_upload(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream(*args, **kwargs) + + def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream_with_id(*args, **kwargs) + + def _bucketOperation_find( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return target.find(*args, **kwargs).to_list() + + def run_entity_operation(self, spec): + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") + if ignore and (expect_error or save_as_entity or expect_result): + raise ValueError( + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments( + spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, + ) + else: + arguments = {} + + if isinstance(target, MongoClient): + method_name = f"_clientOperation_{opname}" + elif isinstance(target, Database): + method_name = f"_databaseOperation_{opname}" + elif isinstance(target, Collection): + method_name = f"_collectionOperation_{opname}" + # contentType is always stored in metadata in pymongo. 
+ if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") + elif isinstance(target, ChangeStream): + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, CommandCursor)): + method_name = f"_cursor_{opname}" + elif isinstance(target, ClientSession): + method_name = f"_sessionOperation_{opname}" + elif isinstance(target, GridFSBucket): + method_name = f"_bucketOperation_{opname}" + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) + elif isinstance(target, ClientEncryption): + method_name = f"_clientEncryptionOperation_{opname}" + else: + method_name = "doesNotExist" + + try: + method = getattr(self, method_name) + except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" + try: + cmd = getattr(target, target_opname) + except AttributeError: + self.fail(f"Unsupported operation {opname} on entity {target}") + else: + cmd = functools.partial(method, target) + + try: + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments: + timeout = arguments.pop("timeout") + with pymongo.timeout(timeout): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) + except Exception as exc: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. + if ignore and isinstance(exc, (PyMongoError,)): + return exc + if expect_error: + return self.process_error(exc, expect_error) + raise + else: + if expect_error: + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') + + if expect_result: + actual = coerce_result(opname, result) + self.match_evaluator.match_result(expect_result, actual) + + if save_as_entity: + self.entity_map[save_as_entity] = result + return None + return None + + def __set_fail_point(self, client, command_args): + if not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + self.configure_fail_point(client, command_args) + self.addCleanup(self.configure_fail_point, client, command_args, off=True) + + def _testOperation_failPoint(self, spec): + self.__set_fail_point( + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) + + def _testOperation_targetedFailPoint(self, spec): + session = self.entity_map[spec["session"]] + if not session._pinned_address: + self.fail( + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) + ) + + client = self.single_client("{}:{}".format(*session._pinned_address)) + self.__set_fail_point(client=client, command_args=spec["failPoint"]) + + def _testOperation_createEntities(self, spec): + self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + self.entity_map.advance_cluster_times(self._cluster_time) + + def _testOperation_assertSessionTransactionState(self, spec): + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) + self.assertEqual(expected_state, session._transaction.state) + + def _testOperation_assertSessionPinned(self, spec): + session = self.entity_map[spec["session"]] + 
self.assertIsNotNone(session._transaction.pinned_address) + + def _testOperation_assertSessionUnpinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + def __get_last_two_command_lsids(self, listener): + cmd_started_events = [] + for event in reversed(listener.events): + if isinstance(event, CommandStartedEvent): + cmd_started_events.append(event) + if len(cmd_started_events) < 2: + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) + + def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSessionDirty(self, spec): + session = self.entity_map[spec["session"]] + self.assertTrue(session._server_session.dirty) + + def _testOperation_assertSessionNotDirty(self, spec): + session = self.entity_map[spec["session"]] + return self.assertFalse(session._server_session.dirty) + + def _testOperation_assertCollectionExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) + self.assertIn(collection_name, collection_name_list) + + def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) + self.assertNotIn(collection_name, collection_name_list) + + def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) + + def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + for index in collection.list_indexes(): + self.assertNotEqual(spec["indexName"], index["name"]) + + def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec["client"]] + pool = get_pool(client) + self.assertEqual(spec["connections"], pool.active_sockets) + + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event(event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. 
+ """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + time.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + def primary_changed() -> bool: + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + + def _testOperation_loop(self, spec): + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 + i = 0 + global IS_INTERRUPTED + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + for op in spec["operations"]: + self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + except Exception as exc: + if isinstance(exc, AssertionError): + key = failure_key or error_key + else: + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) + + def run_special_operation(self, spec): + opname = spec["name"] + method_name = f"_testOperation_{opname}" + try: + method = getattr(self, method_name) + except AttributeError: + self.fail(f"Unsupported special test operation {opname}") + else: + if iscoroutinefunction(method): + method(spec["arguments"]) + else: + method(spec["arguments"]) + + def run_operations(self, 
spec): + for op in spec: + if op["object"] == "testRunner": + self.run_special_operation(op) + else: + self.run_entity_operation(op) + + def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + self.run_special_operation(op) + else: + result = self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec["client"] + events = event_spec["events"] + event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + + if len(events) == 0: + self.assertEqual(actual_events, []) + continue + + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event(expected_event, actual_events[idx]) + + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + + def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.getMessage()) + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") + + if "failureIsRedacted" in expected_msg: + self.assertIn("failure", actual_data) + should_redact = expected_msg.pop("failureIsRedacted") + if should_redact: + actual_fields 
= set(json_util.loads(actual_data["failure"]).keys())
+                        self.assertTrue(
+                            {"code", "codeName", "errorLabels"}.issuperset(actual_fields)
+                        )
+
+                self.match_evaluator.match_result(expected_data, actual_data)
+                self.match_evaluator.match_result(expected_msg, actual_msg)
+
+    def verify_outcome(self, spec):
+        for collection_data in spec:
+            coll_name = collection_data["collectionName"]
+            db_name = collection_data["databaseName"]
+            expected_documents = collection_data["documents"]
+
+            coll = self.client.get_database(db_name).get_collection(
+                coll_name,
+                read_preference=ReadPreference.PRIMARY,
+                read_concern=ReadConcern(level="local"),
+            )
+
+            if expected_documents:
+                sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"])
+                actual_documents = coll.find({}, sort=[("_id", ASCENDING)]).to_list()
+                self.assertListEqual(sorted_expected_documents, actual_documents)
+
+    def run_scenario(self, spec, uri=None):
+        # Kill all sessions before and after each test to prevent an open
+        # transaction (from a test failure) from blocking collection/database
+        # operations during test set up and tear down.
+        self.kill_all_sessions()
+
+        # Handle flaky tests.
+        flaky_tests = [
+            ("PYTHON-5170", ".*test_discovery_and_monitoring.*"),
+            ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"),
+            ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"),
+            ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"),
+            ("PYTHON-3689", ".*TestProse.test_load_balancing"),
+            ("PYTHON-3522", ".*csot.*"),
+        ]
+        for reason, flaky_test in flaky_tests:
+            if re.match(flaky_test.lower(), self.id().lower()) is not None:
+                func_name = self.id()
+                options = dict(reason=reason, reset_func=self.setUp, func_name=func_name)
+                if "csot" in func_name.lower():
+                    options["max_runs"] = 3
+                    options["affects_cpython_linux"] = True
+                decorator = flaky(**options)
+                decorator(self._run_scenario)(spec, uri)
+                return
+        self._run_scenario(spec, uri)
+
+    def _run_scenario(self, spec, uri=None):
+        # maybe skip test manually
+        self.maybe_skip_test(spec)
+
+        # process test-level runOnRequirements
+        run_on_spec = spec.get("runOnRequirements", [])
+        if not self.should_run_on(run_on_spec):
+            raise unittest.SkipTest("runOnRequirements not satisfied")
+
+        # process skipReason
+        skip_reason = spec.get("skipReason", None)
+        if skip_reason is not None:
+            raise unittest.SkipTest(skip_reason)
+
+        # process createEntities
+        self._uri = uri
+        self.entity_map = EntityMapUtil(self)
+        self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri)
+        self._cluster_time = None
+        # process initialData
+        if "initialData" in self.TEST_SPEC:
+            self.insert_initial_data(self.TEST_SPEC["initialData"])
+            self._cluster_time = self.client._topology.max_cluster_time()
+            self.entity_map.advance_cluster_times(self._cluster_time)
+
+        if "expectLogMessages" in spec:
+            expect_log_messages = spec["expectLogMessages"]
+            self.assertTrue(expect_log_messages, "expectLogMessages must be non-empty")
+            self.check_log_messages(spec["operations"], expect_log_messages)
+        else:
+            # process operations
+            self.run_operations(spec["operations"])
+
+        # process expectEvents
+        if "expectEvents" in spec:
+            expect_events = spec["expectEvents"]
+            self.assertTrue(expect_events, "expectEvents must be non-empty")
+            self.check_events(expect_events)
+
+        # process outcome
+        self.verify_outcome(spec.get("outcome", []))
+
+
+class UnifiedSpecTestMeta(type):
+    """Metaclass for generating test classes."""
+
+    TEST_SPEC: Any
EXPECTED_FAILURES: Any + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + def create_test(spec): + def test_case(self): + self.run_scenario(spec) + + return test_case + + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} + + +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], # noqa: B006 + bypass_test_generation_errors=False, + **kwargs, +): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects. + """ + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked. + """ + + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + + base = SpecTestBase + + # Add "encryption" marker if the "csfle" runOnRequirement is set. + for req in test_spec.get("runOnRequirements", []): + if "csfle" in req: + base = pytest.mark.encryption(base) + + return base + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = "Test{}_{}_{}".format( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) + if mixin_class is None: + raise ValueError( + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" + ) + module_dict = {"__module__": module, "TEST_PATH": test_path} + module_dict.update(kwargs) + test_klasses[class_name] = type( + class_name, + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py new file mode 100644 index 0000000000..5aa989cb24 --- /dev/null +++ b/test/unified_format_shared.py @@ -0,0 +1,685 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared utility functions and constants for the unified test format runner.
+
+https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md
+"""
+from __future__ import annotations
+
+import binascii
+import collections
+import datetime
+import os
+import time
+import types
+from collections import abc
+from test.helpers_shared import (
+    AWS_CREDS,
+    AWS_CREDS_2,
+    AWS_TEMP_CREDS,
+    AZURE_CREDS,
+    CA_PEM,
+    CLIENT_PEM,
+    GCP_CREDS,
+    KMIP_CREDS,
+    LOCAL_MASTER_KEY,
+)
+from test.utils_shared import CMAPListener, camel_to_snake, parse_collection_options
+from typing import Any, MutableMapping, Union
+
+from bson import (
+    RE_TYPE,
+    Binary,
+    Code,
+    DBRef,
+    Decimal128,
+    Int64,
+    MaxKey,
+    MinKey,
+    ObjectId,
+    Regex,
+    json_util,
+)
+from pymongo.monitoring import (
+    _SENSITIVE_COMMANDS,
+    CommandFailedEvent,
+    CommandListener,
+    CommandStartedEvent,
+    CommandSucceededEvent,
+    ConnectionCheckedInEvent,
+    ConnectionCheckedOutEvent,
+    ConnectionCheckOutFailedEvent,
+    ConnectionCheckOutStartedEvent,
+    ConnectionClosedEvent,
+    ConnectionCreatedEvent,
+    ConnectionReadyEvent,
+    PoolClearedEvent,
+    PoolClosedEvent,
+    PoolCreatedEvent,
+    PoolReadyEvent,
+    ServerClosedEvent,
+    ServerDescriptionChangedEvent,
+    ServerHeartbeatFailedEvent,
+    ServerHeartbeatListener,
+    ServerHeartbeatStartedEvent,
+    ServerHeartbeatSucceededEvent,
+    ServerListener,
+    ServerOpeningEvent,
+    TopologyClosedEvent,
+    TopologyDescriptionChangedEvent,
+    TopologyEvent,
+    TopologyListener,
+    TopologyOpenedEvent,
+    _CommandEvent,
+    _ConnectionEvent,
+    _PoolEvent,
+    _ServerEvent,
+    _ServerHeartbeatEvent,
+)
+from pymongo.results import BulkWriteResult
+from pymongo.server_description import ServerDescription
+from pymongo.topology_description import TopologyDescription
+
+JSON_OPTS = json_util.JSONOptions(tz_aware=False)
+
+IS_INTERRUPTED = False
+
+KMS_TLS_OPTS = {
+    "kmip": {
+        "tlsCAFile": CA_PEM,
+        "tlsCertificateKeyFile": CLIENT_PEM,
+    }
+}
+
+
+# Build up the placeholder map.
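+# Each key is a JSON-pointer-style path into an entity's options (for
+# example "/clientEncryptionOpts/kmsProviders/aws/accessKeyId") and each
+# value is the real credential substituted when a spec test supplies a
+# $$placeholder for that field.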
+PLACEHOLDER_MAP = {} +for provider_name, provider_data in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("local:name1", {"key": LOCAL_MASTER_KEY}), + ("aws_temp", AWS_TEMP_CREDS), + ("aws", AWS_CREDS), + ("aws:name1", AWS_CREDS), + ("aws:name2", AWS_CREDS_2), + ("azure", AZURE_CREDS), + ("azure:name1", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("gcp:name1", GCP_CREDS), + ("kmip", KMIP_CREDS), + ("kmip:name1", KMIP_CREDS), +]: + for key, value in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + + placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + +OIDC_ENV = os.environ.get("OIDC_ENV", "test") +if OIDC_ENV == "test": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} +elif OIDC_ENV == "azure": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": os.environ["AZUREOIDC_RESOURCE"], + } +elif OIDC_ENV == "gcp": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], + } +elif OIDC_ENV == "k8s": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "k8s"} + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. + + Vendored from six: https://github.com/benjaminp/six/blob/master/six.py + """ + + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + # __orig_bases__ is required by PEP 560. + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d["__orig_bases__"] = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__( + cls, name: str, this_bases: tuple[type, ...], /, **kwds: Any + ) -> MutableMapping[str, object]: + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) + + +def parse_collection_or_database_options(options): + return parse_collection_options(options) + + +def parse_bulk_write_result(result): + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } + + +def parse_client_bulk_write_individual(op_type, result): + if op_type == "insert": + return {"insertedId": result.inserted_id} + if op_type == "update": + if result.upserted_id: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedId": result.upserted_id, + } + else: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + } + if op_type == "delete": + return { + "deletedCount": result.deleted_count, + } + + +def parse_client_bulk_write_result(result): + insert_results, update_results, delete_results = {}, {}, {} + if result.has_verbose_results: + for idx, res in result.insert_results.items(): + insert_results[str(idx)] = parse_client_bulk_write_individual("insert", res) + for idx, res in result.update_results.items(): + update_results[str(idx)] = parse_client_bulk_write_individual("update", res) + for idx, res 
in result.delete_results.items():
+            delete_results[str(idx)] = parse_client_bulk_write_individual("delete", res)
+
+    return {
+        "deletedCount": result.deleted_count,
+        "insertedCount": result.inserted_count,
+        "matchedCount": result.matched_count,
+        "modifiedCount": result.modified_count,
+        "upsertedCount": result.upserted_count,
+        "insertResults": insert_results,
+        "updateResults": update_results,
+        "deleteResults": delete_results,
+    }
+
+
+def parse_bulk_write_error_result(error):
+    write_result = BulkWriteResult(error.details, True)
+    return parse_bulk_write_result(write_result)
+
+
+def parse_client_bulk_write_error_result(error):
+    write_result = error.partial_result
+    if not write_result:
+        return None
+    return parse_client_bulk_write_result(write_result)
+
+
+class EventListenerUtil(
+    CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener, TopologyListener
+):
+    def __init__(
+        self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map
+    ):
+        self._event_types = {name.lower() for name in observe_events}
+        if observe_sensitive_commands:
+            self._observe_sensitive_commands = True
+            self._ignore_commands = set(ignore_commands)
+        else:
+            self._observe_sensitive_commands = False
+            self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands)
+        self._ignore_commands.add("configurefailpoint")
+        self._event_mapping = collections.defaultdict(list)
+        self.entity_map = entity_map
+        if store_events:
+            for store_event in store_events:
+                entity_id = store_event["id"]
+                event_names = (name.lower() for name in store_event["events"])
+                for event_name in event_names:
+                    self._event_mapping[event_name].append(entity_id)
+                self.entity_map[entity_id] = []
+        super().__init__()
+
+    def get_events(self, event_type):
+        assert event_type in ("command", "cmap", "sdam", "all"), event_type
+        if event_type == "all":
+            return list(self.events)
+        if event_type == "command":
+            return [e for e in self.events if isinstance(e, _CommandEvent)]
+        if event_type == "cmap":
+            return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))]
+        return [
+            e
+            for e in self.events
+            if isinstance(e, (_ServerEvent, TopologyEvent, _ServerHeartbeatEvent))
+        ]
+
+    def add_event(self, event):
+        event_name = type(event).__name__.lower()
+        if event_name in self._event_types:
+            super().add_event(event)
+        for entity_id in self._event_mapping[event_name]:
+            self.entity_map[entity_id].append(
+                {
+                    "name": type(event).__name__,
+                    "observedAt": time.time(),
+                    "description": repr(event),
+                }
+            )
+
+    def _command_event(self, event):
+        if event.command_name.lower() not in self._ignore_commands:
+            self.add_event(event)
+
+    def started(self, event):
+        if isinstance(event, CommandStartedEvent):
+            if event.command == {}:
+                # Command is redacted. Observe only if flag is set.
+                if self._observe_sensitive_commands:
+                    self._command_event(event)
+            else:
+                self._command_event(event)
+        else:
+            self.add_event(event)
+
+    def succeeded(self, event):
+        if isinstance(event, CommandSucceededEvent):
+            if event.reply == {}:
+                # Command is redacted. Observe only if flag is set.
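+                # (The monitoring layer empties the replies of sensitive
+                # commands such as authenticate, saslStart, and saslContinue.)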
+ if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def failed(self, event): + if isinstance(event, CommandFailedEvent): + self._command_event(event) + else: + self.add_event(event) + + def opened(self, event: Union[ServerOpeningEvent, TopologyOpenedEvent]) -> None: + self.add_event(event) + + def description_changed( + self, event: Union[ServerDescriptionChangedEvent, TopologyDescriptionChangedEvent] + ) -> None: + self.add_event(event) + + def topology_changed(self, event: TopologyDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: + self.add_event(event) + + +binary_types = (Binary, bytes) +long_types = (Int64,) +unicode_type = str + + +BSON_TYPE_ALIAS_MAP = { + # https://mongodb.com/docs/manual/reference/operator/query/type/ + # https://pymongo.readthedocs.io/en/stable/api/bson/index.html + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), + "number": (float, int, Int64, Decimal128), +} + + +class MatchEvaluatorUtil: + """Utility class that implements methods for evaluating matches as per + the unified test format specification. + """ + + def __init__(self, test_class): + self.test = test_class + + def _operation_exists(self, spec, actual, key_to_compare): + if spec is True: + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) + elif spec is False: + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) + else: + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") + + def __type_alias_to_type(self, alias): + if alias not in BSON_TYPE_ALIAS_MAP: + self.test.fail(f"Unrecognized BSON type alias {alias}") + return BSON_TYPE_ALIAS_MAP[alias] + + def _operation_type(self, spec, actual, key_to_compare): + if isinstance(spec, abc.MutableSequence): + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) + else: + permissible_types = self.__type_alias_to_type(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertIsInstance(value, permissible_types) + + def _operation_matchesEntity(self, spec, actual, key_to_compare): + expected_entity = self.test.entity_map[spec] + self.test.assertEqual(expected_entity, actual[key_to_compare]) + + def _operation_matchesHexBytes(self, spec, actual, key_to_compare): + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) + + def _operation_unsetOrMatches(self, spec, actual, key_to_compare): + if key_to_compare is None and not actual: + # top-level document can be None when unset + return + + if key_to_compare not in actual: + # we add a dummy value for the compared key to pass map size check + actual[key_to_compare] = "dummyValue" + return + self.match_result(spec, actual[key_to_compare], in_recursive_call=True) + + 
def _operation_sessionLsid(self, spec, actual, key_to_compare): + expected_lsid = self.test.entity_map.get_lsid_for_session(spec) + self.test.assertEqual(expected_lsid, actual[key_to_compare]) + + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + + def _operation_matchAsDocument(self, spec, actual, key_to_compare): + self._match_document(spec, json_util.loads(actual[key_to_compare]), False, test=True) + + def _operation_matchAsRoot(self, spec, actual, key_to_compare): + if key_to_compare: + actual = actual[key_to_compare] + self._match_document(spec, actual, True, test=True) + + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_{}".format(opname.strip("$")) + try: + method = getattr(self, method_name) + except AttributeError: + self.test.fail(f"Unsupported special matching operator {opname}") + else: + method(spec, actual, key_to_compare) + + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): + """Returns True if a special operation is evaluated, False + otherwise. If the ``expectation`` map contains a single key, + value pair we check it for a special operation. + If given, ``key_to_compare`` is assumed to be the key in + ``expectation`` whose corresponding value needs to be + evaluated for a possible special operation. ``key_to_compare`` + is ignored when ``expectation`` has only one key. + """ + if not isinstance(expectation, abc.Mapping): + return False + + is_special_op, opname, spec = False, False, False + + if key_to_compare is not None: + if key_to_compare.startswith("$$"): + is_special_op = True + opname = key_to_compare + spec = expectation[key_to_compare] + key_to_compare = None + else: + nested = expectation[key_to_compare] + if isinstance(nested, abc.Mapping) and len(nested) == 1: + opname, spec = next(iter(nested.items())) + if opname.startswith("$$"): + is_special_op = True + elif len(expectation) == 1: + opname, spec = next(iter(expectation.items())) + if opname.startswith("$$"): + is_special_op = True + key_to_compare = None + + if is_special_op: + self._evaluate_special_operation( + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) + return True + + return False + + def _match_document(self, expectation, actual, is_root, test=False): + if self._evaluate_if_special_operation(expectation, actual): + return True + + self.test.assertIsInstance(actual, abc.Mapping) + for key, value in expectation.items(): + if self._evaluate_if_special_operation(expectation, actual, key): + continue + + self.test.assertIn(key, actual) + if not self.match_result(value, actual[key], in_recursive_call=True, test=test): + return False + + if not is_root: + expected_keys = set(expectation.keys()) + for key, value in expectation.items(): + if value == {"$$exists": False}: + expected_keys.remove(key) + if test: + self.test.assertEqual(expected_keys, set(actual.keys())) + else: + return set(expected_keys).issubset(set(actual.keys())) + return True + + def match_result(self, expectation, actual, in_recursive_call=False, test=True): + if isinstance(expectation, abc.Mapping): + return self._match_document( + expectation, actual, is_root=not in_recursive_call, test=test + ) + + if isinstance(expectation, abc.MutableSequence): + self.test.assertIsInstance(actual, abc.MutableSequence) + for e, a in zip(expectation, 
actual): + if isinstance(e, abc.Mapping): + res = self._match_document(e, a, is_root=not in_recursive_call, test=test) + else: + res = self.match_result(e, a, in_recursive_call=True, test=test) + if not res: + return False + return True + + # account for flexible numerics in element-wise comparison + if isinstance(expectation, (int, float)): + if test: + self.test.assertEqual(expectation, actual) + else: + return expectation == actual + else: + if test: + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) + else: + return isinstance(actual, type(expectation)) and expectation == actual + return True + + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "server_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_topology_description(self, actual: TopologyDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "topology_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_event_fields(self, actual: Any, spec: dict) -> None: + for field, expected in spec.items(): + if field == "command" and isinstance(actual, CommandStartedEvent): + command = spec["command"] + if command: + self.match_result(command, actual.command) + continue + if field == "reply" and isinstance(actual, CommandSucceededEvent): + reply = spec["reply"] + if reply: + self.match_result(reply, actual.reply) + continue + if field == "hasServiceId": + if spec["hasServiceId"]: + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + continue + if field == "hasServerConnectionId": + if spec["hasServerConnectionId"]: + self.test.assertIsNotNone(actual.server_connection_id) + self.test.assertIsInstance(actual.server_connection_id, int) + else: + self.test.assertIsNone(actual.server_connection_id) + continue + if field in ("previousDescription", "newDescription"): + if isinstance(actual, ServerDescriptionChangedEvent): + self.match_server_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + if isinstance(actual, TopologyDescriptionChangedEvent): + self.match_topology_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + + if field == "interruptInUseConnections": + field = "interrupt_connections" + else: + field = camel_to_snake(field) + self.test.assertEqual(getattr(actual, field), expected) + + def match_event(self, expectation, actual): + name, spec = next(iter(expectation.items())) + if name == "commandStartedEvent": + self.test.assertIsInstance(actual, CommandStartedEvent) + elif name == "commandSucceededEvent": + self.test.assertIsInstance(actual, CommandSucceededEvent) + elif name == "commandFailedEvent": + self.test.assertIsInstance(actual, CommandFailedEvent) + elif name == "poolCreatedEvent": + self.test.assertIsInstance(actual, PoolCreatedEvent) + elif name == "poolReadyEvent": + self.test.assertIsInstance(actual, PoolReadyEvent) + elif name == "poolClearedEvent": + self.test.assertIsInstance(actual, PoolClearedEvent) + self.test.assertIsInstance(actual.interrupt_connections, bool) + elif name == "poolClosedEvent": + self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == "connectionCreatedEvent": + self.test.assertIsInstance(actual, 
ConnectionCreatedEvent) + elif name == "connectionReadyEvent": + self.test.assertIsInstance(actual, ConnectionReadyEvent) + elif name == "connectionClosedEvent": + self.test.assertIsInstance(actual, ConnectionClosedEvent) + elif name == "connectionCheckOutStartedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) + elif name == "connectionCheckOutFailedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) + elif name == "connectionCheckedOutEvent": + self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) + elif name == "connectionCheckedInEvent": + self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + elif name == "serverHeartbeatStartedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) + elif name == "serverHeartbeatSucceededEvent": + self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) + elif name == "serverHeartbeatFailedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) + elif name == "topologyDescriptionChangedEvent": + self.test.assertIsInstance(actual, TopologyDescriptionChangedEvent) + elif name == "topologyOpeningEvent": + self.test.assertIsInstance(actual, TopologyOpenedEvent) + elif name == "topologyClosedEvent": + self.test.assertIsInstance(actual, TopologyClosedEvent) + else: + raise Exception(f"Unsupported event type {name}") + + self.match_event_fields(actual, spec) + + +def coerce_result(opname, result): + """Convert a pymongo result into the spec's result format.""" + if hasattr(result, "acknowledged") and not result.acknowledged: + return {"acknowledged": False} + if opname == "bulkWrite": + return parse_bulk_write_result(result) + if opname == "clientBulkWrite": + return parse_client_bulk_write_result(result) + if opname == "insertOne": + return {"insertedId": result.inserted_id} + if opname == "insertMany": + return dict(enumerate(result.inserted_ids)) + if opname in ("deleteOne", "deleteMany"): + return {"deletedCount": result.deleted_count} + if opname in ("updateOne", "updateMany", "replaceOne"): + value = { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": 0 if result.upserted_id is None else 1, + } + if result.upserted_id is not None: + value["upsertedId"] = result.upserted_id + return value + return result diff --git a/test/uri_options/auth-options.json b/test/uri_options/auth-options.json new file mode 100644 index 0000000000..d7fa14a134 --- /dev/null +++ b/test/uri_options/auth-options.json @@ -0,0 +1,33 @@ +{ + "tests": [ + { + "description": "Valid auth options are parsed correctly (GSSAPI)", + "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authSource=$external", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "authMechanism": "GSSAPI", + "authMechanismProperties": { + "SERVICE_NAME": "other", + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" + }, + "authSource": "$external" + } + }, + { + "description": "Valid auth options are parsed correctly (SCRAM-SHA-1)", + "uri": "mongodb://foo:bar@example.com/?authMechanism=SCRAM-SHA-1&authSource=authSourceDB", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "authMechanism": "SCRAM-SHA-1", + "authSource": "authSourceDB" + } + } + 
] +} diff --git a/test/uri_options/ca.pem b/test/uri_options/ca.pem new file mode 100644 index 0000000000..b4bdaefa85 --- /dev/null +++ b/test/uri_options/ca.pem @@ -0,0 +1 @@ +# This file exists solely for the purpose of facilitating drivers which check for the existence of files specified in the URI options at parse time. diff --git a/test/uri_options/cert.pem b/test/uri_options/cert.pem new file mode 100644 index 0000000000..b4bdaefa85 --- /dev/null +++ b/test/uri_options/cert.pem @@ -0,0 +1 @@ +# This file exists solely for the purpose of facilitating drivers which check for the existence of files specified in the URI options at parse time. diff --git a/test/uri_options/client.pem b/test/uri_options/client.pem new file mode 100644 index 0000000000..b4bdaefa85 --- /dev/null +++ b/test/uri_options/client.pem @@ -0,0 +1 @@ +# This file exists solely for the purpose of facilitating drivers which check for the existence of files specified in the URI options at parse time. diff --git a/test/uri_options/compression-options.json b/test/uri_options/compression-options.json new file mode 100644 index 0000000000..3c13dee062 --- /dev/null +++ b/test/uri_options/compression-options.json @@ -0,0 +1,59 @@ +{ + "tests": [ + { + "description": "Valid compression options are parsed correctly", + "uri": "mongodb://example.com/?compressors=zlib&zlibCompressionLevel=9", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "compressors": [ + "zlib" + ], + "zlibCompressionLevel": 9 + } + }, + { + "description": "Multiple compressors are parsed correctly", + "uri": "mongodb://example.com/?compressors=snappy,zlib", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "compressors": [ + "snappy", + "zlib" + ] + } + }, + { + "description": "Non-numeric zlibCompressionLevel causes a warning", + "uri": "mongodb://example.com/?compressors=zlib&zlibCompressionLevel=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low zlibCompressionLevel causes a warning", + "uri": "mongodb://example.com/?compressors=zlib&zlibCompressionLevel=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too high zlibCompressionLevel causes a warning", + "uri": "mongodb://example.com/?compressors=zlib&zlibCompressionLevel=10", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/concern-options.json b/test/uri_options/concern-options.json new file mode 100644 index 0000000000..f55f298087 --- /dev/null +++ b/test/uri_options/concern-options.json @@ -0,0 +1,67 @@ +{ + "tests": [ + { + "description": "Valid read and write concern are parsed correctly", + "uri": "mongodb://example.com/?readConcernLevel=majority&w=5&wTimeoutMS=30000&journal=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readConcernLevel": "majority", + "w": 5, + "wTimeoutMS": 30000, + "journal": false + } + }, + { + "description": "Arbitrary string readConcernLevel does not cause a warning", + "uri": "mongodb://example.com/?readConcernLevel=arbitraryButStillValid", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readConcernLevel": "arbitraryButStillValid" + } + }, + { + "description": "Arbitrary string w doesn't cause a warning", + "uri": "mongodb://example.com/?w=arbitraryButStillValid", + "valid": true, + 
"warning": false, + "hosts": null, + "auth": null, + "options": { + "w": "arbitraryButStillValid" + } + }, + { + "description": "Non-numeric wTimeoutMS causes a warning", + "uri": "mongodb://example.com/?wTimeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low wTimeoutMS causes a warning", + "uri": "mongodb://example.com/?wTimeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid journal causes a warning", + "uri": "mongodb://example.com/?journal=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json new file mode 100644 index 0000000000..bbaa295ecb --- /dev/null +++ b/test/uri_options/connection-options.json @@ -0,0 +1,273 @@ +{ + "tests": [ + { + "description": "Valid connection and timeout options are parsed correctly", + "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500&timeoutMS=100", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "appname": "URI-OPTIONS-SPEC-TEST", + "connectTimeoutMS": 20000, + "heartbeatFrequencyMS": 5000, + "localThresholdMS": 3000, + "maxIdleTimeMS": 50000, + "replicaSet": "uri-options-spec", + "retryWrites": true, + "serverSelectionTimeoutMS": 15000, + "socketTimeoutMS": 7500, + "timeoutMS": 100 + } + }, + { + "description": "Non-numeric connectTimeoutMS causes a warning", + "uri": "mongodb://example.com/?connectTimeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low connectTimeoutMS causes a warning", + "uri": "mongodb://example.com/?connectTimeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-numeric heartbeatFrequencyMS causes a warning", + "uri": "mongodb://example.com/?heartbeatFrequencyMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low heartbeatFrequencyMS causes a warning", + "uri": "mongodb://example.com/?heartbeatFrequencyMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-numeric localThresholdMS causes a warning", + "uri": "mongodb://example.com/?localThresholdMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low localThresholdMS causes a warning", + "uri": "mongodb://example.com/?localThresholdMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Invalid retryWrites causes a warning", + "uri": "mongodb://example.com/?retryWrites=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-numeric serverSelectionTimeoutMS causes a warning", + "uri": "mongodb://example.com/?serverSelectionTimeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low serverSelectionTimeoutMS causes a warning", + "uri": 
"mongodb://example.com/?serverSelectionTimeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-numeric socketTimeoutMS causes a warning", + "uri": "mongodb://example.com/?socketTimeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low socketTimeoutMS causes a warning", + "uri": "mongodb://example.com/?socketTimeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "directConnection=true", + "uri": "mongodb://example.com/?directConnection=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": true + } + }, + { + "description": "directConnection=true with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "directConnection=false", + "uri": "mongodb://example.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "directConnection=false with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "Invalid directConnection value", + "uri": "mongodb://example.com/?directConnection=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "loadBalanced=true", + "uri": "mongodb://example.com/?loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true + } + }, + { + "description": "loadBalanced=true with directConnection=false", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "directConnection": false + } + }, + { + "description": "loadBalanced=false", + "uri": "mongodb://example.com/?loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false + } + }, + { + "description": "Invalid loadBalanced value", + "uri": "mongodb://example.com/?loadBalanced=1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "loadBalanced=true with multiple hosts causes an error", + "uri": "mongodb://example1,example2/?loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "loadBalanced=true with directConnection=true causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "loadBalanced=true with replicaSet causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&replicaSet=replset", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "timeoutMS=0", + "uri": "mongodb://example.com/?timeoutMS=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "timeoutMS": 0 + } + 
}, + { + "description": "Non-numeric timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json new file mode 100644 index 0000000000..a582867d07 --- /dev/null +++ b/test/uri_options/connection-pool-options.json @@ -0,0 +1,76 @@ +{ + "tests": [ + { + "description": "Valid connection pool options are parsed correctly", + "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3&maxConnecting=1", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "maxIdleTimeMS": 50000, + "maxPoolSize": 5, + "minPoolSize": 3, + "maxConnecting": 1 + } + }, + { + "description": "Non-numeric maxIdleTimeMS causes a warning", + "uri": "mongodb://example.com/?maxIdleTimeMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low maxIdleTimeMS causes a warning", + "uri": "mongodb://example.com/?maxIdleTimeMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "maxPoolSize=0 does not error", + "uri": "mongodb://example.com/?maxPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "maxPoolSize": 0 + } + }, + { + "description": "minPoolSize=0 does not error", + "uri": "mongodb://example.com/?minPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "minPoolSize": 0 + } + }, + { + "description": "maxConnecting=0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=0", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "maxConnecting<0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json new file mode 100644 index 0000000000..abbf0d0cc6 --- /dev/null +++ b/test/uri_options/read-preference-options.json @@ -0,0 +1,82 @@ +{ + "tests": [ + { + "description": "Valid read preference options are parsed correctly", + "uri": "mongodb://example.com/?readPreference=primaryPreferred&readPreferenceTags=dc:ny,rack:1&maxStalenessSeconds=120&readPreferenceTags=dc:ny", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreference": "primaryPreferred", + "readPreferenceTags": [ + { + "dc": "ny", + "rack": "1" + }, + { + "dc": "ny" + } + ], + "maxStalenessSeconds": 120 + } + }, + { + "description": "Single readPreferenceTags is parsed as array of size one", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:ny", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "ny" + } + ] + } + }, + { + "description": "Read preference tags are case sensitive", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:NY", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + 
"options": { + "readPreferenceTags": [ + { + "dc": "NY" + } + ] + } + }, + { + "description": "Invalid readPreferenceTags causes a warning", + "uri": "mongodb://example.com/?readPreferenceTags=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-numeric maxStalenessSeconds causes a warning", + "uri": "mongodb://example.com/?maxStalenessSeconds=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Too low maxStalenessSeconds causes a warning", + "uri": "mongodb://example.com/?maxStalenessSeconds=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/sdam-options.json b/test/uri_options/sdam-options.json new file mode 100644 index 0000000000..ae0aeb2821 --- /dev/null +++ b/test/uri_options/sdam-options.json @@ -0,0 +1,46 @@ +{ + "tests": [ + { + "description": "serverMonitoringMode=auto", + "uri": "mongodb://example.com/?serverMonitoringMode=auto", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "auto" + } + }, + { + "description": "serverMonitoringMode=stream", + "uri": "mongodb://example.com/?serverMonitoringMode=stream", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "stream" + } + }, + { + "description": "serverMonitoringMode=poll", + "uri": "mongodb://example.com/?serverMonitoringMode=poll", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "poll" + } + }, + { + "description": "invalid serverMonitoringMode", + "uri": "mongodb://example.com/?serverMonitoringMode=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/single-threaded-options.json b/test/uri_options/single-threaded-options.json new file mode 100644 index 0000000000..80ac3fa4ee --- /dev/null +++ b/test/uri_options/single-threaded-options.json @@ -0,0 +1,24 @@ +{ + "tests": [ + { + "description": "Valid options specific to single-threaded drivers are parsed correctly", + "uri": "mongodb://example.com/?serverSelectionTryOnce=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverSelectionTryOnce": false + } + }, + { + "description": "Invalid serverSelectionTryOnce causes a warning", + "uri": "mongodb://example.com/?serverSelectionTryOnce=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/srv-options.json b/test/uri_options/srv-options.json new file mode 100644 index 0000000000..0670612c0d --- /dev/null +++ b/test/uri_options/srv-options.json @@ -0,0 +1,116 @@ +{ + "tests": [ + { + "description": "SRV URI with custom srvServiceName", + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvServiceName": "customname" + } + }, + { + "description": "Non-SRV URI with custom srvServiceName", + "uri": "mongodb://example.com/?srvServiceName=customname", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "SRV URI with srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "valid": true, + "warning": false, + "hosts": 
null, + "auth": null, + "options": { + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with negative integer for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "SRV URI with invalid type for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Non-SRV URI with srvMaxHosts", + "uri": "mongodb://example.com/?srvMaxHosts=2", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "SRV URI with positive srvMaxHosts and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&replicaSet=foo", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=true", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=false", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false, + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0&replicaSet=foo", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "replicaSet": "foo", + "srvMaxHosts": 0 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and loadBalanced=true", + "uri": "mongodb+srv://test3.test.build.10gen.cc/?srvMaxHosts=0&loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "srvMaxHosts": 0 + } + } + ] +} diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json new file mode 100644 index 0000000000..526cde1cbe --- /dev/null +++ b/test/uri_options/tls-options.json @@ -0,0 +1,640 @@ +{ + "tests": [ + { + "description": "Valid required tls options are parsed correctly", + "uri": "mongodb://example.com/?tls=true&tlsCAFile=ca.pem&tlsCertificateKeyFile=cert.pem", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsCAFile": "ca.pem", + "tlsCertificateKeyFile": "cert.pem" + } + }, + { + "description": "Valid tlsCertificateKeyFilePassword is parsed correctly", + "uri": "mongodb://example.com/?tlsCertificateKeyFilePassword=hunter2", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tlsCertificateKeyFilePassword": "hunter2" + } + }, + { + "description": "Invalid tlsAllowInvalidCertificates causes a warning", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates is parsed correctly", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tlsAllowInvalidCertificates": true + } + }, + { + "description": "tlsAllowInvalidHostnames is parsed correctly", + "uri": 
"mongodb://example.com/?tlsAllowInvalidHostnames=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tlsAllowInvalidHostnames": true + } + }, + { + "description": "Invalid tlsAllowInvalidHostnames causes a warning", + "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure is parsed correctly", + "uri": "mongodb://example.com/?tlsInsecure=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tlsInsecure": true + } + }, + { + "description": "Invalid tlsInsecure causes a warning", + "uri": "mongodb://example.com/?tlsInsecure=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsAllowInvalidHostnames=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsAllowInvalidHostnames=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tls=true and ssl=true doesn't warn", + "uri": "mongodb://example.com/?tls=true&ssl=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tls=false and ssl=false doesn't warn", + "uri": "mongodb://example.com/?tls=false&ssl=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + 
"description": "ssl=true and tls=true doesn't warn", + "uri": "mongodb://example.com/?ssl=true&tls=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "ssl=false and tls=false doesn't warn", + "uri": "mongodb://example.com/?ssl=false&tls=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tls=false and ssl=true raises error", + "uri": "mongodb://example.com/?tls=false&ssl=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tls=true and ssl=false raises error", + "uri": "mongodb://example.com/?tls=true&ssl=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "ssl=false and tls=true raises error", + "uri": "mongodb://example.com/?ssl=false&tls=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "ssl=true and tls=false raises error", + "uri": "mongodb://example.com/?ssl=true&tls=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": true + } + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": false + } + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + 
"description": "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": 
"mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": true + } + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": false + } + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": 
"mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsAllowInvalidCertificates=false raises an error", + 
"uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/utils.py b/test/utils.py index 76cb90a50f..bfc606fe83 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1,4 +1,4 @@ -# Copyright 2012-2014 MongoDB, Inc. +# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,575 +12,263 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for testing pymongo -""" +"""Utilities for testing pymongo that require synchronization.""" +from __future__ import annotations +import asyncio +import contextlib import os -import struct +import random import sys -import threading - -from nose.plugins.skip import SkipTest -from pymongo import MongoClient, MongoReplicaSetClient -from pymongo.errors import AutoReconnect -from pymongo.pool import NO_REQUEST, NO_SOCKET_YET, SocketInfo -from test import host, port, version - - -try: - import gevent - has_gevent = True -except ImportError: - has_gevent = False - - -# No functools in Python 2.4 -def my_partial(f, *args, **kwargs): - def _f(*new_args, **new_kwargs): - final_kwargs = kwargs.copy() - final_kwargs.update(new_kwargs) - return f(*(args + new_args), **final_kwargs) - - return _f - -def one(s): - """Get one element of a set""" - return iter(s).next() - -def oid_generated_on_client(doc): - """Is this process's PID in the document's _id?""" - pid_from_doc = struct.unpack(">H", doc['_id'].binary[7:9])[0] - return (os.getpid() % 0xFFFF) == pid_from_doc - -def delay(sec): - # Javascript sleep() only available in MongoDB since version ~1.9 - return '''function() { - var d = new Date((new Date()).getTime() + %s * 1000); - while (d > (new Date())) { }; return true; - }''' % sec - -def get_command_line(client): - command_line = client.admin.command('getCmdLineOpts') - assert command_line['ok'] == 1, "getCmdLineOpts() failed" - return command_line - -def server_started_with_option(client, cmdline_opt, config_opt): - """Check if the server was started with a particular option. - - :Parameters: - - `cmdline_opt`: The command line option (i.e. --nojournal) - - `config_opt`: The config file option (i.e. 
nojournal) - """ - command_line = get_command_line(client) - if 'parsed' in command_line: - parsed = command_line['parsed'] - if config_opt in parsed: - return parsed[config_opt] - argv = command_line['argv'] - return cmdline_opt in argv - - -def server_started_with_auth(client): - command_line = get_command_line(client) - # MongoDB >= 2.0 - if 'parsed' in command_line: - parsed = command_line['parsed'] - # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] - # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' - # < rc3 - return security.get('auth', False) or bool(security.get('keyFile')) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) - # Legacy - argv = command_line['argv'] - return '--auth' in argv or '--keyFile' in argv - - -def server_started_with_nojournal(client): - command_line = get_command_line(client) - - # MongoDB 2.6. - if 'parsed' in command_line: - parsed = command_line['parsed'] - if 'storage' in parsed: - storage = parsed['storage'] - if 'journal' in storage: - return not storage['journal']['enabled'] - - return server_started_with_option(client, '--nojournal', 'nojournal') - - -def server_is_master_with_slave(client): - command_line = get_command_line(client) - if 'parsed' in command_line: - return command_line['parsed'].get('master', False) - return '--master' in command_line['argv'] - -def drop_collections(db): - for coll in db.collection_names(): - if not coll.startswith('system'): - db.drop_collection(coll) - -def remove_all_users(db): - if version.at_least(db.connection, (2, 5, 3, -1)): - db.command({"dropAllUsersFromDatabase": 1}) - else: - db.system.users.remove({}) +import threading # Used in the synchronized version of this file +import time +import traceback +from functools import wraps +from inspect import iscoroutinefunction +from bson.son import SON +from pymongo import MongoClient +from pymongo.errors import ConfigurationError +from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration -def joinall(threads): - """Join threads with a 5-minute timeout, assert joins succeeded""" - for t in threads: - t.join(300) - assert not t.isAlive(), "Thread %s hung" % t - -def is_mongos(client): - res = client.admin.command('ismaster') - return res.get('msg', '') == 'isdbgrid' +_IS_SYNC = True -def enable_text_search(client): - client.admin.command( - 'setParameter', textSearchEnabled=True) - if isinstance(client, MongoReplicaSetClient): - for host, port in client.secondaries: - MongoClient(host, port).admin.command( - 'setParameter', textSearchEnabled=True) +def get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = client._get_topology() + server = topology._select_server(writable_server_selector, _Op.TEST) + return server.pool -def assertRaisesExactly(cls, fn, *args, **kwargs): - """ - Unlike the standard assertRaises, this checks that a function raises a - specific class of exception, and not a subclass. E.g., check that - MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. 
- """ - try: - fn(*args, **kwargs) - except Exception, e: - assert e.__class__ == cls, "got %s, expected %s" % ( - e.__class__.__name__, cls.__name__) - else: - raise AssertionError("%s not raised" % cls) -def looplet(greenlets): - """World's smallest event loop; run until all greenlets are done - """ - while True: - done = True - - for g in greenlets: - if not g.dead: - done = False - g.switch() - - if done: - return - -class RendezvousThread(threading.Thread): - """A thread that starts and pauses at a rendezvous point before resuming. - To be used in tests that must ensure that N threads are all alive - simultaneously, regardless of thread-scheduling's vagaries. - - 1. Write a subclass of RendezvousThread and override before_rendezvous - and / or after_rendezvous. - 2. Create a state with RendezvousThread.shared_state(N) - 3. Start N of your subclassed RendezvousThreads, passing the state to each - one's __init__ - 4. In the main thread, call RendezvousThread.wait_for_rendezvous - 5. Test whatever you need to test while threads are paused at rendezvous - point - 6. In main thread, call RendezvousThread.resume_after_rendezvous - 7. Join all threads from main thread - 8. Assert that all threads' "passed" attribute is True - 9. Test post-conditions - """ - class RendezvousState(object): - def __init__(self, nthreads): - # Number of threads total - self.nthreads = nthreads - - # Number of threads that have arrived at rendezvous point - self.arrived_threads = 0 - self.arrived_threads_lock = threading.Lock() - - # Set when all threads reach rendezvous - self.ev_arrived = threading.Event() - - # Set by resume_after_rendezvous() so threads can continue. - self.ev_resume = threading.Event() - - - @classmethod - def create_shared_state(cls, nthreads): - return RendezvousThread.RendezvousState(nthreads) - - def before_rendezvous(self): - """Overridable: Do this before the rendezvous""" - pass +def get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in (client._get_topology()).select_servers(any_server_selector, _Op.TEST) + ] - def after_rendezvous(self): - """Overridable: Do this after the rendezvous. 
If it throws no exception, - `passed` is set to True - """ - pass - @classmethod - def wait_for_rendezvous(cls, state): - """Wait for all threads to reach rendezvous and pause there""" - state.ev_arrived.wait(10) - assert state.ev_arrived.isSet(), "Thread timeout" - assert state.nthreads == state.arrived_threads - - @classmethod - def resume_after_rendezvous(cls, state): - """Tell all the paused threads to continue""" - state.ev_resume.set() - - def __init__(self, state): - """Params: - `state`: A shared state object from RendezvousThread.shared_state() - """ - super(RendezvousThread, self).__init__() - self.state = state - self.passed = False - - # If this thread fails to terminate, don't hang the whole program - self.setDaemon(True) - - def _rendezvous(self): - """Pause until all threads arrive here""" - s = self.state - s.arrived_threads_lock.acquire() - s.arrived_threads += 1 - if s.arrived_threads == s.nthreads: - s.arrived_threads_lock.release() - s.ev_arrived.set() - else: - s.arrived_threads_lock.release() - s.ev_arrived.wait() - - def run(self): - try: - self.before_rendezvous() - finally: - self._rendezvous() - - # all threads have passed the rendezvous, wait for - # resume_after_rendezvous() - self.state.ev_resume.wait() - - self.after_rendezvous() - self.passed = True - -def read_from_which_host( - rsc, - mode, - tag_sets=None, - secondary_acceptable_latency_ms=15 -): - """Read from a MongoReplicaSetClient with the given Read Preference mode, - tags, and acceptable latency. Return the 'host:port' which was read from. - - :Parameters: - - `rsc`: A MongoReplicaSetClient - - `mode`: A ReadPreference - - `tag_sets`: List of dicts of tags for data-center-aware reads - - `secondary_acceptable_latency_ms`: a float - """ - db = rsc.pymongo_test - db.read_preference = mode - if isinstance(tag_sets, dict): - tag_sets = [tag_sets] - db.tag_sets = tag_sets or [{}] - db.secondary_acceptable_latency_ms = secondary_acceptable_latency_ms - - cursor = db.test.find() - try: - try: - cursor.next() - except StopIteration: - # No documents in collection, that's fine - pass - - return cursor._Cursor__connection_id - except AutoReconnect: - return None - -def assertReadFrom(testcase, rsc, member, *args, **kwargs): - """Check that a query with the given mode, tag_sets, and - secondary_acceptable_latency_ms reads from the expected replica-set - member - - :Parameters: - - `testcase`: A unittest.TestCase - - `rsc`: A MongoReplicaSetClient - - `member`: A host:port expected to be used - - `mode`: A ReadPreference - - `tag_sets` (optional): List of dicts of tags for data-center-aware reads - - `secondary_acceptable_latency_ms` (optional): a float - """ - for _ in range(10): - testcase.assertEqual(member, read_from_which_host(rsc, *args, **kwargs)) - -def assertReadFromAll(testcase, rsc, members, *args, **kwargs): - """Check that a query with the given mode, tag_sets, and - secondary_acceptable_latency_ms reads from all members in a set, and - only members in that set. 
- - :Parameters: - - `testcase`: A unittest.TestCase - - `rsc`: A MongoReplicaSetClient - - `members`: Sequence of host:port expected to be used - - `mode`: A ReadPreference - - `tag_sets` (optional): List of dicts of tags for data-center-aware reads - - `secondary_acceptable_latency_ms` (optional): a float - """ - members = set(members) - used = set() - for _ in range(100): - used.add(read_from_which_host(rsc, *args, **kwargs)) +def wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. - testcase.assertEqual(members, used) + E.g.: -def get_pool(client): - if isinstance(client, MongoClient): - return client._MongoClient__member.pool - elif isinstance(client, MongoReplicaSetClient): - rs_state = client._MongoReplicaSetClient__rs_state - return rs_state.primary_member.pool - else: - raise TypeError(str(client)) + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') -def pools_from_rs_client(client): - """Get Pool instances from a MongoReplicaSetClient or ReplicaSetConnection. - """ - return [ - member.pool for member in - client._MongoReplicaSetClient__rs_state.members] + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). -class TestRequestMixin(object): - """Inherit from this class and from unittest.TestCase to get some - convenient methods for testing connection pools and requests + Returns the predicate's first true value. """ - def assertSameSock(self, pool): - sock_info0 = pool.get_socket() - sock_info1 = pool.get_socket() - self.assertEqual(sock_info0, sock_info1) - pool.maybe_return_socket(sock_info0) - pool.maybe_return_socket(sock_info1) - - def assertDifferentSock(self, pool): - sock_info0 = pool.get_socket() - sock_info1 = pool.get_socket() - self.assertNotEqual(sock_info0, sock_info1) - pool.maybe_return_socket(sock_info0) - pool.maybe_return_socket(sock_info1) - - def assertNoRequest(self, pool): - self.assertEqual(NO_REQUEST, pool._get_request_state()) - - def assertNoSocketYet(self, pool): - self.assertEqual(NO_SOCKET_YET, pool._get_request_state()) - - def assertRequestSocket(self, pool): - self.assertTrue(isinstance(pool._get_request_state(), SocketInfo)) - - def assertInRequestAndSameSock(self, client, pools): - self.assertTrue(client.in_request()) - if not isinstance(pools, list): - pools = [pools] - for pool in pools: - self.assertTrue(pool.in_request()) - self.assertSameSock(pool) - - def assertNotInRequestAndDifferentSock(self, client, pools): - self.assertFalse(client.in_request()) - if not isinstance(pools, list): - pools = [pools] - for pool in pools: - self.assertFalse(pool.in_request()) - self.assertDifferentSock(pool) - - -# Constants for run_threads and _TestLazyConnectMixin. -NTRIALS = 5 -NTHREADS = 10 - - -def run_threads(collection, target, use_greenlets): - """Run a target function in many threads. - - target is a function taking a Collection and an integer. 
- """ - threads = [] - for i in range(NTHREADS): - bound_target = my_partial(target, collection, i) - if use_greenlets: - threads.append(gevent.Greenlet(run=bound_target)) + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = predicate() else: - threads.append(threading.Thread(target=bound_target)) + retval = predicate() + if retval: + return retval - for t in threads: - t.start() + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) - for t in threads: - t.join(30) - if use_greenlets: - # bool(Greenlet) is True if it's alive. - assert not t - else: - assert not t.isAlive() + time.sleep(interval) -def lazy_client_trial(reset, target, test, get_client, use_greenlets): - """Test concurrent operations on a lazily-connecting client. +def is_mongos(client): + res = client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" - `reset` takes a collection and resets it for the next trial. - `target` takes a lazily-connecting collection and an index from - 0 to NTHREADS, and performs some operation, e.g. an insert. +def ensure_all_connected(client: MongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. - `test` takes the lazily-connecting collection and asserts a - post-condition to prove `target` succeeded. + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. """ - if use_greenlets and not has_gevent: - raise SkipTest('Gevent not installed') - - collection = MongoClient(host, port).pymongo_test.test - - # Make concurrency bugs more likely to manifest. - interval = None - if not sys.platform.startswith('java'): - if sys.version_info >= (3, 2): - interval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) - else: - interval = sys.getcheckinterval() - sys.setcheckinterval(1) + hello: dict = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") + + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} + + # Run hello until we have connected to each host at least once. + def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list try: - for i in range(NTRIALS): - reset(collection) - lazy_client = get_client( - _connect=False, use_greenlets=use_greenlets) - - lazy_collection = lazy_client.pymongo_test.test - run_threads(lazy_collection, target, use_greenlets) - test(lazy_collection) - finally: - if not sys.platform.startswith('java'): - if sys.version_info >= (3, 2): - sys.setswitchinterval(interval) - else: - sys.setcheckinterval(interval) + def predicate(): + return target_host_list == discover() + wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) -class _TestLazyConnectMixin(object): - """Test concurrent operations on a lazily-connecting client. 
- Inherit from this class and from unittest.TestCase, and override - _get_client(self, **kwargs), for testing a lazily-connecting - client, i.e. a client initialized with _connect=False. - - Set use_greenlets = True to test with Gevent. +def assertRaisesExactly(cls, fn, *args, **kwargs): """ - use_greenlets = False - - NTRIALS = 5 - NTHREADS = 10 - - def test_insert(self): - def reset(collection): - collection.drop() - - def insert(collection, _): - collection.insert({}) - - def test(collection): - self.assertEqual(NTHREADS, collection.count()) + Unlike the standard assertRaises, this checks that a function raises a + specific class of exception, and not a subclass. E.g., check that + MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. + """ + try: + fn(*args, **kwargs) + except Exception as e: + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" + else: + raise AssertionError("%s not raised" % cls) - lazy_client_trial( - reset, insert, test, - self._get_client, self.use_greenlets) - def test_save(self): - def reset(collection): - collection.drop() +def set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + client.admin.command(cmd) - def save(collection, _): - collection.save({}) - def test(collection): - self.assertEqual(NTHREADS, collection.count()) +def joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t + else: + asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, +): + """Decorate a test as flaky. 
- lazy_client_trial( - reset, save, test, - self._get_client, self.use_greenlets) + :param reason: the reason why the test is flaky + :param max_runs: the maximum number of runs before raising an error + :param min_passes: the minimum number of passing runs + :param delay: the delay in seconds between retries + :param affects_cpython_links: whether the test is flaky on CPython on Linux + :param func_name: the name of the function, used for the rety message + :param reset_func: a function to call before retrying - def test_update(self): - def reset(collection): - collection.drop() - collection.insert([{'i': 0}]) + """ + if reason is None: + raise ValueError("flaky requires a reason input") + is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython" + disable_flaky = "DISABLE_FLAKY" in os.environ + if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ: + disable_flaky = True + + if disable_flaky or (is_cpython_linux and not affects_cpython_linux): + max_runs = 1 + min_passes = 1 + + def decorator(target_func): + @wraps(target_func) + def wrapper(*args, **kwargs): + passes = 0 + for i in range(max_runs): + try: + result = target_func(*args, **kwargs) + passes += 1 + if passes == min_passes: + return result + except Exception as e: + if i == max_runs - 1: + raise e + print( + f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason})):\n" + f"{traceback.format_exc()}", + file=sys.stderr, + ) + time.sleep(delay) + if reset_func: + reset_func() + + return wrapper + + return decorator + + +class MockConnection: + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + self.id = random.randint(0, 100) + self.is_sdam = False + self.server_connection_id = random.randint(0, 100) + + def close_conn(self, reason): + pass - # Update doc 10 times. 
- def update(collection, i): - collection.update({}, {'$inc': {'i': 1}}) + def __enter__(self): + return self - def test(collection): - self.assertEqual(NTHREADS, collection.find_one()['i']) + def __exit__(self, exc_type, exc_val, exc_tb): + pass - lazy_client_trial( - reset, update, test, - self._get_client, self.use_greenlets) - def test_remove(self): - def reset(collection): - collection.drop() - collection.insert([{'i': i} for i in range(NTHREADS)]) +class MockPool: + def __init__(self, address, options, is_sdam=False, client_id=None): + self.gen = _PoolGeneration() + self._lock = _create_lock() + self.opts = options + self.operation_count = 0 + self.conns = [] - def remove(collection, i): - collection.remove({'i': i}) + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) - def test(collection): - self.assertEqual(0, collection.count()) + @contextlib.contextmanager + def checkout(self, handler=None): + yield MockConnection() - lazy_client_trial( - reset, remove, test, - self._get_client, self.use_greenlets) + def checkin(self, *args, **kwargs): + pass - def test_find_one(self): - results = [] + def _reset(self, service_id=None): + with self._lock: + self.gen.inc(service_id) - def reset(collection): - collection.drop() - collection.insert({}) - results[:] = [] + def ready(self): + pass - def find_one(collection, _): - results.append(collection.find_one()) + def reset(self, service_id=None, interrupt_connections=False): + self._reset() - def test(collection): - self.assertEqual(NTHREADS, len(results)) + def reset_without_pause(self): + self._reset() - lazy_client_trial( - reset, find_one, test, - self._get_client, self.use_greenlets) + def close(self): + self._reset() - def test_max_bson_size(self): - # Client should have sane defaults before connecting, and should update - # its configuration once connected. - c = self._get_client(_connect=False) - self.assertEqual(16 * (1024 ** 2), c.max_bson_size) - self.assertEqual(2 * c.max_bson_size, c.max_message_size) + def update_is_writable(self, is_writable): + pass - # Make the client connect, so that it sets its max_bson_size and - # max_message_size attributes. - ismaster = c.db.command('ismaster') - self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size) - if 'maxMessageSizeBytes' in ismaster: - self.assertEqual( - ismaster['maxMessageSizeBytes'], - c.max_message_size) + def remove_stale_sockets(self, *args, **kwargs): + pass diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py new file mode 100644 index 0000000000..2772f06070 --- /dev/null +++ b/test/utils_selection_tests.py @@ -0,0 +1,200 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
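As a usage sketch (not part of the patch): the @flaky decorator added to test/utils.py above is applied per test method. Everything below except the flaky(...) signature is hypothetical, invented for illustration.

```python
# Hypothetical usage of the @flaky decorator from test/utils.py above;
# the test class, method name, and reason string are invented.
import unittest

from test.utils import flaky


class TestStepdown(unittest.TestCase):
    @flaky(reason="primary may step down mid-test", max_runs=3, delay=2)
    def test_insert_during_stepdown(self):
        # Retried up to max_runs times; succeeds once min_passes runs pass.
        ...
```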
+ +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys +from test import PyMongoTestCase +from test.utils import MockPool + +sys.path[0:0] = [""] + +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) +from test.utils_shared import parse_read_preference + +from bson import json_util +from pymongo.common import HEARTBEAT_FREQUENCY +from pymongo.errors import AutoReconnect, ConfigurationError +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology + +_IS_SYNC = True + + +def get_topology_settings_dict(**kwargs): + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": MockPool, + } + settings.update(kwargs) + return settings + + +def create_topology(scenario_def, **kwargs): + # Initialize topologies. + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY + + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + + topology_type = get_topology_type_name(scenario_def) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) + # Force topology description to ReplicaSet + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) + + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + topology.open() + + # Update topologies with server descriptions. + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + topology.on_change(server_description) + + # Assert that descriptions match + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name + + return topology + + +def create_test(scenario_def): + def run_scenario(self): + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + top_latency = create_topology(scenario_def) + + # "In latency window" is defined in the server selection + # spec as the subset of suitable_servers that falls within the + # allowable latency window. + top_suitable = create_topology(scenario_def, local_threshold_ms=1000000) + + # Create server selector. + if scenario_def.get("operation") == "write": + pref = writable_server_selector + else: + # Make first letter lowercase to match read_pref's modes. + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): + with self.assertRaises((ConfigurationError, ValueError)): + # Error can be raised when making Read Pref or selecting. + pref = parse_read_preference(pref_def) + top_latency.select_server(pref, _Op.TEST) + return + + pref = parse_read_preference(pref_def) + + # Select servers. 
+ if not scenario_def.get("suitable_servers"): + with self.assertRaises(AutoReconnect): + top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + if not scenario_def["in_latency_window"]: + with self.assertRaises(AutoReconnect): + top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + actual_suitable_s = top_suitable.select_servers(pref, _Op.TEST, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, _Op.TEST, server_selection_timeout=0) + + expected_suitable_servers = {} + for server in scenario_def["suitable_servers"]: + server_description = make_server_description(server, hosts) + expected_suitable_servers[server["address"]] = server_description + + actual_suitable_servers = {} + for s in actual_suitable_s: + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) + for k, actual in actual_suitable_servers.items(): + expected = expected_suitable_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + expected_latency_servers = {} + for server in scenario_def["in_latency_window"]: + server_description = make_server_description(server, hosts) + expected_latency_servers[server["address"]] = server_description + + actual_latency_servers = {} + for s in actual_latency_s: + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) + for k, actual in actual_latency_servers.items(): + expected = expected_latency_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + return run_scenario + + +def create_selection_tests(test_dir): + class TestAllScenarios(PyMongoTestCase): + pass + + for dirpath, _, filenames in os.walk(test_dir): + dirname = os.path.split(dirpath) + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + return TestAllScenarios diff --git a/test/utils_selection_tests_shared.py b/test/utils_selection_tests_shared.py new file mode 100644 index 0000000000..dbaed1034f --- /dev/null +++ b/test/utils_selection_tests_shared.py @@ -0,0 +1,100 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys + +sys.path[0:0] = [""] + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION, clean_node +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription + + +def get_addresses(server_list): + seeds = [] + hosts = [] + for server in server_list: + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) + return seeds, hosts + + +def make_last_write_date(server): + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) + millis = server.get("lastWrite", {}).get("lastWriteDate") + if millis: + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) / 1000 + micros = diff * 1000 + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) + else: + # "Unknown" server. + return epoch + + +def make_server_description(server, hosts): + """Make a ServerDescription from server info in a JSON test.""" + server_type = server["type"] + if server_type in ("Unknown", "PossiblePrimary"): + return ServerDescription(clean_node(server["address"]), Hello({})) + + hello_response = {"ok": True, "hosts": hosts} + if server_type not in ("Standalone", "Mongos", "RSGhost"): + hello_response["setName"] = "rs" + + if server_type == "RSPrimary": + hello_response[HelloCompat.LEGACY_CMD] = True + elif server_type == "RSSecondary": + hello_response["secondary"] = True + elif server_type == "Mongos": + hello_response["msg"] = "isdbgrid" + elif server_type == "RSGhost": + hello_response["isreplicaset"] = True + elif server_type == "RSArbiter": + hello_response["arbiterOnly"] = True + + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} + + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": + if field in server: + hello_response[field] = server[field] + + hello_response.setdefault("maxWireVersion", MIN_SUPPORTED_WIRE_VERSION) + + # Sets _last_update_time to now. + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) + + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. + + return sd + + +def get_topology_type_name(scenario_def): + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": + # PyMongo never starts a topology in type Unknown. + return "Sharded" if len(td["servers"]) > 1 else "Single" + else: + return name diff --git a/test/utils_shared.py b/test/utils_shared.py new file mode 100644 index 0000000000..72fb943fc1 --- /dev/null +++ b/test/utils_shared.py @@ -0,0 +1,702 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared utilities for testing pymongo""" +from __future__ import annotations + +import asyncio +import contextlib +import copy +import functools +import random +import re +import shutil +import sys +import threading +import unittest +import warnings +from collections import abc, defaultdict +from functools import partial +from inspect import iscoroutinefunction +from test import client_context +from test.asynchronous.utils import async_wait_until +from test.utils import wait_until +from typing import List + +from bson.objectid import ObjectId +from pymongo import monitoring, operations, read_preferences +from pymongo.cursor_shared import CursorType +from pymongo.errors import OperationFailure +from pymongo.helpers_shared import _SENSITIVE_COMMANDS +from pymongo.lock import _async_create_lock, _create_lock +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_concern import ReadConcern +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration +from pymongo.write_concern import WriteConcern + +IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) + + +class BaseListener: + def __init__(self): + self.events = [] + + def reset(self): + self.events = [] + + def add_event(self, event): + self.events.append(event) + + def event_count(self, event_type): + return len(self.events_by_type(event_type)) + + def events_by_type(self, event_type): + """Return the matching events by event class. + + event_type can be a single class or a tuple of classes. 
+ """ + return self.matching(lambda e: isinstance(e, event_type)) + + def matching(self, matcher): + """Return the matching events.""" + return [event for event in self.events[:] if matcher(event)] + + def wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") + + async def async_wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + await async_wait_until( + lambda: self.event_count(event) >= count, f"find {count} {event} event(s)" + ) + + +class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): + def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) + self.add_event(event) + + def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) + self.add_event(event) + + def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) + self.add_event(event) + + def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) + self.add_event(event) + + def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) + self.add_event(event) + + def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) + self.add_event(event) + + def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) + self.add_event(event) + + def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) + self.add_event(event) + + def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) + self.add_event(event) + + def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) + self.add_event(event) + + def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) + self.add_event(event) + + +class EventListener(BaseListener, monitoring.CommandListener): + def __init__(self): + super().__init__() + self.results = defaultdict(list) + + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] + + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] + + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] + + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) + + def started_command_names(self) -> List[str]: + """Return list of command names started.""" + return [event.command_name for event in self.started_events] + + def reset(self) -> None: + """Reset the state of this listener.""" + self.results.clear() + super().reset() + + +class TopologyEventListener(monitoring.TopologyListener): + def __init__(self): + self.results = defaultdict(list) + + def closed(self, event): + self.results["closed"].append(event) + + def description_changed(self, event): + self.results["description_changed"].append(event) + + def opened(self, event): + self.results["opened"].append(event) + + def reset(self): + """Reset the state of this listener.""" + self.results.clear() + + 
+class AllowListEventListener(EventListener): + def __init__(self, *commands): + self.commands = set(commands) + super().__init__() + + def started(self, event): + if event.command_name in self.commands: + super().started(event) + + def succeeded(self, event): + if event.command_name in self.commands: + super().succeeded(event) + + def failed(self, event): + if event.command_name in self.commands: + super().failed(event) + + +class OvertCommandListener(EventListener): + """A CommandListener that ignores sensitive commands.""" + + ignore_list_collections = False + + def started(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().started(event) + + def succeeded(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().succeeded(event) + + def failed(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().failed(event) + + +class _ServerEventListener: + """Listens to all events.""" + + def __init__(self): + self.results = [] + + def opened(self, event): + self.results.append(event) + + def description_changed(self, event): + self.results.append(event) + + def closed(self, event): + self.results.append(event) + + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] + + def reset(self): + self.results = [] + + +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): + """Listens to Server events.""" + + +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): + """Listens to Server and Topology events.""" + + +class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): + """Listens to only server heartbeat events.""" + + def started(self, event): + self.add_event(event) + + def succeeded(self, event): + self.add_event(event) + + def failed(self, event): + self.add_event(event) + + +class HeartbeatEventsListListener(HeartbeatEventListener): + """Listens to only server heartbeat events and publishes them to a provided list.""" + + def __init__(self, events): + super().__init__() + self.event_list = events + + def started(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatStartedEvent") + + def succeeded(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatSucceededEvent") + + def failed(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatFailedEvent") + + +class ScenarioDict(dict): + """Dict that returns {} for any unknown key, recursively.""" + + def __init__(self, data): + def convert(v): + if isinstance(v, abc.Mapping): + return ScenarioDict(v) + if isinstance(v, (str, bytes)): + return v + if isinstance(v, abc.Sequence): + return [convert(item) for item in v] + return v + + dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) + + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + # Unlike a defaultdict, don't set the key, just return a dict. 
+            return ScenarioDict({})
+
+
+class CompareType:
+    """Class that compares equal to any object of the given type(s)."""
+
+    def __init__(self, types):
+        self.types = types
+
+    def __eq__(self, other):
+        return isinstance(other, self.types)
+
+
+class FunctionCallRecorder:
+    """Utility class to wrap a callable and record its invocations."""
+
+    def __init__(self, function):
+        self._function = function
+        self._call_list = []
+
+    def __call__(self, *args, **kwargs):
+        self._call_list.append((args, kwargs))
+        # Works for plain callables and coroutine functions alike: calling a
+        # coroutine function returns an awaitable for the caller to await.
+        return self._function(*args, **kwargs)
+
+    def reset(self):
+        """Wipes the call list."""
+        self._call_list = []
+
+    def call_list(self):
+        """Returns a copy of the call list."""
+        return self._call_list[:]
+
+    @property
+    def call_count(self):
+        """Returns the number of times the function has been called."""
+        return len(self._call_list)
+
+
+def one(s):
+    """Get one element of a set."""
+    return next(iter(s))
+
+
+def oid_generated_on_process(oid):
+    """Determine whether the given ObjectId was generated by the current
+    process, based on the 5-byte random number in the ObjectId.
+    """
+    return ObjectId._random() == oid.binary[4:9]
+
+
+def delay(sec):
+    return """function() { sleep(%f * 1000); return true; }""" % sec
+
+
+def camel_to_snake(camel):
+    # Regex to convert CamelCase to snake_case.
+    snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel)
+    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower()
+
+
+def camel_to_upper_camel(camel):
+    return camel[0].upper() + camel[1:]
+
+
+def camel_to_snake_args(arguments):
+    for arg_name in list(arguments):
+        c2s = camel_to_snake(arg_name)
+        arguments[c2s] = arguments.pop(arg_name)
+    return arguments
+
+
+def snake_to_camel(snake):
+    # Regex to convert snake_case to lowerCamelCase.
+    return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake)
+
+
+def parse_collection_options(opts):
+    if "readPreference" in opts:
+        opts["read_preference"] = parse_read_preference(opts.pop("readPreference"))
+
+    if "writeConcern" in opts:
+        opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern")))
+
+    if "readConcern" in opts:
+        opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern")))
+
+    if "timeoutMS" in opts:
+        opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0
+    return opts
+
+
+@contextlib.contextmanager
+def _ignore_deprecations():
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", DeprecationWarning)
+        yield
+
+
+def ignore_deprecations(wrapped=None):
+    """Use as either a context manager or a decorator."""
+    if wrapped:
+        if iscoroutinefunction(wrapped):
+
+            @functools.wraps(wrapped)
+            async def wrapper(*args, **kwargs):
+                with _ignore_deprecations():
+                    return await wrapped(*args, **kwargs)
+        else:
+
+            @functools.wraps(wrapped)
+            def wrapper(*args, **kwargs):
+                with _ignore_deprecations():
+                    return wrapped(*args, **kwargs)
+
+        return wrapper
+
+    else:
+        return _ignore_deprecations()
+
+
+class DeprecationFilter:
+    def __init__(self, action="ignore"):
+        """Start filtering deprecations."""
+        self.warn_context = warnings.catch_warnings()
+        self.warn_context.__enter__()
+        warnings.simplefilter(action, DeprecationWarning)
+
+    def stop(self):
+        """Stop filtering deprecations."""
+        self.warn_context.__exit__()  # type: ignore
+        self.warn_context = None  # type: ignore
+
+
+# Constants for run_threads and lazy_client_trial.
+NTRIALS = 5 +NTHREADS = 10 + + +def run_threads(collection, target): + """Run a target function in many threads. + + target is a function taking a Collection and an integer. + """ + threads = [] + for i in range(NTHREADS): + bound_target = partial(target, collection, i) + threads.append(threading.Thread(target=bound_target)) + + for t in threads: + t.start() + + for t in threads: + t.join(60) + assert not t.is_alive() + + +@contextlib.contextmanager +def frequent_thread_switches(): + """Make concurrency bugs more likely to manifest.""" + interval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) + + try: + yield + finally: + sys.setswitchinterval(interval) + + +def lazy_client_trial(reset, target, test, get_client): + """Test concurrent operations on a lazily-connecting client. + + `reset` takes a collection and resets it for the next trial. + + `target` takes a lazily-connecting collection and an index from + 0 to NTHREADS, and performs some operation, e.g. an insert. + + `test` takes the lazily-connecting collection and asserts a + post-condition to prove `target` succeeded. + """ + collection = client_context.client.pymongo_test.test + + with frequent_thread_switches(): + for _i in range(NTRIALS): + reset(collection) + lazy_client = get_client() + lazy_collection = lazy_client.pymongo_test.test + run_threads(lazy_collection, target) + test(lazy_collection) + + +def gevent_monkey_patched(): + """Check if gevent's monkey patching is active.""" + try: + import socket + + import gevent.socket # type:ignore[import] + + return socket.socket is gevent.socket.socket + except ImportError: + return False + + +def is_greenthread_patched(): + return gevent_monkey_patched() + + +def parse_read_preference(pref): + # Make first letter lowercase to match read_pref's modes. + mode_string = pref.get("mode", "primary") + mode_string = mode_string[:1].lower() + mode_string[1:] + mode = read_preferences.read_pref_mode_from_name(mode_string) + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tagSets") or pref.get("tag_sets") + return read_preferences.make_read_preference( + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) + + +def server_name_to_type(name): + """Convert a ServerType name to the corresponding value. For SDAM tests.""" + # Special case, some tests in the spec include the PossiblePrimary + # type, but only single-threaded drivers need that type. We call + # possible primaries Unknown. 
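+    # For example, server_name_to_type("RSSecondary") is SERVER_TYPE.RSSecondary,
+    # while server_name_to_type("PossiblePrimary") is SERVER_TYPE.Unknown.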
+    if name == "PossiblePrimary":
+        return SERVER_TYPE.Unknown
+    return getattr(SERVER_TYPE, name)
+
+
+def cat_files(dest, *sources):
+    """Cat multiple files into dest."""
+    with open(dest, "wb") as fdst:
+        for src in sources:
+            with open(src, "rb") as fsrc:
+                shutil.copyfileobj(fsrc, fdst)
+
+
+@contextlib.contextmanager
+def assertion_context(msg):
+    """A context manager that adds info to an assertion failure."""
+    try:
+        yield
+    except AssertionError as exc:
+        raise AssertionError(f"{msg}: {exc}")
+
+
+def parse_spec_options(opts):
+    if "readPreference" in opts:
+        opts["read_preference"] = parse_read_preference(opts.pop("readPreference"))
+
+    if "writeConcern" in opts:
+        w_opts = opts.pop("writeConcern")
+        if "journal" in w_opts:
+            w_opts["j"] = w_opts.pop("journal")
+        if "wtimeoutMS" in w_opts:
+            w_opts["wtimeout"] = w_opts.pop("wtimeoutMS")
+        opts["write_concern"] = WriteConcern(**dict(w_opts))
+
+    if "readConcern" in opts:
+        opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern")))
+
+    if "timeoutMS" in opts:
+        assert isinstance(opts["timeoutMS"], int)
+        opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0
+
+    if "maxTimeMS" in opts:
+        opts["max_time_ms"] = opts.pop("maxTimeMS")
+
+    if "maxCommitTimeMS" in opts:
+        opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS")
+
+    return dict(opts)
+
+
+def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback):
+    for arg_name in list(arguments):
+        c2s = camel_to_snake(arg_name)
+        # Named "key" instead of "fieldName".
+        if arg_name == "fieldName":
+            arguments["key"] = arguments.pop(arg_name)
+        # Aggregate uses "batchSize", while find uses batch_size.
+        elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate":
+            continue
+        elif arg_name == "bypassDocumentValidation" and (
+            opname == "aggregate" or "find_one_and" in opname
+        ):
+            continue
+        elif arg_name == "timeoutMode":
+            raise unittest.SkipTest("PyMongo does not support timeoutMode")
+        # PyMongo's return_document takes ReturnDocument.BEFORE/AFTER
+        # (booleans), mapped here from the spec strings "before"/"after".
+        elif arg_name == "returnDocument":
+            arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper())
+        elif "bulk_write" in opname and (c2s == "requests" or c2s == "models"):
+            # Parse each request into a bulk write model.
+            requests = []
+            for request in arguments[c2s]:
+                if "name" in request:
+                    # CRUD v2 format
+                    bulk_model = camel_to_upper_camel(request["name"])
+                    bulk_class = getattr(operations, bulk_model)
+                    bulk_arguments = camel_to_snake_args(request["arguments"])
+                else:
+                    # Unified test format
+                    bulk_model, spec = next(iter(request.items()))
+                    bulk_class = getattr(operations, camel_to_upper_camel(bulk_model))
+                    bulk_arguments = camel_to_snake_args(spec)
+                requests.append(bulk_class(**dict(bulk_arguments)))
+            arguments[c2s] = requests
+        elif arg_name == "session":
+            arguments["session"] = entity_map[arguments["session"]]
+        elif opname == "open_download_stream" and arg_name == "id":
+            arguments["file_id"] = arguments.pop(arg_name)
+        elif opname not in ("find", "find_one") and c2s == "max_time_ms":
+            # find is the only method that accepts snake_case max_time_ms.
+            # All other methods take kwargs which must use the server's
+            # camelCase maxTimeMS. See PYTHON-1855.
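+            # For example, a spec's {"maxTimeMS": 500} is passed along as the
+            # keyword argument maxTimeMS=500 rather than max_time_ms=500.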
+            arguments["maxTimeMS"] = arguments.pop("max_time_ms")
+        elif opname == "with_transaction" and arg_name == "callback":
+            if "operations" in arguments[arg_name]:
+                # CRUD v2 format
+                callback_ops = arguments[arg_name]["operations"]
+            else:
+                # Unified test format
+                callback_ops = arguments[arg_name]
+            arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops))
+        elif opname == "drop_collection" and arg_name == "collection":
+            arguments["name_or_collection"] = arguments.pop(arg_name)
+        elif opname == "create_collection":
+            if arg_name == "collection":
+                arguments["name"] = arguments.pop(arg_name)
+            arguments["check_exists"] = False
+            # Any other arguments to create_collection are passed through
+            # **kwargs.
+        elif opname == "create_index" and arg_name == "keys":
+            arguments["keys"] = list(arguments.pop(arg_name).items())
+        elif opname == "drop_index" and arg_name == "name":
+            arguments["index_or_name"] = arguments.pop(arg_name)
+        elif opname == "rename" and arg_name == "to":
+            arguments["new_name"] = arguments.pop(arg_name)
+        elif opname == "rename" and arg_name == "dropTarget":
+            arguments["dropTarget"] = arguments.pop(arg_name)
+        elif arg_name == "cursorType":
+            cursor_type = arguments.pop(arg_name)
+            if cursor_type == "tailable":
+                arguments["cursor_type"] = CursorType.TAILABLE
+            elif cursor_type == "tailableAwait":
+                # "tailableAwait" corresponds to the awaitData cursor type.
+                arguments["cursor_type"] = CursorType.TAILABLE_AWAIT
+            else:
+                raise AssertionError(f"Unsupported cursorType: {cursor_type}")
+        else:
+            arguments[c2s] = arguments.pop(arg_name)
+
+
+def create_async_event():
+    return asyncio.Event()
+
+
+def create_event():
+    return threading.Event()
+
+
+def async_create_barrier(n_tasks: int):
+    return asyncio.Barrier(n_tasks)
+
+
+def create_barrier(n_tasks: int, timeout: float | None = None):
+    return threading.Barrier(n_tasks, timeout=timeout)
+
+
+async def async_barrier_wait(barrier, timeout: float | None = None):
+    await asyncio.wait_for(barrier.wait(), timeout=timeout)
+
+
+def barrier_wait(barrier, timeout: float | None = None):
+    barrier.wait(timeout=timeout)
diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py
new file mode 100644
index 0000000000..46adeaefb5
--- /dev/null
+++ b/test/utils_spec_runner.py
@@ -0,0 +1,815 @@
+# Copyright 2019-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for testing driver specs."""
+from __future__ import annotations
+
+import asyncio
+import functools
+import os
+import time
+import unittest
+from collections import abc
+from inspect import iscoroutinefunction
+from test import IntegrationTest, client_context, client_knobs
+from test.helpers import ConcurrentRunner
+from test.utils_shared import (
+    CMAPListener,
+    CompareType,
+    EventListener,
+    OvertCommandListener,
+    ScenarioDict,
+    ServerAndTopologyEventListener,
+    camel_to_snake,
+    camel_to_snake_args,
+    parse_spec_options,
+    prepare_spec_arguments,
+)
+from typing import List
+
+from bson import ObjectId, decode, encode, json_util
+from bson.binary import Binary
+from bson.int64 import Int64
+from bson.son import SON
+from gridfs.synchronous.grid_file import GridFSBucket
+from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError
+from pymongo.lock import _cond_wait, _create_condition, _create_lock
+from pymongo.read_concern import ReadConcern
+from pymongo.read_preferences import ReadPreference
+from pymongo.results import BulkWriteResult, _WriteResult
+from pymongo.synchronous import client_session
+from pymongo.synchronous.command_cursor import CommandCursor
+from pymongo.synchronous.cursor import Cursor
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = True
+
+
+class SpecRunnerThread(ConcurrentRunner):
+    def __init__(self, name):
+        super().__init__(name=name)
+        self.exc = None
+        self.daemon = True
+        self.cond = _create_condition(_create_lock())
+        self.ops = []
+
+    def schedule(self, work):
+        self.ops.append(work)
+        with self.cond:
+            self.cond.notify()
+
+    def stop(self):
+        self.stopped = True
+        with self.cond:
+            self.cond.notify()
+
+    def run(self):
+        while not self.stopped or self.ops:
+            if not self.ops:
+                with self.cond:
+                    _cond_wait(self.cond, 10)
+            if self.ops:
+                try:
+                    work = self.ops.pop(0)
+                    work()
+                except Exception as exc:
+                    self.exc = exc
+                    self.stop()
+
+
+class SpecTestCreator:
+    """Class to create test cases from specifications."""
+
+    def __init__(self, create_test, test_class, test_path):
+        """Create a SpecTestCreator object.
+
+        :Parameters:
+          - `create_test`: callback that returns a test case. The callback
+            must accept the following arguments - a dictionary containing the
+            entire test specification (the `scenario_def`), a dictionary
+            containing the specification for which the test case will be
+            generated (the `test_def`), and the name of the test to create
+            (the `test_name`).
+          - `test_class`: the unittest.TestCase class in which to create the
+            test case.
+          - `test_path`: path to the directory containing the JSON files with
+            the test specifications.
+        """
+        self._create_test = create_test
+        self._test_class = test_class
+        self.test_path = test_path
+
+    def _ensure_min_max_server_version(self, scenario_def, method):
+        """Test modifier that enforces a version range for the server on a
+        test case.
+        """
+        if "minServerVersion" in scenario_def:
+            min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split("."))
+            method = client_context.require_version_min(*min_ver)(method)
+
+        if "maxServerVersion" in scenario_def:
+            max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split("."))
+            method = client_context.require_version_max(*max_ver)(method)
+
+        return method
+
+    @staticmethod
+    def valid_topology(run_on_req):
+        return client_context.is_topology_type(
+            run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"])
+        )
+
+    @staticmethod
+    def min_server_version(run_on_req):
+        version = run_on_req.get("minServerVersion")
+        if version:
+            min_ver = tuple(int(elt) for elt in version.split("."))
+            return client_context.version >= min_ver
+        return True
+
+    @staticmethod
+    def max_server_version(run_on_req):
+        version = run_on_req.get("maxServerVersion")
+        if version:
+            max_ver = tuple(int(elt) for elt in version.split("."))
+            return client_context.version <= max_ver
+        return True
+
+    @staticmethod
+    def valid_auth_enabled(run_on_req):
+        if "authEnabled" in run_on_req:
+            if run_on_req["authEnabled"]:
+                return client_context.auth_enabled
+            return not client_context.auth_enabled
+        return True
+
+    def should_run_on(self, scenario_def):
+        run_on = scenario_def.get("runOn", [])
+        if not run_on:
+            # Always run these tests.
+            return True
+
+        for req in run_on:
+            if (
+                self.valid_topology(req)
+                and self.min_server_version(req)
+                and self.max_server_version(req)
+                and self.valid_auth_enabled(req)
+            ):
+                return True
+        return False
+
+    def ensure_run_on(self, scenario_def, method):
+        """Test modifier that enforces a 'runOn' on a test case."""
+
+        def predicate():
+            return self.should_run_on(scenario_def)
+
+        return client_context._require(predicate, "runOn not satisfied", method)
+
+    def tests(self, scenario_def):
+        """Allow the CMAP spec tests to override the location of the tests."""
+        return scenario_def["tests"]
+
+    def _create_tests(self):
+        for dirpath, _, filenames in os.walk(self.test_path):
+            dirname = os.path.split(dirpath)[-1]
+
+            for filename in filenames:
+                with open(os.path.join(dirpath, filename)) as scenario_stream:  # noqa: ASYNC101, RUF100
+                    # Use tz_aware=False to match how CodecOptions decodes
+                    # dates.
+                    opts = json_util.JSONOptions(tz_aware=False)
+                    scenario_def = ScenarioDict(
+                        json_util.loads(scenario_stream.read(), json_options=opts)
+                    )
+
+                test_type = os.path.splitext(filename)[0]
+
+                # Construct test from scenario.
+                for test_def in self.tests(scenario_def):
+                    test_name = "test_{}_{}_{}".format(
+                        dirname,
+                        test_type.replace("-", "_").replace(".", "_"),
+                        str(test_def["description"].replace(" ", "_").replace(".", "_")),
+                    )
+
+                    new_test = self._create_test(scenario_def, test_def, test_name)
+                    new_test = self._ensure_min_max_server_version(scenario_def, new_test)
+                    new_test = self.ensure_run_on(scenario_def, new_test)
+
+                    new_test.__name__ = test_name
+                    setattr(self._test_class, new_test.__name__, new_test)
+
+    def create_tests(self):
+        if _IS_SYNC:
+            self._create_tests()
+        else:
+            asyncio.run(self._create_tests())
+
+
+class SpecRunner(IntegrationTest):
+    mongos_clients: List
+    knobs: client_knobs
+    listener: EventListener
+
+    def setUp(self) -> None:
+        super().setUp()
+        self.mongos_clients = []
+
+        # Speed up the tests by decreasing the heartbeat frequency.
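+        # (0.1s here, vs. the driver defaults of a 10s heartbeat frequency
+        # and a 0.5s minimum heartbeat interval.)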
+ self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.targets = {} + self.listener = None # type: ignore + self.pool_listener = None + self.server_listener = None + self.maxDiff = None + + def tearDown(self) -> None: + self.knobs.disable() + + def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + self.configure_fail_point(client, command_args) + + def targeted_fail_point(self, session, fail_point): + """Run the targetedFailPoint test operation. + + Enable the fail point on the session's pinned mongos. + """ + clients = {c.address: c for c in self.mongos_clients} + client = clients[session._pinned_address] + self.configure_fail_point(client, fail_point) + self.addCleanup(self.set_fail_point, {"mode": "off"}) + + def assert_session_pinned(self, session): + """Run the assertSessionPinned test operation. + + Assert that the given session is pinned. + """ + self.assertIsNotNone(session._transaction.pinned_address) + + def assert_session_unpinned(self, session): + """Run the assertSessionUnpinned test operation. + + Assert that the given session is not pinned. + """ + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + def assert_collection_exists(self, database, collection): + """Run the assertCollectionExists test operation.""" + db = self.client[database] + self.assertIn(collection, db.list_collection_names()) + + def assert_collection_not_exists(self, database, collection): + """Run the assertCollectionNotExists test operation.""" + db = self.client[database] + self.assertNotIn(collection, db.list_collection_names()) + + def assert_index_exists(self, database, collection, index): + """Run the assertIndexExists test operation.""" + coll = self.client[database][collection] + self.assertIn(index, [doc["name"] for doc in coll.list_indexes()]) + + def assert_index_not_exists(self, database, collection, index): + """Run the assertIndexNotExists test operation.""" + coll = self.client[database][collection] + self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) + + def wait(self, ms): + """Run the "wait" test operation.""" + time.sleep(ms / 1000.0) + + def assertErrorLabelsContain(self, exc, expected_labels): + labels = [l for l in expected_labels if exc.has_error_label(l)] + self.assertEqual(labels, expected_labels) + + def assertErrorLabelsOmit(self, exc, omit_labels): + for label in omit_labels: + self.assertFalse( + exc.has_error_label(label), msg=f"error labels should not contain {label}" + ) + + def kill_all_sessions(self): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. + pass + + def check_command_result(self, expected_result, result): + # Only compare the keys in the expected result. + filtered_result = {} + for key in expected_result: + try: + filtered_result[key] = result[key] + except KeyError: + pass + self.assertEqual(filtered_result, expected_result) + + # TODO: factor the following function with test_crud.py. 
+ def check_result(self, expected_result, result): + if isinstance(result, _WriteResult): + for res in expected_result: + prop = camel_to_snake(res) + # SPEC-869: Only BulkWriteResult has upserted_count. + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): + if result.upserted_id is not None: + upserted_count = 1 + else: + upserted_count = 0 + self.assertEqual(upserted_count, expected_result[res], prop) + elif prop == "inserted_ids": + # BulkWriteResult does not have inserted_ids. + if isinstance(result, BulkWriteResult): + self.assertEqual(len(expected_result[res]), result.inserted_count) + else: + # InsertManyResult may be compared to [id1] from the + # crud spec or {"0": id1} from the retryable write spec. + ids = expected_result[res] + if isinstance(ids, dict): + ids = [ids[str(i)] for i in range(len(ids))] + + self.assertEqual(ids, result.inserted_ids, prop) + elif prop == "upserted_ids": + # Convert indexes from strings to integers. + ids = expected_result[res] + expected_ids = {} + for str_index in ids: + expected_ids[int(str_index)] = ids[str_index] + self.assertEqual(expected_ids, result.upserted_ids, prop) + else: + self.assertEqual(getattr(result, prop), expected_result[res], prop) + + return True + else: + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) + return None + + def get_object_name(self, op): + """Allow subclasses to override handling of 'object' + + Transaction spec says 'object' is required. + """ + return op["object"] + + @staticmethod + def parse_options(opts): + return parse_spec_options(opts) + + def run_operation(self, sessions, collection, operation): + original_collection = collection + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") + + database = collection.database + collection = database.get_collection(collection.name) + if "collectionOptions" in operation: + collection = collection.with_options( + **self.parse_options(operation["collectionOptions"]) + ) + + object_name = self.get_object_name(operation) + if object_name == "gridfsbucket": + # Only create the GridFSBucket when we need it (for the gridfs + # retryable reads tests). + obj = GridFSBucket(database, bucket_name=collection.name) + else: + objects = { + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, + } + objects.update(sessions) + obj = objects[object_name] + + # Combine arguments with options and handle special cases. 
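+        # For example, {"arguments": {"filter": {}, "options": {"batchSize": 2}}}
+        # is flattened to {"filter": {}, "batchSize": 2} before kwarg conversion.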
+        arguments = operation.get("arguments", {})
+        arguments.update(arguments.pop("options", {}))
+        self.parse_options(arguments)
+
+        cmd = getattr(obj, name)
+
+        with_txn_callback = functools.partial(
+            self.run_operations, sessions, original_collection, in_with_transaction=True
+        )
+        prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback)
+
+        if name == "run_on_thread":
+            args = {"sessions": sessions, "collection": collection}
+            args.update(arguments)
+            arguments = args
+
+        # _IS_SYNC is True in this module, so cmd is always invoked directly.
+        result = cmd(**dict(arguments))
+        # Clean up open change stream cursors.
+        if name == "watch":
+            self.addCleanup(result.close)
+
+        if name == "aggregate":
+            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
+                # Read from the primary to ensure causal consistency.
+                out = collection.database.get_collection(
+                    arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY
+                )
+                return out.find()
+        if "download" in name:
+            result = Binary(result.read())
+
+        if isinstance(result, (Cursor, CommandCursor)):
+            return result.to_list()
+
+        return result
+
+    def allowable_errors(self, op):
+        """Allow the encryption spec to override the expected error classes."""
+        return (PyMongoError,)
+
+    def _run_op(self, sessions, collection, op, in_with_transaction):
+        expected_result = op.get("result")
+        if expect_error(op):
+            with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context:
+                self.run_operation(sessions, collection, op.copy())
+            exc = context.exception
+            if expect_error_message(expected_result):
+                if isinstance(exc, BulkWriteError):
+                    errmsg = str(exc.details).lower()
+                else:
+                    errmsg = str(exc).lower()
+                self.assertIn(expected_result["errorContains"].lower(), errmsg)
+            if expect_error_code(expected_result):
+                self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName"))
+            if expect_error_labels_contain(expected_result):
+                self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"])
+            if expect_error_labels_omit(expected_result):
+                self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"])
+            if expect_timeout_error(expected_result):
+                self.assertIsInstance(exc, PyMongoError)
+                if not exc.timeout:
+                    # Re-raise the exception for better diagnostics.
+                    raise exc
+
+            # Reraise the exception if we're in the with_transaction
+            # callback.
+            if in_with_transaction:
+                raise exc
+        else:
+            result = self.run_operation(sessions, collection, op.copy())
+            if "result" in op:
+                if op["name"] == "runCommand":
+                    self.check_command_result(expected_result, result)
+                else:
+                    self.check_result(expected_result, result)
+
+    def run_operations(self, sessions, collection, ops, in_with_transaction=False):
+        for op in ops:
+            self._run_op(sessions, collection, op, in_with_transaction)
+
+    # TODO: factor with test_command_monitoring.py
+    def check_events(self, test, listener, session_ids):
+        events = listener.started_events
+        if not test["expectations"]:
+            return
+
+        # Give a nicer message when there are missing or extra events.
+        cmds = decode_raw([event.command for event in events])
+        self.assertEqual(len(events), len(test["expectations"]), cmds)
+        for i, expectation in enumerate(test["expectations"]):
+            event_type = next(iter(expectation))
+            event = events[i]
+
+            # The tests substitute 42 for any number other than 0.
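+            # For example, a live cursor id in a getMore command is normalized
+            # to Int64(42) so it can be compared against the expectation.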
+            if event.command_name == "getMore" and event.command["getMore"]:
+                event.command["getMore"] = Int64(42)
+            elif event.command_name == "killCursors":
+                event.command["cursors"] = [Int64(42)]
+            elif event.command_name == "update":
+                # TODO: remove this once PYTHON-1744 is done.
+                # Add upsert and multi fields back into expectations.
+                updates = expectation[event_type]["command"]["updates"]
+                for update in updates:
+                    update.setdefault("upsert", False)
+                    update.setdefault("multi", False)
+
+            # Replace afterClusterTime: 42 with the actual afterClusterTime.
+            expected_cmd = expectation[event_type]["command"]
+            expected_read_concern = expected_cmd.get("readConcern")
+            if expected_read_concern is not None:
+                after_cluster_time = expected_read_concern.get("afterClusterTime")
+                if after_cluster_time == 42:
+                    actual_time = event.command.get("readConcern", {}).get("afterClusterTime")
+                    if actual_time is not None:
+                        expected_read_concern["afterClusterTime"] = actual_time
+
+            recovery_token = expected_cmd.get("recoveryToken")
+            if recovery_token == 42:
+                expected_cmd["recoveryToken"] = CompareType(dict)
+
+            # Replace lsid with a name like "session0" to match the test.
+            if "lsid" in event.command:
+                for name, lsid in session_ids.items():
+                    if event.command["lsid"] == lsid:
+                        event.command["lsid"] = name
+                        break
+
+            for attr, expected in expectation[event_type].items():
+                actual = getattr(event, attr)
+                expected = wrap_types(expected)
+                if isinstance(expected, dict):
+                    for key, val in expected.items():
+                        if val is None:
+                            if key in actual:
+                                self.fail(f"Unexpected key [{key}] in {actual!r}")
+                        elif key not in actual:
+                            self.fail(f"Expected key [{key}] in {actual!r}")
+                        else:
+                            self.assertEqual(
+                                val, decode_raw(actual[key]), f"Key [{key}] in {actual}"
+                            )
+                else:
+                    self.assertEqual(actual, expected)
+
+    def maybe_skip_scenario(self, test):
+        if test.get("skipReason"):
+            self.skipTest(test.get("skipReason"))
+
+    def get_scenario_db_name(self, scenario_def):
+        """Allow subclasses to override a test's database name."""
+        return scenario_def["database_name"]
+
+    def get_scenario_coll_name(self, scenario_def):
+        """Allow subclasses to override a test's collection name."""
+        return scenario_def["collection_name"]
+
+    def get_outcome_coll_name(self, outcome, collection):
+        """Allow subclasses to override the outcome collection."""
+        return collection.name
+
+    def run_test_ops(self, sessions, collection, test):
+        """Hook that allows the retryable writes spec to override a test's
+        operations.
+        """
+        self.run_operations(sessions, collection, test["operations"])
+
+    def parse_client_options(self, opts):
+        """Allow the encryption spec to override clientOptions parsing."""
+        # Convert test['clientOptions'] to dict to avoid a Jython bug using
+        # "**" with ScenarioDict.
+        return dict(opts)
+
+    def setup_scenario(self, scenario_def):
+        """Allow specs to override a test's setup."""
+        db_name = self.get_scenario_db_name(scenario_def)
+        coll_name = self.get_scenario_coll_name(scenario_def)
+        documents = scenario_def["data"]
+
+        # Set up the collection with as few majority writes as possible.
+        db = client_context.client.get_database(db_name)
+        coll_exists = bool(db.list_collection_names(filter={"name": coll_name}))
+        if coll_exists:
+            db[coll_name].delete_many({})
+        # Only use a majority write concern on the final write.
+        wc = WriteConcern(w="majority")
+        if documents:
+            db.get_collection(coll_name, write_concern=wc).insert_many(documents)
+        elif not coll_exists:
+            # Ensure the collection exists.
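+            # (A scenario with no seed documents may still operate on the
+            # collection, so it must exist up front.)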
+            db.create_collection(coll_name, write_concern=wc)
+
+    def run_scenario(self, scenario_def, test):
+        self.maybe_skip_scenario(test)
+
+        # Kill all sessions before and after each test to prevent an open
+        # transaction (from a test failure) from blocking collection/database
+        # operations during test set up and tear down.
+        self.kill_all_sessions()
+        self.addCleanup(self.kill_all_sessions)
+        self.setup_scenario(scenario_def)
+        database_name = self.get_scenario_db_name(scenario_def)
+        collection_name = self.get_scenario_coll_name(scenario_def)
+        # SPEC-1245: workaround for StaleDbVersion on distinct.
+        for c in self.mongos_clients:
+            c[database_name][collection_name].distinct("x")
+
+        # Configure the fail point before creating the client.
+        if "failPoint" in test:
+            fp = test["failPoint"]
+            self.set_fail_point(fp)
+            self.addCleanup(
+                self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"}
+            )
+
+        listener = OvertCommandListener()
+        pool_listener = CMAPListener()
+        server_listener = ServerAndTopologyEventListener()
+        # Create a new client to avoid interference from pooled sessions.
+        client_options = self.parse_client_options(test["clientOptions"])
+        use_multi_mongos = test["useMultipleMongoses"]
+        host = None
+        if use_multi_mongos:
+            if client_context.load_balancer:
+                host = client_context.MULTI_MONGOS_LB_URI
+            elif client_context.is_mongos:
+                host = client_context.mongos_seeds()
+        client = self.rs_client(
+            h=host, event_listeners=[listener, pool_listener, server_listener], **client_options
+        )
+        self.scenario_client = client
+        self.listener = listener
+        self.pool_listener = pool_listener
+        self.server_listener = server_listener
+
+        # Create session0 and session1.
+        sessions = {}
+        session_ids = {}
+        for i in range(2):
+            # Don't attempt to create sessions if they are not supported by
+            # the running server version.
+            if not client_context.sessions_enabled:
+                break
+            session_name = "session%d" % i
+            opts = camel_to_snake_args(test["sessionOptions"][session_name])
+            if "default_transaction_options" in opts:
+                txn_opts = self.parse_options(opts["default_transaction_options"])
+                txn_opts = client_session.TransactionOptions(**txn_opts)
+                opts["default_transaction_options"] = txn_opts
+
+            s = client.start_session(**dict(opts))
+
+            sessions[session_name] = s
+            # Store lsid so we can access it after end_session, in check_events.
+            session_ids[session_name] = s.session_id
+
+        self.addCleanup(end_sessions, sessions)
+
+        collection = client[database_name][collection_name]
+        self.run_test_ops(sessions, collection, test)
+
+        end_sessions(sessions)
+
+        self.check_events(test, listener, session_ids)
+
+        # Disable fail points.
+        if "failPoint" in test:
+            fp = test["failPoint"]
+            self.set_fail_point({"configureFailPoint": fp["configureFailPoint"], "mode": "off"})
+
+        # Assert the final state is as expected.
+        outcome = test["outcome"]
+        expected_c = outcome.get("collection")
+        if expected_c is not None:
+            outcome_coll_name = self.get_outcome_coll_name(outcome, collection)
+
+            # Read from the primary with local read concern to ensure causal
+            # consistency.
+            outcome_coll = client_context.client[collection.database.name].get_collection(
+                outcome_coll_name,
+                read_preference=ReadPreference.PRIMARY,
+                read_concern=ReadConcern("local"),
+            )
+            actual_data = outcome_coll.find(sort=[("_id", 1)]).to_list()
+
+            # The expected data needs to be on the left-hand side here,
+            # otherwise CompareType(Binary) doesn't work.
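+            # wrap_types converts {"$$type": ...} markers in the expected data
+            # into CompareType instances so type-only assertions can match.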
+ self.assertEqual(wrap_types(expected_c["data"]), actual_data) + + +def expect_any_error(op): + if isinstance(op, dict): + return op.get("error") + + return False + + +def expect_error_message(expected_result): + if isinstance(expected_result, dict): + return isinstance(expected_result["errorContains"], str) + + return False + + +def expect_error_code(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorCodeName"] + + return False + + +def expect_error_labels_contain(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsContain"] + + return False + + +def expect_error_labels_omit(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsOmit"] + + return False + + +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + +def expect_error(op): + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) + ) + + +def end_sessions(sessions): + for s in sessions.values(): + # Aborts the transaction if it's open. + s.end_session() + + +def decode_raw(val): + """Decode RawBSONDocuments in the given container.""" + if isinstance(val, (list, abc.Mapping)): + return decode(encode({"v": val}))["v"] + return val + + +TYPES = { + "binData": Binary, + "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, +} + + +def wrap_types(val): + """Support $$type assertion in command results.""" + if isinstance(val, list): + return [wrap_types(v) for v in val] + if isinstance(val, abc.Mapping): + typ = val.get("$$type") + if typ: + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) + d = {} + for key in val: + d[key] = wrap_types(val[key]) + return d + return val diff --git a/test/version.py b/test/version.py index 1632f8747d..ae6ecb331f 100644 --- a/test/version.py +++ b/test/version.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,43 +13,22 @@ # limitations under the License. 
"""Some tools for running tests based on MongoDB server version.""" +from __future__ import annotations +from pymongo.common import Version as BaseVersion -def _padded(iter, length, padding=0): - l = list(iter) - if len(l) < length: - for _ in range(length - len(l)): - l.append(0) - return l +class Version(BaseVersion): + @classmethod + def from_client(cls, client): + info = client.server_info() + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) -def _parse_version_string(version_string): - mod = 0 - if version_string.endswith("+"): - version_string = version_string[0:-1] - mod = 1 - elif version_string.endswith("-pre-"): - version_string = version_string[0:-5] - mod = -1 - elif version_string.endswith("-"): - version_string = version_string[0:-1] - mod = -1 - # Deal with '-rcX' substrings - if version_string.find('-rc') != -1: - version_string = version_string[0:version_string.find('-rc')] - mod = -1 - - version = [int(part) for part in version_string.split(".")] - version = _padded(version, 3) - version.append(mod) - - return tuple(version) - - -# Note this is probably broken for very old versions of the database... -def version(client): - return _parse_version_string(client.server_info()["version"]) - - -def at_least(client, min_version): - return version(client) >= tuple(_padded(min_version, 4)) + @classmethod + async def async_from_client(cls, client): + info = await client.server_info() + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json new file mode 100644 index 0000000000..c1c8ecce01 --- /dev/null +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -0,0 +1,1109 @@ +{ + "description": "CRUD Api Version 1 (strict)", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + 
"$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": 
"collection", + "arguments": { + "filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isError": true, + "errorContains": "command distinct is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "find and getMore append API version", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": "1", + "apiStrict": true, + 
"apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": 
"updateMany appends declared API version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json new file mode 100644 index 0000000000..23ef59a6d9 --- /dev/null +++ b/test/versioned-api/crud-api-version-1.json @@ -0,0 +1,1182 @@ +{ + "description": "CRUD Api Version 1", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "deprecationErrors": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + ], + "namespace": "versioned-api-tests.test" + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "aggregate", + "object": 
"adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + 
"insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "find and getMore append API version", + "operations": [ + 
{ + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + 
] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateMany appends declared API version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/runcommand-helper-no-api-version-declared.json b/test/versioned-api/runcommand-helper-no-api-version-declared.json new file mode 100644 index 0000000000..17e0126d10 --- /dev/null +++ b/test/versioned-api/runcommand-helper-no-api-version-declared.json @@ -0,0 +1,127 @@ +{ + "description": "RunCommand helper: No API version declared", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + "requireApiVersion": false + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "runCommand does not inspect or change the command document", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version" + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + 
"databaseName": "versioned-api-tests" + } + } + ] + } + ] + }, + { + "description": "runCommand does not prevent sending invalid API version declarations", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiStrict": true + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": { + "$$exists": false + }, + "apiStrict": true, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + "databaseName": "versioned-api-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-deprecation-errors.json b/test/versioned-api/test-commands-deprecation-errors.json new file mode 100644 index 0000000000..0668df830a --- /dev/null +++ b/test/versioned-api/test-commands-deprecation-errors.json @@ -0,0 +1,74 @@ +{ + "description": "Test commands: deprecation errors", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + "enableTestCommands": true, + "acceptApiVersion2": true, + "requireApiVersion": false + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is deprecated raises a deprecation error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testDeprecationInVersion2", + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiDeprecationErrors": true + } + }, + "expectError": { + "isError": true, + "errorContains": "command testDeprecationInVersion2 is deprecated in API Version 2", + "errorCodeName": "APIDeprecationError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-strict-mode.json b/test/versioned-api/test-commands-strict-mode.json new file mode 100644 index 0000000000..9c4ebea785 --- /dev/null +++ b/test/versioned-api/test-commands-strict-mode.json @@ -0,0 +1,75 @@ +{ + "description": "Test commands: strict mode", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverParameters": { + "enableTestCommands": true + }, + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is not part of the versioned API results in an error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testVersion2", + "command": { + "testVersion2": 1 + } + }, + "expectError": { + "isError": true, + "errorContains": "command testVersion2 is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } 
+ ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testVersion2": 1, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json new file mode 100644 index 0000000000..32031296af --- /dev/null +++ b/test/versioned-api/transaction-handling.json @@ -0,0 +1,348 @@ +{ + "description": "Transaction handling", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "All commands in a transaction declare an API version", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction includes an API version", + "runOnRequirements": [ + { + "topologies": [ + 
"replicaset", + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/tools/benchmark.py b/tools/benchmark.py deleted file mode 100644 index a7daa2c64f..0000000000 --- a/tools/benchmark.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2009-2014 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""MongoDB benchmarking suite.""" - -import time -import sys -sys.path[0:0] = [""] - -import datetime -import cProfile - -from pymongo import mongo_client -from pymongo import ASCENDING - -trials = 2 -per_trial = 5000 -batch_size = 100 -small = {} -medium = {"integer": 5, - "number": 5.05, - "boolean": False, - "array": ["test", "benchmark"] - } -# this is similar to the benchmark data posted to the user list -large = {"base_url": "http://www.example.com/test-me", - "total_word_count": 6743, - "access_time": datetime.datetime.utcnow(), - "meta_tags": {"description": "i am a long description string", - "author": "Holly Man", - "dynamically_created_meta_tag": "who know\n what" - }, - "page_structure": {"counted_tags": 3450, - "no_of_js_attached": 10, - "no_of_images": 6 - }, - "harvested_words": ["10gen", "web", "open", "source", "application", - "paas", "platform-as-a-service", "technology", - "helps", "developers", "focus", "building", - "mongodb", "mongo"] * 20 - } - - -def setup_insert(db, collection, object): - db.drop_collection(collection) - - -def insert(db, collection, object): - for i in range(per_trial): - to_insert = object.copy() - to_insert["x"] = i - db[collection].insert(to_insert) - - -def insert_batch(db, collection, object): - for i in range(per_trial / batch_size): - db[collection].insert([object] * batch_size) - - -def find_one(db, collection, x): - for _ in range(per_trial): - db[collection].find_one({"x": x}) - - -def find(db, collection, x): - for _ in range(per_trial): - for _ in db[collection].find({"x": x}): - pass - - -def timed(name, function, args=[], setup=None): - times = [] - for _ in range(trials): - if setup: - setup(*args) - start = time.time() - function(*args) - times.append(time.time() - start) - best_time = min(times) - print "%s%d" % (name + (60 - len(name)) * ".", per_trial / best_time) - return best_time - - -def main(): - c = mongo_client.MongoClient(connectTimeoutMS=60*1000) # jack up timeout - c.drop_database("benchmark") - db = c.benchmark - - timed("insert (small, no index)", insert, - [db, 'small_none', small], setup_insert) - timed("insert (medium, no index)", insert, - [db, 'medium_none', medium], setup_insert) - timed("insert (large, no index)", insert, - [db, 'large_none', large], setup_insert) - - db.small_index.create_index("x", ASCENDING) - timed("insert (small, indexed)", insert, [db, 'small_index', small]) - db.medium_index.create_index("x", ASCENDING) - timed("insert (medium, indexed)", insert, [db, 'medium_index', medium]) - db.large_index.create_index("x", ASCENDING) - timed("insert (large, indexed)", insert, [db, 'large_index', large]) - - timed("batch insert (small, no index)", insert_batch, - [db, 'small_bulk', small], setup_insert) - timed("batch insert (medium, no index)", insert_batch, - [db, 'medium_bulk', medium], setup_insert) - timed("batch insert (large, no index)", insert_batch, - [db, 'large_bulk', large], setup_insert) - - timed("find_one (small, no index)", find_one, - [db, 'small_none', per_trial / 2]) - timed("find_one (medium, no index)", find_one, - [db, 'medium_none', per_trial / 2]) - timed("find_one (large, no index)", find_one, - [db, 'large_none', per_trial / 2]) - - timed("find_one (small, indexed)", find_one, - [db, 'small_index', per_trial / 2]) - timed("find_one (medium, indexed)", find_one, - [db, 'medium_index', per_trial / 2]) - timed("find_one (large, indexed)", find_one, - [db, 'large_index', per_trial / 2]) - - timed("find (small, no index)", find, [db, 'small_none', per_trial / 2]) - 
timed("find (medium, no index)", find, [db, 'medium_none', per_trial / 2]) - timed("find (large, no index)", find, [db, 'large_none', per_trial / 2]) - - timed("find (small, indexed)", find, [db, 'small_index', per_trial / 2]) - timed("find (medium, indexed)", find, [db, 'medium_index', per_trial / 2]) - timed("find (large, indexed)", find, [db, 'large_index', per_trial / 2]) - -# timed("find range (small, no index)", find, -# [db, 'small_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (medium, no index)", find, -# [db, 'medium_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (large, no index)", find, -# [db, 'large_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) - - timed("find range (small, indexed)", find, - [db, 'small_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (medium, indexed)", find, - [db, 'medium_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (large, indexed)", find, - [db, 'large_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - -if __name__ == "__main__": -# cProfile.run("main()") - main() diff --git a/tools/clean.py b/tools/clean.py index 9c260e3a71..b6e1867a0a 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,30 +16,33 @@ Only really intended to be used by internal build scripts. """ +from __future__ import annotations -import os import sys +from pathlib import Path try: - os.remove("pymongo/_cmessage.so") - os.remove("bson/_cbson.so") -except: + Path("pymongo/_cmessage.so").unlink() + Path("bson/_cbson.so").unlink() +except BaseException: # noqa: S110 pass try: - os.remove("pymongo/_cmessage.pyd") - os.remove("bson/_cbson.pyd") -except: + Path("pymongo/_cmessage.pyd").unlink() + Path("bson/_cbson.pyd").unlink() +except BaseException: # noqa: S110 pass try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + sys.exit("could still import _cmessage") except ImportError: pass try: - from bson import _cbson + from bson import _cbson # type: ignore[attr-defined] # noqa: F401 + sys.exit("could still import _cbson") except ImportError: pass diff --git a/tools/compare_import_time.py b/tools/compare_import_time.py new file mode 100644 index 0000000000..fdc344f2e9 --- /dev/null +++ b/tools/compare_import_time.py @@ -0,0 +1,37 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import sys + +base_sha = sys.argv[-1] +head_sha = sys.argv[-2] + + +def get_total_time(sha: str) -> int: + with open(f"pymongo-{sha}.log") as fid: + last_line = fid.readlines()[-1] + return int(last_line.split()[4]) + + +base_time = get_total_time(base_sha) +curr_time = get_total_time(head_sha) + +# Check if we got 20% or more slower. +change = int((curr_time - base_time) / base_time * 100) +if change > 20: + print(f"PyMongo import got {change} percent worse") + sys.exit(1) + +print(f"Import time changed by {change} percent") diff --git a/tools/convert_test_to_async.py b/tools/convert_test_to_async.py new file mode 100644 index 0000000000..6c68c34bf3 --- /dev/null +++ b/tools/convert_test_to_async.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import inspect +import sys + +from pymongo import AsyncMongoClient +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase + +replacements = { + "Collection": "AsyncCollection", + "Database": "AsyncDatabase", + "Cursor": "AsyncCursor", + "MongoClient": "AsyncMongoClient", + "CommandCursor": "AsyncCommandCursor", + "RawBatchCursor": "AsyncRawBatchCursor", + "RawBatchCommandCursor": "AsyncRawBatchCommandCursor", + "ClientSession": "AsyncClientSession", + "ChangeStream": "AsyncChangeStream", + "CollectionChangeStream": "AsyncCollectionChangeStream", + "DatabaseChangeStream": "AsyncDatabaseChangeStream", + "ClusterChangeStream": "AsyncClusterChangeStream", + "_Bulk": "_AsyncBulk", + "_ClientBulk": "_AsyncClientBulk", + "Connection": "AsyncConnection", + "synchronous": "asynchronous", + "Synchronous": "Asynchronous", + "next": "await anext", + "_Lock": "_ALock", + "_Condition": "_ACondition", + "GridFS": "AsyncGridFS", + "GridFSBucket": "AsyncGridFSBucket", + "GridIn": "AsyncGridIn", + "GridOut": "AsyncGridOut", + "GridOutCursor": "AsyncGridOutCursor", + "GridOutIterator": "AsyncGridOutIterator", + "GridOutChunkIterator": "_AsyncGridOutChunkIterator", + "_grid_in_property": "_a_grid_in_property", + "_grid_out_property": "_a_grid_out_property", + "ClientEncryption": "AsyncClientEncryption", + "MongoCryptCallback": "AsyncMongoCryptCallback", + "ExplicitEncrypter": "AsyncExplicitEncrypter", + "AutoEncrypter": "AsyncAutoEncrypter", + "ContextManager": "AsyncContextManager", + "ClientContext": "AsyncClientContext", + "TestCollection": "AsyncTestCollection", + "IntegrationTest": "AsyncIntegrationTest", + "PyMongoTestCase": "AsyncPyMongoTestCase", + "MockClientTest": "AsyncMockClientTest", + "client_context": "async_client_context", + "setUp": "asyncSetUp", + "tearDown": "asyncTearDown", + "wait_until": "await async_wait_until", + "addCleanup": "addAsyncCleanup", + "TestCase": "IsolatedAsyncioTestCase", + "UnitTest": "AsyncUnitTest", + "MockClient": "AsyncMockClient", + "SpecRunner": "AsyncSpecRunner", + "TransactionsBase": "AsyncTransactionsBase", + "get_pool": "await async_get_pool", + "is_mongos": "await async_is_mongos", + "rs_or_single_client": "await async_rs_or_single_client", + "rs_or_single_client_noauth": "await async_rs_or_single_client_noauth", + "rs_client": "await async_rs_client", + "single_client": "await async_single_client", + "from_client": "await async_from_client", + "closing": "aclosing", + "assertRaisesExactly": "asyncAssertRaisesExactly", + "get_mock_client": "await get_async_mock_client", + "close": "await 
aclose", +} + +async_classes = [AsyncMongoClient, AsyncDatabase, AsyncCollection, AsyncCursor, AsyncCommandCursor] + + +def get_async_methods() -> set[str]: + result: set[str] = set() + for x in async_classes: + methods = { + k + for k, v in vars(x).items() + if callable(v) + and not isinstance(v, classmethod) + and inspect.iscoroutinefunction(v) + and v.__name__[0] != "_" + } + result = result | methods + return result + + +async_methods = get_async_methods() + + +def apply_replacements(lines: list[str]) -> list[str]: + for i in range(len(lines)): + if "_IS_SYNC = True" in lines[i]: + lines[i] = "_IS_SYNC = False" + if "def test" in lines[i]: + lines[i] = lines[i].replace("def test", "async def test") + for k in replacements: + if k in lines[i]: + lines[i] = lines[i].replace(k, replacements[k]) + for k in async_methods: + if k + "(" in lines[i]: + tokens = lines[i].split(" ") + for j in range(len(tokens)): + if k + "(" in tokens[j]: + if j < 2: + tokens.insert(0, "await") + else: + tokens.insert(j, "await") + break + new_line = " ".join(tokens) + + lines[i] = new_line + + return lines + + +def process_file(input_file: str, output_file: str) -> None: + with open(input_file, "r+") as f: + lines = f.readlines() + lines = apply_replacements(lines) + + with open(output_file, "w+") as f2: + f2.seek(0) + f2.writelines(lines) + f2.truncate() + + +def main() -> None: + args = sys.argv[1:] + sync_file = "./test/" + args[0] + async_file = "./" + args[0] + + process_file(sync_file, async_file) + + +main() diff --git a/tools/ensure_future_annotations_import.py b/tools/ensure_future_annotations_import.py new file mode 100644 index 0000000000..55080148e4 --- /dev/null +++ b/tools/ensure_future_annotations_import.py @@ -0,0 +1,41 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ensure that 'from __future__ import annotations' is used in all package files +""" +from __future__ import annotations + +import sys +from pathlib import Path + +pattern = "from __future__ import annotations" +missing = [] +for dirname in ["pymongo", "bson", "gridfs"]: + for path in Path(dirname).glob("*.py"): + if Path(path).name in ["_version.py", "errors.py"]: + continue + found = False + with open(path) as fid: + for line in fid.readlines(): + if line.strip() == pattern: + found = True + break + if not found: + missing.append(path) + +if missing: + print(f"Missing '{pattern}' import in:") + for item in missing: + print(item) + sys.exit(1) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 897bfcae06..64280a81d2 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -1,4 +1,4 @@ -# Copyright 2009-2014 MongoDB, Inc. +# Copyright 2009-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,12 +16,32 @@ Only really intended to be used by internal build scripts. 
""" +from __future__ import annotations +import logging import sys + +LOGGER = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") + sys.path[0:0] = [""] -import bson -import pymongo +import bson # noqa: E402 +import pymongo # noqa: E402 + + +def main() -> None: + if not pymongo.has_c() or not bson.has_c(): + try: + from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + try: + from bson import _cbson # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + sys.exit("could not load C extensions") + -if not pymongo.has_c() or not bson.has_c(): - sys.exit("could not load C extensions") +if __name__ == "__main__": + main() diff --git a/tools/ocsptest.py b/tools/ocsptest.py new file mode 100644 index 0000000000..8596db226d --- /dev/null +++ b/tools/ocsptest.py @@ -0,0 +1,62 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. +from __future__ import annotations + +import argparse +import logging +import socket + +from pymongo.pyopenssl_context import SSLContext +from pymongo.ssl_support import get_ssl_context + +# Enable logs in this format: +# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" +logging.basicConfig(format=FORMAT, level=logging.DEBUG) + + +def check_ocsp(host: str, port: int, capath: str) -> None: + ctx = get_ssl_context( + None, # certfile + None, # passphrase + capath, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + True, # is sync + ) # disable_ocsp_endpoint_check + + # Ensure we're using pyOpenSSL. + assert isinstance(ctx, SSLContext) + + s = socket.socket() + s.connect((host, port)) + try: + s = ctx.wrap_socket(s, server_hostname=host) # type: ignore[assignment] + finally: + s.close() + + +def main() -> None: + parser = argparse.ArgumentParser(description="Debug OCSP") + parser.add_argument("--host", type=str, required=True, help="Host to connect to") + parser.add_argument("-p", "--port", type=int, default=443, help="Port to connect to") + parser.add_argument("--ca_file", type=str, default=None, help="CA file for host") + args = parser.parse_args() + check_ocsp(args.host, args.port, args.ca_file) + + +if __name__ == "__main__": + main() diff --git a/tools/synchro.py b/tools/synchro.py new file mode 100644 index 0000000000..e3d4835502 --- /dev/null +++ b/tools/synchro.py @@ -0,0 +1,465 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Synchronization of asynchronous modules. + +Used as part of our build system to generate synchronous code. +""" + +from __future__ import annotations + +import os +import re +import sys +from os import listdir +from pathlib import Path + +from unasync import Rule, unasync_files # type: ignore[import-not-found] + +replacements = { + "AsyncCollection": "Collection", + "AsyncDatabase": "Database", + "AsyncCursor": "Cursor", + "AsyncMongoClient": "MongoClient", + "AsyncCommandCursor": "CommandCursor", + "AsyncRawBatchCursor": "RawBatchCursor", + "AsyncRawBatchCommandCursor": "RawBatchCommandCursor", + "AsyncClientSession": "ClientSession", + "AsyncChangeStream": "ChangeStream", + "AsyncCollectionChangeStream": "CollectionChangeStream", + "AsyncDatabaseChangeStream": "DatabaseChangeStream", + "AsyncClusterChangeStream": "ClusterChangeStream", + "_AsyncBulk": "_Bulk", + "_AsyncClientBulk": "_ClientBulk", + "AsyncConnection": "Connection", + "async_command": "command", + "async_receive_message": "receive_message", + "async_receive_data": "receive_data", + "async_sendall": "sendall", + "async_socket_sendall": "sendall", + "asynchronous": "synchronous", + "Asynchronous": "Synchronous", + "AsyncBulkTestBase": "BulkTestBase", + "AsyncBulkAuthorizationTestBase": "BulkAuthorizationTestBase", + "anext": "next", + "aiter": "iter", + "_ALock": "_Lock", + "_ACondition": "_Condition", + "AsyncGridFS": "GridFS", + "AsyncGridFSBucket": "GridFSBucket", + "AsyncGridIn": "GridIn", + "AsyncGridOut": "GridOut", + "AsyncGridOutCursor": "GridOutCursor", + "AsyncGridOutIterator": "GridOutIterator", + "_AsyncGridOutChunkIterator": "GridOutChunkIterator", + "_a_grid_in_property": "_grid_in_property", + "_a_grid_out_property": "_grid_out_property", + "AsyncClientEncryption": "ClientEncryption", + "AsyncMongoCryptCallback": "MongoCryptCallback", + "AsyncExplicitEncrypter": "ExplicitEncrypter", + "AsyncAutoEncrypter": "AutoEncrypter", + "AsyncContextManager": "ContextManager", + "AsyncClientContext": "ClientContext", + "AsyncTestCollection": "TestCollection", + "AsyncIntegrationTest": "IntegrationTest", + "AsyncPyMongoTestCase": "PyMongoTestCase", + "AsyncMockClientTest": "MockClientTest", + "async_client_context": "client_context", + "async_setup": "setup", + "asyncSetUp": "setUp", + "asyncTearDown": "tearDown", + "async_teardown": "teardown", + "pytest_asyncio": "pytest", + "async_wait_until": "wait_until", + "addAsyncCleanup": "addCleanup", + "async_setup_class": "setup_class", + "IsolatedAsyncioTestCase": "TestCase", + "AsyncUnitTest": "UnitTest", + "AsyncMockClient": "MockClient", + "AsyncSpecRunner": "SpecRunner", + "AsyncTransactionsBase": "TransactionsBase", + "async_get_pool": "get_pool", + "async_is_mongos": "is_mongos", + "async_rs_or_single_client": "rs_or_single_client", + "async_rs_or_single_client_noauth": "rs_or_single_client_noauth", + "async_rs_client": "rs_client", + "async_single_client": "single_client", + "async_from_client": "from_client", + "aclosing": "closing", + "asyncAssertRaisesExactly": "assertRaisesExactly", + "get_async_mock_client": "get_mock_client", + "aconnect": 
"_connect", + "async-transactions-ref": "transactions-ref", + "async-snapshot-reads-ref": "snapshot-reads-ref", + "default_async": "default", + "aclose": "close", + "PyMongo|async": "PyMongo", + "PyMongo|c|async": "PyMongo|c", + "AsyncTestGridFile": "TestGridFile", + "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", + "AsyncTestSpec": "TestSpec", + "AsyncSpecTestCreator": "SpecTestCreator", + "async_set_fail_point": "set_fail_point", + "async_ensure_all_connected": "ensure_all_connected", + "async_repl_set_step_down": "repl_set_step_down", + "AsyncPeriodicExecutor": "PeriodicExecutor", + "async_wait_for_event": "wait_for_event", + "pymongo_server_monitor_task": "pymongo_server_monitor_thread", + "pymongo_server_rtt_task": "pymongo_server_rtt_thread", + "_async_create_lock": "_create_lock", + "_async_create_condition": "_create_condition", + "_async_cond_wait": "_cond_wait", + "AsyncNetworkingInterface": "NetworkingInterface", + "_configured_protocol_interface": "_configured_socket_interface", + "_async_configured_socket": "_configured_socket", + "SpecRunnerTask": "SpecRunnerThread", + "AsyncMockConnection": "MockConnection", + "AsyncMockPool": "MockPool", + "StopAsyncIteration": "StopIteration", + "create_async_event": "create_event", + "async_create_barrier": "create_barrier", + "async_barrier_wait": "barrier_wait", + "async_joinall": "joinall", + "async_simple_test_client": "simple_test_client", + "_async_create_connection": "_create_connection", + "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", + "dns.asyncresolver.resolve": "dns.resolver.resolve", +} + +docstring_replacements: dict[tuple[str, str], str] = { + ("MongoClient", "connect"): """If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment.""", + ("Collection", "create"): """If ``True``, force collection + creation even without options being set.""", + ("Collection", "session"): """A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command.""", + ("Collection", "kwargs"): """Additional keyword arguments will + be passed as options for the create collection command.""", +} + +docstring_removals: set[str] = { + ".. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release." 
+} + +import_replacements = {"test.synchronous": "test"} + +_pymongo_base = "./pymongo/asynchronous/" +_gridfs_base = "./gridfs/asynchronous/" +_test_base = "./test/asynchronous/" + +_pymongo_dest_base = "./pymongo/synchronous/" +_gridfs_dest_base = "./gridfs/synchronous/" +_test_dest_base = "./test/" + +if not Path.exists(Path(_pymongo_dest_base)): + Path.mkdir(Path(_pymongo_dest_base)) +if not Path.exists(Path(_gridfs_dest_base)): + Path.mkdir(Path(_gridfs_dest_base)) + +async_files = [ + _pymongo_base + f for f in listdir(_pymongo_base) if (Path(_pymongo_base) / f).is_file() +] + +gridfs_files = [ + _gridfs_base + f for f in listdir(_gridfs_base) if (Path(_gridfs_base) / f).is_file() +] + + +def async_only_test(f: str) -> bool: + """Return True for async tests that should not be converted to sync.""" + return f in [ + "test_locks.py", + "test_concurrency.py", + "test_async_cancellation.py", + "test_async_loop_safety.py", + "test_async_contextvars_reset.py", + "test_async_loop_unblocked.py", + ] + + +test_files = [ + _test_base + f + for f in listdir(_test_base) + if (Path(_test_base) / f).is_file() and not async_only_test(f) +] + +# Add each asynchronized test here as part of the converting PR +converted_tests = [ + "__init__.py", + "conftest.py", + "helpers.py", + "pymongo_mocks.py", + "utils_spec_runner.py", + "qcheck.py", + "test_auth.py", + "test_auth_oidc.py", + "test_auth_spec.py", + "test_bulk.py", + "test_change_stream.py", + "test_client.py", + "test_client_bulk_write.py", + "test_client_context.py", + "test_client_metadata.py", + "test_collation.py", + "test_collection.py", + "test_collection_management.py", + "test_command_logging.py", + "test_command_logging.py", + "test_command_monitoring.py", + "test_comment.py", + "test_common.py", + "test_connection_logging.py", + "test_connection_monitoring.py", + "test_connections_survive_primary_stepdown_spec.py", + "test_create_entities.py", + "test_crud_unified.py", + "test_csot.py", + "test_cursor.py", + "test_custom_types.py", + "test_database.py", + "test_discovery_and_monitoring.py", + "test_dns.py", + "test_encryption.py", + "test_examples.py", + "test_grid_file.py", + "test_gridfs.py", + "test_gridfs_bucket.py", + "test_gridfs_spec.py", + "test_heartbeat_monitoring.py", + "test_index_management.py", + "test_json_util_integration.py", + "test_load_balancer.py", + "test_logger.py", + "test_max_staleness.py", + "test_monitor.py", + "test_monitoring.py", + "test_mongos_load_balancing.py", + "test_on_demand_csfle.py", + "test_pooling.py", + "test_raw_bson.py", + "test_read_concern.py", + "test_read_preferences.py", + "test_read_write_concern_spec.py", + "test_retryable_reads.py", + "test_retryable_reads_unified.py", + "test_retryable_writes.py", + "test_retryable_writes_unified.py", + "test_run_command.py", + "test_sdam_monitoring_spec.py", + "test_server_selection.py", + "test_server_selection_in_window.py", + "test_server_selection_logging.py", + "test_server_selection_rtt.py", + "test_session.py", + "test_sessions_unified.py", + "test_srv_polling.py", + "test_ssl.py", + "test_streaming_protocol.py", + "test_transactions.py", + "test_transactions_unified.py", + "test_unified_format.py", + "test_versioned_api_integration.py", + "unified_format.py", + "utils_selection_tests.py", + "utils.py", +] + + +def process_files( + files: list[str], docstring_translate_files: list[str], sync_test_files: list[str] +) -> None: + for file in files: + if "__init__" not in file or "__init__" and "test" in file: + with open(file, "r+") as f: + 
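+                # Rewrite the file in place: read every line, apply the
+                # _IS_SYNC flip and the translation passes below, then write
+                # the transformed lines back over the original file.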
lines = f.readlines() + lines = apply_is_sync(lines, file) + lines = translate_coroutine_types(lines) + lines = translate_async_sleeps(lines) + if file in docstring_translate_files: + lines = translate_docstrings(lines) + if file in sync_test_files: + lines = translate_imports(lines) + lines = process_ignores(lines) + f.seek(0) + f.writelines(lines) + f.truncate() + + +def apply_is_sync(lines: list[str], file: str) -> list[str]: + try: + is_sync = next(iter([line for line in lines if line.startswith("_IS_SYNC = ")])) + index = lines.index(is_sync) + is_sync = is_sync.replace("False", "True") + lines[index] = is_sync + except StopIteration as e: + print( + f"Missing _IS_SYNC at top of async file {file.replace('synchronous', 'asynchronous')}" + ) + raise e + return lines + + +def translate_coroutine_types(lines: list[str]) -> list[str]: + coroutine_types = [line for line in lines if "Coroutine[" in line] + for type in coroutine_types: + res = re.search(r"Coroutine\[([A-z]+), ([A-z]+), ([A-z]+)\]", type) + if res: + old = res[0] + index = lines.index(type) + new = type.replace(old, res.group(3)) + lines[index] = new + return lines + + +def translate_imports(lines: list[str]) -> list[str]: + for k, v in import_replacements.items(): + matches = [line for line in lines if k in line and "import" in line] + for line in matches: + index = lines.index(line) + lines[index] = line.replace(k, v) + return lines + + +def translate_async_sleeps(lines: list[str]) -> list[str]: + blocking_sleeps = [line for line in lines if "asyncio.sleep(0)" in line] + lines = [line for line in lines if line not in blocking_sleeps] + sleeps = [line for line in lines if "asyncio.sleep" in line] + + for line in sleeps: + res = re.search(r"asyncio.sleep\(([^()]*)\)", line) + if res: + old = res[0] + index = lines.index(line) + new = f"time.sleep({res[1]})" + lines[index] = line.replace(old, new) + + return lines + + +def translate_docstrings(lines: list[str]) -> list[str]: + for i in range(len(lines)): + for k in replacements: + if k in lines[i]: + # This sequence of replacements fixes the grammar issues caused by translating async -> sync + if "an Async" in lines[i]: + lines[i] = lines[i].replace("an Async", "a Async") + if "an 'Async" in lines[i]: + lines[i] = lines[i].replace("an 'Async", "a 'Async") + if "An Async" in lines[i]: + lines[i] = lines[i].replace("An Async", "A Async") + if "An 'Async" in lines[i]: + lines[i] = lines[i].replace("An 'Async", "A 'Async") + if "an asynchronous" in lines[i]: + lines[i] = lines[i].replace("an asynchronous", "a") + if "An asynchronous" in lines[i]: + lines[i] = lines[i].replace("An asynchronous", "A") + # This ensures docstring links are for `pymongo.X` instead of `pymongo.synchronous.X` + if "pymongo.asynchronous" in lines[i] and "import" not in lines[i]: + lines[i] = lines[i].replace("pymongo.asynchronous", "pymongo") + lines[i] = lines[i].replace(k, replacements[k]) + if "Sync" in lines[i] and "Synchronous" not in lines[i] and replacements[k] in lines[i]: + lines[i] = lines[i].replace("Sync", "") + if "rsApplyStop" in lines[i]: + lines[i] = lines[i].replace("rsApplyStop", "rsSyncApplyStop") + if "async for" in lines[i] or "async with" in lines[i] or "async def" in lines[i]: + lines[i] = lines[i].replace("async ", "") + if "await " in lines[i] and "tailable" not in lines[i]: + lines[i] = lines[i].replace("await ", "") + for i in range(len(lines)): + for k in docstring_replacements: # type: ignore[assignment] + if f":param {k[1]}: **Not supported by {k[0]}**." 
in lines[i]:
+                lines[i] = lines[i].replace(
+                    f"**Not supported by {k[0]}**.",
+                    docstring_replacements[k],  # type: ignore[index]
+                )
+
+        for line in docstring_removals:
+            if line in lines[i]:
+                lines[i] = "DOCSTRING_REMOVED"
+                lines[i + 1] = "DOCSTRING_REMOVED"
+
+    return [line for line in lines if line != "DOCSTRING_REMOVED"]
+
+
+def process_ignores(lines: list[str]) -> list[str]:
+    for i in range(len(lines)):
+        for k, v in replacements.items():
+            if "unasync: off" in lines[i] and v in lines[i]:
+                lines[i] = lines[i].replace(v, k)
+    return lines
+
+
+def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[str, str]) -> None:
+    unasync_files(
+        files,
+        [
+            Rule(
+                fromdir=src,
+                todir=dest,
+                additional_replacements=replacements,
+            )
+        ],
+    )
+
+
+def main() -> None:
+    modified_files = [f"./{f}" for f in sys.argv[1:]]
+    errored = False
+    for fname in async_files + gridfs_files + test_files:
+        # If the async file was modified, we don't need to check if the sync file was also modified.
+        if str(fname) in modified_files:
+            continue
+        sync_name = str(fname).replace("asynchronous", "synchronous")
+        test_sync_name = str(fname).replace("/asynchronous", "")
+        if (
+            sync_name in modified_files
+            or test_sync_name in modified_files
+            and "OVERRIDE_SYNCHRO_CHECK" not in os.environ
+        ):
+            print(f"Refusing to overwrite {test_sync_name}")
+            errored = True
+    if errored:
+        raise ValueError("Aborting synchro due to errors")
+
+    unasync_directory(async_files, _pymongo_base, _pymongo_dest_base, replacements)
+    unasync_directory(gridfs_files, _gridfs_base, _gridfs_dest_base, replacements)
+    unasync_directory(test_files, _test_base, _test_dest_base, replacements)
+
+    sync_files = [
+        _pymongo_dest_base + f
+        for f in listdir(_pymongo_dest_base)
+        if (Path(_pymongo_dest_base) / f).is_file()
+    ]
+
+    sync_gridfs_files = [
+        _gridfs_dest_base + f
+        for f in listdir(_gridfs_dest_base)
+        if (Path(_gridfs_dest_base) / f).is_file()
+    ]
+    sync_test_files = [
+        _test_dest_base + f for f in converted_tests if (Path(_test_dest_base) / f).is_file()
+    ]
+
+    docstring_translate_files = sync_files + sync_gridfs_files + sync_test_files
+
+    process_files(
+        sync_files + sync_gridfs_files + sync_test_files, docstring_translate_files, sync_test_files
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/synchro.sh b/tools/synchro.sh
new file mode 100755
index 0000000000..28b9c6d6c4
--- /dev/null
+++ b/tools/synchro.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Keep the synchronous folders in sync with their async counterparts.
+set -eu
+
+python ./tools/synchro.py "$@"
+python -m ruff check pymongo/synchronous/ gridfs/synchronous/ test/ --fix --silent
+python -m ruff format pymongo/synchronous/ gridfs/synchronous/ test/ --silent
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 5289f2fe20..0000000000
--- a/tox.ini
+++ /dev/null
@@ -1,24 +0,0 @@
-# Tox (http://tox.testrun.org/) is a tool for running tests
-# in multiple virtualenvs. This configuration file will run the
-# test suite on all supported python versions. To use it, "pip install tox"
-# and then run "tox" from this directory.
- -[tox] -envlist = py26, py27, py32, py33, py34, pypy - -[testenv] -commands = - {envpython} setup.py --no_ext test -deps = - nose - -[testenv:py26] -deps = - gevent - {[testenv]deps} - -[testenv:py27] -deps = - gevent - {[testenv]deps} - diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000..f9a389c896 --- /dev/null +++ b/uv.lock @@ -0,0 +1,2208 @@ +version = 1 +revision = 3 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[[package]] +name = "accessible-pygments" +version = "0.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/c1/bbac6a50d02774f91572938964c582fff4270eee73ab822a4aeea4d8b11b/accessible_pygments-0.0.5.tar.gz", hash = "sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872", size = 1377899, upload-time = "2024-05-10T11:23:10.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/3f/95338030883d8c8b91223b4e21744b04d11b161a3ef117295d8241f50ab4/accessible_pygments-0.0.5-py3-none-any.whl", hash = "sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7", size = 1395903, upload-time = "2024-05-10T11:23:08.421Z" }, +] + +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776, upload-time = "2024-01-10T00:56:10.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511, upload-time = "2024-01-10T00:56:08.388Z" }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210, upload-time = "2024-07-26T18:15:03.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, +] + +[[package]] +name = "anyio" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.13.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067, upload-time = "2025-04-15T17:05:13.836Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" }, +] + +[[package]] +name = "boto3" +version = 
"1.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/34/298ef2023d7d88069776c9cc26b42ba6f05d143a1c9b44a0f65cd795c65b/boto3-1.40.0.tar.gz", hash = "sha256:fc1b3ca3baf3d8820c6faddf47cbba8ad3cd16f8e8d7e2f76d304bf995932eb7", size = 111847, upload-time = "2025-07-31T19:21:06.735Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/44/158581021038c5fc886ffa27fa4731fb4939258da7a23e0bc70b2d5757c9/boto3-1.40.0-py3-none-any.whl", hash = "sha256:959443055d2af676c336cc6033b3f870a8a924384b70d0b2905081d649378179", size = 139882, upload-time = "2025-07-31T19:21:04.65Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8f/e7/770ce910457ac6c68ea79b83892ab7a7cb08528f5d1dd77e51bf02a8529e/botocore-1.40.0.tar.gz", hash = "sha256:850242560dc8e74d542045a81eb6cc15f1b730b4ba55ba5b30e6d686548dfcaf", size = 14262316, upload-time = "2025-07-31T19:20:56.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/5a/bebc53f022514412613615b09aef20fbe804abb3ea26ec27e504a2d21c8f/botocore-1.40.0-py3-none-any.whl", hash = "sha256:2063e6d035a6a382b2ae37e40f5144044e55d4e091910d0c9f1be3121ad3e4e6", size = 13921768, upload-time = "2025-07-31T19:20:51.487Z" }, +] + +[[package]] +name = "certifi" +version = "2025.7.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/76/52c535bcebe74590f296d6c77c86dabf761c41980e1347a2422e4aa2ae41/certifi-2025.7.14.tar.gz", hash = "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995", size = 163981, upload-time = "2025-07-14T03:29:28.449Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/52/34c6cf5bb9285074dc3531c437b3919e825d976fde097a7a73f79e726d03/certifi-2025.7.14-py3-none-any.whl", hash = "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2", size = 162722, upload-time = "2025-07-14T03:29:26.863Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = 
"sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = 
"sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = 
"2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, + { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" }, + { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" }, + { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" }, + { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" }, + { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" }, + { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" }, + { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" }, + { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/28/9901804da60055b406e1a1c5ba7aac1276fb77f1dde635aabfc7fd84b8ab/charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941", size = 201818, upload-time = "2025-05-02T08:31:46.725Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9b/892a8c8af9110935e5adcbb06d9c6fe741b6bb02608c6513983048ba1a18/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd", size = 144649, upload-time = "2025-05-02T08:31:48.889Z" }, + { url = "https://files.pythonhosted.org/packages/7b/a5/4179abd063ff6414223575e008593861d62abfc22455b5d1a44995b7c101/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6", size = 155045, upload-time = "2025-05-02T08:31:50.757Z" }, + { url = "https://files.pythonhosted.org/packages/3b/95/bc08c7dfeddd26b4be8c8287b9bb055716f31077c8b0ea1cd09553794665/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d", size = 147356, upload-time = "2025-05-02T08:31:52.634Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/7a5b635aa65284bf3eab7653e8b4151ab420ecbae918d3e359d1947b4d61/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86", size = 149471, upload-time = "2025-05-02T08:31:56.207Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/38/51fc6ac74251fd331a8cfdb7ec57beba8c23fd5493f1050f71c87ef77ed0/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c", size = 151317, upload-time = "2025-05-02T08:31:57.613Z" }, + { url = "https://files.pythonhosted.org/packages/b7/17/edee1e32215ee6e9e46c3e482645b46575a44a2d72c7dfd49e49f60ce6bf/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0", size = 146368, upload-time = "2025-05-02T08:31:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/2c/ea3e66f2b5f21fd00b2825c94cafb8c326ea6240cd80a91eb09e4a285830/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef", size = 154491, upload-time = "2025-05-02T08:32:01.219Z" }, + { url = "https://files.pythonhosted.org/packages/52/47/7be7fa972422ad062e909fd62460d45c3ef4c141805b7078dbab15904ff7/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6", size = 157695, upload-time = "2025-05-02T08:32:03.045Z" }, + { url = "https://files.pythonhosted.org/packages/2f/42/9f02c194da282b2b340f28e5fb60762de1151387a36842a92b533685c61e/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366", size = 154849, upload-time = "2025-05-02T08:32:04.651Z" }, + { url = "https://files.pythonhosted.org/packages/67/44/89cacd6628f31fb0b63201a618049be4be2a7435a31b55b5eb1c3674547a/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db", size = 150091, upload-time = "2025-05-02T08:32:06.719Z" }, + { url = "https://files.pythonhosted.org/packages/1f/79/4b8da9f712bc079c0f16b6d67b099b0b8d808c2292c937f267d816ec5ecc/charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a", size = 98445, upload-time = "2025-05-02T08:32:08.66Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d7/96970afb4fb66497a40761cdf7bd4f6fca0fc7bafde3a84f836c1f57a926/charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509", size = 105782, upload-time = "2025-05-02T08:32:10.46Z" }, + { url = "https://files.pythonhosted.org/packages/05/85/4c40d00dcc6284a1c1ad5de5e0996b06f39d8232f1031cd23c2f5c07ee86/charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", size = 198794, upload-time = "2025-05-02T08:32:11.945Z" }, + { url = "https://files.pythonhosted.org/packages/41/d9/7a6c0b9db952598e97e93cbdfcb91bacd89b9b88c7c983250a77c008703c/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", size = 142846, upload-time = "2025-05-02T08:32:13.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/82/a37989cda2ace7e37f36c1a8ed16c58cf48965a79c2142713244bf945c89/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", size = 153350, upload-time = "2025-05-02T08:32:15.873Z" }, + { url = "https://files.pythonhosted.org/packages/df/68/a576b31b694d07b53807269d05ec3f6f1093e9545e8607121995ba7a8313/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", size = 145657, upload-time = "2025-05-02T08:32:17.283Z" }, + { url = "https://files.pythonhosted.org/packages/92/9b/ad67f03d74554bed3aefd56fe836e1623a50780f7c998d00ca128924a499/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f", size = 147260, upload-time = "2025-05-02T08:32:18.807Z" }, + { url = "https://files.pythonhosted.org/packages/a6/e6/8aebae25e328160b20e31a7e9929b1578bbdc7f42e66f46595a432f8539e/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", size = 149164, upload-time = "2025-05-02T08:32:20.333Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f2/b3c2f07dbcc248805f10e67a0262c93308cfa149a4cd3d1fe01f593e5fd2/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", size = 144571, upload-time = "2025-05-02T08:32:21.86Z" }, + { url = "https://files.pythonhosted.org/packages/60/5b/c3f3a94bc345bc211622ea59b4bed9ae63c00920e2e8f11824aa5708e8b7/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", size = 151952, upload-time = "2025-05-02T08:32:23.434Z" }, + { url = "https://files.pythonhosted.org/packages/e2/4d/ff460c8b474122334c2fa394a3f99a04cf11c646da895f81402ae54f5c42/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", size = 155959, upload-time = "2025-05-02T08:32:24.993Z" }, + { url = "https://files.pythonhosted.org/packages/a2/2b/b964c6a2fda88611a1fe3d4c400d39c66a42d6c169c924818c848f922415/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", size = 153030, upload-time = "2025-05-02T08:32:26.435Z" }, + { url = "https://files.pythonhosted.org/packages/59/2e/d3b9811db26a5ebf444bc0fa4f4be5aa6d76fc6e1c0fd537b16c14e849b6/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", size = 148015, upload-time = "2025-05-02T08:32:28.376Z" }, + { url = "https://files.pythonhosted.org/packages/90/07/c5fd7c11eafd561bb51220d600a788f1c8d77c5eef37ee49454cc5c35575/charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", size = 98106, upload-time = "2025-05-02T08:32:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/a8/05/5e33dbef7e2f773d672b6d79f10ec633d4a71cd96db6673625838a4fd532/charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", size = 105402, upload-time = "2025-05-02T08:32:32.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/28/f8/dfb01ff6cc9af38552c69c9027501ff5a5117c4cc18dcd27cb5259fa1888/charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4", size = 201671, upload-time = "2025-05-02T08:34:12.696Z" }, + { url = "https://files.pythonhosted.org/packages/32/fb/74e26ee556a9dbfe3bd264289b67be1e6d616329403036f6507bb9f3f29c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7", size = 144744, upload-time = "2025-05-02T08:34:14.665Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/8499ee5aa7addc6f6d72e068691826ff093329fe59891e83b092ae4c851c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836", size = 154993, upload-time = "2025-05-02T08:34:17.134Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a2/5e4c187680728219254ef107a6949c60ee0e9a916a5dadb148c7ae82459c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597", size = 147382, upload-time = "2025-05-02T08:34:19.081Z" }, + { url = "https://files.pythonhosted.org/packages/4c/fe/56aca740dda674f0cc1ba1418c4d84534be51f639b5f98f538b332dc9a95/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7", size = 149536, upload-time = "2025-05-02T08:34:21.073Z" }, + { url = "https://files.pythonhosted.org/packages/53/13/db2e7779f892386b589173dd689c1b1e304621c5792046edd8a978cbf9e0/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f", size = 151349, upload-time = "2025-05-02T08:34:23.193Z" }, + { url = "https://files.pythonhosted.org/packages/69/35/e52ab9a276186f729bce7a0638585d2982f50402046e4b0faa5d2c3ef2da/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba", size = 146365, upload-time = "2025-05-02T08:34:25.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d8/af7333f732fc2e7635867d56cb7c349c28c7094910c72267586947561b4b/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12", size = 154499, upload-time = "2025-05-02T08:34:27.359Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3d/a5b2e48acef264d71e036ff30bcc49e51bde80219bb628ba3e00cf59baac/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518", size = 157735, upload-time = "2025-05-02T08:34:29.798Z" }, + { url = "https://files.pythonhosted.org/packages/85/d8/23e2c112532a29f3eef374375a8684a4f3b8e784f62b01da931186f43494/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5", size = 154786, upload-time = "2025-05-02T08:34:31.858Z" }, + { url = "https://files.pythonhosted.org/packages/c7/57/93e0169f08ecc20fe82d12254a200dfaceddc1c12a4077bf454ecc597e33/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3", size = 150203, upload-time = "2025-05-02T08:34:33.88Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9d/9bf2b005138e7e060d7ebdec7503d0ef3240141587651f4b445bdf7286c2/charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471", size = 98436, upload-time = "2025-05-02T08:34:35.907Z" }, + { url = "https://files.pythonhosted.org/packages/6d/24/5849d46cf4311bbf21b424c443b09b459f5b436b1558c04e45dbb7cc478b/charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e", size = 105772, upload-time = "2025-05-02T08:34:37.935Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "click" 
+version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/d3/3ec80acdd57a0d6a1111b978ade388824f37126446fd6750d38bfaca949c/coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8", size = 798314, upload-time = "2024-04-23T17:42:35.508Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/db/08d54dbc12fdfe5857b06105fd1235bdebb7da7c11cd1a0fae936556162a/coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c", size = 210025, upload-time = "2024-04-23T17:40:22.328Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ff/02c4bcff1025b4a788aa3933e1cd1474d79de43e0d859273b3319ef43cd3/coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b", size = 210499, upload-time = "2024-04-23T17:40:25.747Z" }, + { url = "https://files.pythonhosted.org/packages/ab/b1/7820a8ef62adeebd37612af9d2369f4467a3bc2641dea1243450def5489e/coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932", size = 238399, upload-time = "2024-04-23T17:40:27.591Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/23a388f3ce16c5ea01a454fef6a9039115abd40b748027d4fef18b3628a7/coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3", size = 236676, upload-time = "2024-04-23T17:40:30.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/81/e871b0d58ca5d6cc27d00b2f668ce09c4643ef00512341f3a592a81fb6cd/coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517", size = 237467, upload-time = "2024-04-23T17:40:32.704Z" }, + { url = "https://files.pythonhosted.org/packages/95/cb/42a6d34d5840635394f1e172aaa0e7cbd9346155e5004a8ee75d8e434c6b/coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a", size = 243539, upload-time = "2024-04-23T17:40:35.068Z" }, + { url = "https://files.pythonhosted.org/packages/6a/6a/18b3819919fdfd3e2062a75219b363f895f24ae5b80e72ffe5dfb1a7e9c8/coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880", size = 241725, upload-time = "2024-04-23T17:40:37.251Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/a0650978e8b8f78d269358421b7401acaf7cb89e957b2e1be5205ea5940e/coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58", size = 242913, upload-time = "2024-04-23T17:40:39.992Z" }, + { url = "https://files.pythonhosted.org/packages/8a/fe/95a74158fa0eda56d39783e918edc6fbb3dd3336be390557fc0a2815ecd4/coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4", size = 212381, upload-time = "2024-04-23T17:40:42.632Z" }, + { url = "https://files.pythonhosted.org/packages/4c/26/b276e0c70cba5059becce2594a268a2731d5b4f2386e9a6afdf37ffa3d44/coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a", size = 213225, upload-time = "2024-04-23T17:40:45.175Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/964bb667ea37d64b25f04d4cfaf6232cdb7a6472e1f4a4faf0459ddcec40/coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375", size = 210130, upload-time = "2024-04-23T17:40:47.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/56/31edd4baa132fe2b991437e0acf3e36c50418370044a89b65518e5581f4c/coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb", size = 210617, upload-time = "2024-04-23T17:40:49.82Z" }, + { url = "https://files.pythonhosted.org/packages/26/6d/4cd14bd0221180c307fae4f8ef00dbd86a13507c25081858c620aa6fafd8/coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95", size = 242048, upload-time = "2024-04-23T17:40:52.779Z" }, + { url = "https://files.pythonhosted.org/packages/84/60/7eb84255bd9947b140e0382721b0a1b25fd670b4f0f176f11f90b5632d02/coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d", size = 239619, upload-time = "2024-04-23T17:40:54.847Z" }, + { url = "https://files.pythonhosted.org/packages/76/6b/e8f4696194fdf3c19422f2a80ac10e03a9322f93e6c9ef57a89e03a8c8f7/coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743", size = 241321, 
upload-time = "2024-04-23T17:40:57.092Z" }, + { url = "https://files.pythonhosted.org/packages/3f/1c/6a6990fd2e6890807775852882b1ed0a8e50519a525252490b0c219aa8a5/coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1", size = 250419, upload-time = "2024-04-23T17:40:59.051Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/b6422a1422381704dd015cc23e503acd1a44a6bdc4e59c75f8c6a2b24151/coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de", size = 248794, upload-time = "2024-04-23T17:41:01.803Z" }, + { url = "https://files.pythonhosted.org/packages/9b/93/e8231000754d4a31fe9a6c550f6a436eacd2e50763ba2b418f10b2308e45/coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff", size = 249873, upload-time = "2024-04-23T17:41:04.719Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/eb5aae80bf9d01d0f293121d4caa660ac968da2cb967f82547a7b5e8d65b/coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d", size = 212380, upload-time = "2024-04-23T17:41:06.879Z" }, + { url = "https://files.pythonhosted.org/packages/30/73/b70ab57f11b62f5ca9a83f43cae752fbbb4417bea651875235c32eb2fc2e/coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656", size = 213316, upload-time = "2024-04-23T17:41:09.233Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/f4e17ffb5ac2d125c72ee3b235c2e04f85a4296a6a9e17730e218af113d8/coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9", size = 210340, upload-time = "2024-04-23T17:41:11.811Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bc/d7e832280f269be9e8d46cff5c4031b4840f1844674dc53ad93c5a9c1da6/coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64", size = 210612, upload-time = "2024-04-23T17:41:14.256Z" }, + { url = "https://files.pythonhosted.org/packages/54/84/543e2cd6c1de30c7522a0afcb040677957bac756dd8677bade8bdd9274ba/coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af", size = 242926, upload-time = "2024-04-23T17:41:16.284Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/570533f747141b4fd727a193317e16c6e677ed7945e23a195b8f64e685a2/coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc", size = 240294, upload-time = "2024-04-23T17:41:19.099Z" }, + { url = "https://files.pythonhosted.org/packages/fa/d9/ec4ba0913195d240d026670d41b91f3e5b9a8a143a385f93a09e97c90f5c/coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2", size = 242232, upload-time = "2024-04-23T17:41:21.05Z" }, + { url = "https://files.pythonhosted.org/packages/d9/3f/1a613c32aa1980d20d6ca2f54faf800df04aafad6016d7132b3276d8715d/coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1", size = 249171, 
upload-time = "2024-04-23T17:41:23.723Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3b/e16b12693572fd69148453abc6ddcd20cbeae6f0a040b5ed6af2f75b646f/coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb", size = 247073, upload-time = "2024-04-23T17:41:25.719Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3e/04a05d40bb09f90a312296a32fb2c5ade2dfcf803edf777ad18b97547503/coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2", size = 248812, upload-time = "2024-04-23T17:41:27.951Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f7/3a8b7b0affe548227f3d45e248c0f22c5b55bff0ee062b49afc165b3ff25/coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4", size = 212634, upload-time = "2024-04-23T17:41:30.114Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/5f5286d2a5e21e1fe5670629bb24c79bf46383a092e74e00077e7a178e5c/coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475", size = 213460, upload-time = "2024-04-23T17:41:32.683Z" }, + { url = "https://files.pythonhosted.org/packages/62/18/5573216d5b8db7d9f29189350dcd81830a03a624966c35f8201ae10df09c/coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1", size = 210014, upload-time = "2024-04-23T17:41:56.535Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0e/e98d6c6d569d65ff3195f095e6b006b3d7780fd6182322a25e7dfe0d53d3/coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5", size = 210494, upload-time = "2024-04-23T17:41:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/d3/63/98e5a6b7ed1bfca874729ee309cc49a6d6658ab9e479a2b6d223ccc96e03/coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631", size = 237996, upload-time = "2024-04-23T17:42:01.514Z" }, + { url = "https://files.pythonhosted.org/packages/76/e4/d3c67a0a092127b8a3dffa2f75334a8cdb2cefc99e3d75a7f42cf1ff98a9/coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46", size = 236287, upload-time = "2024-04-23T17:42:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/12/7f/9b787ffc31bc39aa9e98c7005b698e7c6539bd222043e4a9c83b83c782a2/coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e", size = 237070, upload-time = "2024-04-23T17:42:06.993Z" }, + { url = "https://files.pythonhosted.org/packages/31/ee/9998a0d855cad5f8e04062f7428b83c34aa643e5df468409593a480d5585/coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be", size = 243115, upload-time = "2024-04-23T17:42:09.281Z" }, + { url = "https://files.pythonhosted.org/packages/16/94/1e348cd4445404c588ec8199adde0b45727b1d7989d8fb097d39c93e3da5/coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b", size = 241315, upload-time = 
"2024-04-23T17:42:11.836Z" }, + { url = "https://files.pythonhosted.org/packages/28/17/6fe1695d2a706e586b87a407598f4ed82dd218b2b43cdc790f695f259849/coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0", size = 242467, upload-time = "2024-04-23T17:42:14.019Z" }, + { url = "https://files.pythonhosted.org/packages/81/a2/1e550272c8b1f89b980504230b1a929de83d8f3d5ecb268477b32e5996a6/coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7", size = 212394, upload-time = "2024-04-23T17:42:17.655Z" }, + { url = "https://files.pythonhosted.org/packages/c9/48/7d3c31064c5adcc743fe5370cf7e198cee06cc0e2d37b5cbe930691a3f54/coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493", size = 213246, upload-time = "2024-04-23T17:42:19.777Z" }, + { url = "https://files.pythonhosted.org/packages/34/81/f00ce7ef95479085feb01fa9e352b2b5b2b9d24767acf2266d6267a6dba9/coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067", size = 202381, upload-time = "2024-04-23T17:42:22.127Z" }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "cramjam" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/14/12/34bf6e840a79130dfd0da7badfb6f7810b8fcfd60e75b0539372667b41b6/cramjam-2.11.0.tar.gz", hash = "sha256:5c82500ed91605c2d9781380b378397012e25127e89d64f460fea6aeac4389b4", size = 99100, upload-time = "2025-07-27T21:25:07.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/d3/20d0402e4e983b66603117ad3dd3b864a05d7997a830206d3ff9cacef9a2/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d0859c65775e8ebf2cbc084bfd51bd0ffda10266da6f9306451123b89f8e5a63", size = 3558999, upload-time = "2025-07-27T21:21:34.105Z" }, + { url = "https://files.pythonhosted.org/packages/f5/a8/a6e2744288938ccd320a5c6f6f3653faa790f933f5edd088c6e5782a2354/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1d77b9b0aca02a3f6eeeff27fcd315ca5972616c0919ee38e522cce257bcd349", size = 1861558, upload-time = "2025-07-27T21:21:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/96/29/7961e09a849eea7d8302e7baa6f829dd3ef3faf199cb25ed29b318ae799b/cramjam-2.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66425bc25b5481359b12a6719b6e7c90ffe76d85d0691f1da7df304bfb8ce45c", size = 1699431, upload-time = "2025-07-27T21:21:38.396Z" }, + { url = "https://files.pythonhosted.org/packages/7a/60/6665e52f01a8919bf37c43dcf0e03b6dd3866f5c4e95440b357d508ee14e/cramjam-2.11.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bd748d3407ec63e049b3aea1595e218814fccab329b7fb10bb51120a30e9fb7e", size = 2025262, upload-time = "2025-07-27T21:21:40.417Z" }, + { url = "https://files.pythonhosted.org/packages/d7/80/79bd84dbeb109e2c6efb74e661b7bd4c3ba393208ebcf69e2ae9454ae80c/cramjam-2.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6d9a23a35b3a105c42a8de60fc2e80281ae6e758f05a3baea0b68eb1ddcb679", size = 1766177, upload-time = "2025-07-27T21:21:42.224Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/ef/b43280767ebcde022ba31f1e9902137655a956ae30e920d75630fa67e36e/cramjam-2.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:40a75b95e05e38a2a055b2446f09994ce1139151721659315151d4ad6289bbff", size = 1854031, upload-time = "2025-07-27T21:21:43.651Z" }, + { url = "https://files.pythonhosted.org/packages/60/1c/79d522757c494dfd9e9b208b0604cc7e97b481483cc477144f5705a06ab7/cramjam-2.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5d042c376d2025300da37d65192d06a457918b63b31140f697f85fd8e310b29", size = 2035812, upload-time = "2025-07-27T21:21:45.473Z" }, + { url = "https://files.pythonhosted.org/packages/c8/70/3bf0670380069b3abd4c6b53f61d3148f4e08935569c08efbeaf7550e87d/cramjam-2.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb148b35ab20c75b19a06c27f05732e2a321adbd86fadc93f9466dbd7b1154a7", size = 2067661, upload-time = "2025-07-27T21:21:47.901Z" }, + { url = "https://files.pythonhosted.org/packages/db/7e/4f6ca98a4b474348e965a529b359184785d1119ab7c4c9ec1280b8bea50a/cramjam-2.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee47c220f0f5179ddc923ab91fc9e282c27b29fabc60c433dfe06f08084f798", size = 1981523, upload-time = "2025-07-27T21:21:49.704Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6c/b241511c7ffd5f1da29641429bb0e19b5fbcffafde5ba1bbcbf9394ea456/cramjam-2.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0cf1b5a81b21ea175c976c3ab09e00494258f4b49b7995efc86060cced3f0b2e", size = 2034251, upload-time = "2025-07-27T21:21:51.252Z" }, + { url = "https://files.pythonhosted.org/packages/14/5c/4ef926c8c3c1bf6da96f9c53450ff334cdb6d0fc1efced0aea97e2090803/cramjam-2.11.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:360c00338ecf48921492455007f904be607fc7818de3d681acbcc542aae2fb36", size = 2155322, upload-time = "2025-07-27T21:21:53.348Z" }, + { url = "https://files.pythonhosted.org/packages/be/fb/eb2aef7fb2730e56c5a2c9000817ee8fb4a95c92f19cc6e441afed42ec29/cramjam-2.11.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f31fcc0d30dc3f3e94ea6b4d8e1a855071757c6abf6a7b1e284050ab7d4c299c", size = 2169094, upload-time = "2025-07-27T21:21:55.187Z" }, + { url = "https://files.pythonhosted.org/packages/3b/80/925a5c668dcee1c6f61775067185c5dc9a63c766d5393e5c60d2af4217a7/cramjam-2.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:033be66fdceb3d63b2c99b257a98380c4ec22c9e4dca54a2bfec3718cd24e184", size = 2159089, upload-time = "2025-07-27T21:21:57.118Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ac/b2819640eef0592a6de7ca832c0d23c69bd1620f765ce88b60dbc8da9ba2/cramjam-2.11.0-cp310-cp310-win32.whl", hash = "sha256:1c6cea67f6000b81f6bd27d14c8a6f62d00336ca7252fd03ee16f6b70eb5c0d2", size = 1605046, upload-time = "2025-07-27T21:21:58.617Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f4/06af04727b9556721049e2127656d727306d275c518e3d97f9ed4cffd0d8/cramjam-2.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:98aa4a351b047b0f7f9e971585982065028adc2c162c5c23c5d5734c5ccc1077", size = 1710647, upload-time = "2025-07-27T21:22:00.279Z" }, + { url = "https://files.pythonhosted.org/packages/d0/89/8001f6a9b6b6e9fa69bec5319789083475d6f26d52aaea209d3ebf939284/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:04cfa39118570e70e920a9b75c733299784b6d269733dbc791d9aaed6edd2615", size = 3559272, upload-time = "2025-07-27T21:22:01.988Z" }, + { url 
= "https://files.pythonhosted.org/packages/0b/f3/001d00070ca92e5fbe6aacc768e455568b0cde46b0eb944561a4ea132300/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:66a18f68506290349a256375d7aa2f645b9f7993c10fc4cc211db214e4e61d2b", size = 1861743, upload-time = "2025-07-27T21:22:03.754Z" }, + { url = "https://files.pythonhosted.org/packages/c9/35/041a3af01bf3f6158f120070f798546d4383b962b63c35cd91dcbf193e17/cramjam-2.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50e7d65533857736cd56f6509cf2c4866f28ad84dd15b5bdbf2f8a81e77fa28a", size = 1699631, upload-time = "2025-07-27T21:22:05.192Z" }, + { url = "https://files.pythonhosted.org/packages/17/eb/5358b238808abebd0c949c42635c3751204ca7cf82b29b984abe9f5e33c8/cramjam-2.11.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1f71989668458fc327ac15396db28d92df22f8024bb12963929798b2729d2df5", size = 2025603, upload-time = "2025-07-27T21:22:06.726Z" }, + { url = "https://files.pythonhosted.org/packages/0e/79/19dba7c03a27408d8d11b5a7a4a7908459cfd4e6f375b73264dc66517bf6/cramjam-2.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee77ac543f1e2b22af1e8be3ae589f729491b6090582340aacd77d1d757d9569", size = 1766283, upload-time = "2025-07-27T21:22:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ad/40e4b3408501d886d082db465c33971655fe82573c535428e52ab905f4d0/cramjam-2.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad52784120e7e4d8a0b5b0517d185b8bf7f74f5e17272857ddc8951a628d9be1", size = 1854407, upload-time = "2025-07-27T21:22:10.518Z" }, + { url = "https://files.pythonhosted.org/packages/36/6e/c1b60ceb6d7ea6ff8b0bf197520aefe23f878bf2bfb0de65f2b0c2f82cd1/cramjam-2.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b86f8e6d9c1b3f9a75b2af870c93ceee0f1b827cd2507387540e053b35d7459", size = 2035793, upload-time = "2025-07-27T21:22:12.504Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ad/32a8d5f4b1e3717787945ec6d71bd1c6e6bccba4b7e903fc0d9d4e4b08c3/cramjam-2.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:320d61938950d95da2371b46c406ec433e7955fae9f396c8e1bf148ffc187d11", size = 2067499, upload-time = "2025-07-27T21:22:14.067Z" }, + { url = "https://files.pythonhosted.org/packages/ff/cd/3b5a662736ea62ff7fa4c4a10a85e050bfdaad375cc53dc80427e8afe41c/cramjam-2.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41eafc8c1653a35a5c7e75ad48138f9f60085cc05cd99d592e5298552d944e9f", size = 1981853, upload-time = "2025-07-27T21:22:15.908Z" }, + { url = "https://files.pythonhosted.org/packages/26/8e/1dbcfaaa7a702ee82ee683ec3a81656934dd7e04a7bc4ee854033686f98a/cramjam-2.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03a7316c6bf763dfa34279335b27702321da44c455a64de58112968c0818ec4a", size = 2034514, upload-time = "2025-07-27T21:22:17.352Z" }, + { url = "https://files.pythonhosted.org/packages/50/62/f11709bfdce74af79a88b410dcb76dedc97612166e759136931bf63cfd7b/cramjam-2.11.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:244c2ed8bd7ccbb294a2abe7ca6498db7e89d7eb5e744691dc511a7dc82e65ca", size = 2155343, upload-time = "2025-07-27T21:22:18.854Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6d/3b98b61841a5376d9a9b8468ae58753a8e6cf22be9534a0fa5af4d8621cc/cramjam-2.11.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:405f8790bad36ce0b4bbdb964ad51507bfc7942c78447f25cb828b870a1d86a0", size = 2169367, upload-time = 
"2025-07-27T21:22:20.389Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/bd5db5c49dbebc8b002f1c4983101b28d2e7fc9419753db1c31ec22b03ef/cramjam-2.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b1b751a5411032b08fb3ac556160229ca01c6bbe4757bb3a9a40b951ebaac23", size = 2159334, upload-time = "2025-07-27T21:22:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/34/32/203c57acdb6eea727e7078b2219984e64ed4ad043c996ed56321301ba167/cramjam-2.11.0-cp311-cp311-win32.whl", hash = "sha256:5251585608778b9ac8effed544933df7ad85b4ba21ee9738b551f17798b215ac", size = 1605313, upload-time = "2025-07-27T21:22:24.126Z" }, + { url = "https://files.pythonhosted.org/packages/a9/bd/102d6deb87a8524ac11cddcd31a7612b8f20bf9b473c3c645045e3b957c7/cramjam-2.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:dca88bc8b68ce6d35dafd8c4d5d59a238a56c43fa02b74c2ce5f9dfb0d1ccb46", size = 1710991, upload-time = "2025-07-27T21:22:25.661Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0d/7c84c913a5fae85b773a9dcf8874390f9d68ba0fcc6630efa7ff1541b950/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:dba5c14b8b4f73ea1e65720f5a3fe4280c1d27761238378be8274135c60bbc6e", size = 3553368, upload-time = "2025-07-27T21:22:27.162Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cc/4f6d185d8a744776f53035e72831ff8eefc2354f46ab836f4bd3c4f6c138/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:11eb40722b3fcf3e6890fba46c711bf60f8dc26360a24876c85e52d76c33b25b", size = 1860014, upload-time = "2025-07-27T21:22:28.738Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a8/626c76263085c6d5ded0e71823b411e9522bfc93ba6cc59855a5869296e7/cramjam-2.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aeb26e2898994b6e8319f19a4d37c481512acdcc6d30e1b5ecc9d8ec57e835cb", size = 1693512, upload-time = "2025-07-27T21:22:30.999Z" }, + { url = "https://files.pythonhosted.org/packages/e9/52/0851a16a62447532e30ba95a80e638926fdea869a34b4b5b9d0a020083ba/cramjam-2.11.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f8d82081ed7d8fe52c982bd1f06e4c7631a73fe1fb6d4b3b3f2404f87dc40fe", size = 2025285, upload-time = "2025-07-27T21:22:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/122e444f59dbc216451d8e3d8282c9665dc79eaf822f5f1470066be1b695/cramjam-2.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:092a3ec26e0a679305018380e4f652eae1b6dfe3fc3b154ee76aa6b92221a17c", size = 1761327, upload-time = "2025-07-27T21:22:34.484Z" }, + { url = "https://files.pythonhosted.org/packages/a3/bc/3a0189aef1af2b29632c039c19a7a1b752bc21a4053582a5464183a0ad3d/cramjam-2.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:529d6d667c65fd105d10bd83d1cd3f9869f8fd6c66efac9415c1812281196a92", size = 1854075, upload-time = "2025-07-27T21:22:36.157Z" }, + { url = "https://files.pythonhosted.org/packages/2e/80/8a6343b13778ce52d94bb8d5365a30c3aa951276b1857201fe79d7e2ad25/cramjam-2.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:555eb9c90c450e0f76e27d9ff064e64a8b8c6478ab1a5594c91b7bc5c82fd9f0", size = 2032710, upload-time = "2025-07-27T21:22:38.17Z" }, + { url = "https://files.pythonhosted.org/packages/df/6b/cd1778a207c29eda10791e3dfa018b588001928086e179fc71254793c625/cramjam-2.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5edf4c9e32493035b514cf2ba0c969d81ccb31de63bd05490cc8bfe3b431674e", size = 
2068353, upload-time = "2025-07-27T21:22:39.615Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f0/5c2a5cd5711032f3b191ca50cb786c17689b4a9255f9f768866e6c9f04d9/cramjam-2.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2fe41f48c4d58d923803383b0737f048918b5a0d10390de9628bb6272b107", size = 1978104, upload-time = "2025-07-27T21:22:41.106Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8b/b363a5fb2c3347504fe9a64f8d0f1e276844f0e532aa7162c061cd1ffee4/cramjam-2.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9ca14cf1cabdb0b77d606db1bb9e9ca593b1dbd421fcaf251ec9a5431ec449f3", size = 2030779, upload-time = "2025-07-27T21:22:42.969Z" }, + { url = "https://files.pythonhosted.org/packages/78/7b/d83dad46adb6c988a74361f81ad9c5c22642be53ad88616a19baedd06243/cramjam-2.11.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:309e95bf898829476bccf4fd2c358ec00e7ff73a12f95a3cdeeba4bb1d3683d5", size = 2155297, upload-time = "2025-07-27T21:22:44.6Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/60d9be4cb33d8740a4aa94c7513f2ef3c4eba4fd13536f086facbafade71/cramjam-2.11.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:86dca35d2f15ef22922411496c220f3c9e315d5512f316fe417461971cc1648d", size = 2169255, upload-time = "2025-07-27T21:22:46.534Z" }, + { url = "https://files.pythonhosted.org/packages/11/b0/4a595f01a243aec8ad272b160b161c44351190c35d98d7787919d962e9e5/cramjam-2.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:193c6488bd2f514cbc0bef5c18fad61a5f9c8d059dd56edf773b3b37f0e85496", size = 2155651, upload-time = "2025-07-27T21:22:48.46Z" }, + { url = "https://files.pythonhosted.org/packages/38/47/7776659aaa677046b77f527106e53ddd47373416d8fcdb1e1a881ec5dc06/cramjam-2.11.0-cp312-cp312-win32.whl", hash = "sha256:514e2c008a8b4fa823122ca3ecab896eac41d9aa0f5fc881bd6264486c204e32", size = 1603568, upload-time = "2025-07-27T21:22:50.084Z" }, + { url = "https://files.pythonhosted.org/packages/75/b1/d53002729cfd94c5844ddfaf1233c86d29f2dbfc1b764a6562c41c044199/cramjam-2.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:53fed080476d5f6ad7505883ec5d1ec28ba36c2273db3b3e92d7224fe5e463db", size = 1709287, upload-time = "2025-07-27T21:22:51.534Z" }, + { url = "https://files.pythonhosted.org/packages/0a/8b/406c5dc0f8e82385519d8c299c40fd6a56d97eca3fcd6f5da8dad48de75b/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2c289729cc1c04e88bafa48b51082fb462b0a57dbc96494eab2be9b14dca62af", size = 3553330, upload-time = "2025-07-27T21:22:53.124Z" }, + { url = "https://files.pythonhosted.org/packages/00/ad/4186884083d6e4125b285903e17841827ab0d6d0cffc86216d27ed91e91d/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:045201ee17147e36cf43d8ae2fa4b4836944ac672df5874579b81cf6d40f1a1f", size = 1859756, upload-time = "2025-07-27T21:22:54.821Z" }, + { url = "https://files.pythonhosted.org/packages/54/01/91b485cf76a7efef638151e8a7d35784dae2c4ff221b1aec2c083e4b106d/cramjam-2.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:619cd195d74c9e1d2a3ad78d63451d35379c84bd851aec552811e30842e1c67a", size = 1693609, upload-time = "2025-07-27T21:22:56.331Z" }, + { url = "https://files.pythonhosted.org/packages/cd/84/d0c80d279b2976870fc7d10f15dcb90a3c10c06566c6964b37c152694974/cramjam-2.11.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6eb3ae5ab72edb2ed68bdc0f5710f0a6cad7fd778a610ec2c31ee15e32d3921e", size = 2024912, upload-time = "2025-07-27T21:22:57.915Z" }, 
+ { url = "https://files.pythonhosted.org/packages/d6/70/88f2a5cb904281ed5d3c111b8f7d5366639817a5470f059bcd26833fc870/cramjam-2.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7da3f4b19e3078f9635f132d31b0a8196accb2576e3213ddd7a77f93317c20", size = 1760715, upload-time = "2025-07-27T21:22:59.528Z" }, + { url = "https://files.pythonhosted.org/packages/b2/06/cf5b02081132537d28964fb385fcef9ed9f8a017dd7d8c59d317e53ba50d/cramjam-2.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57286b289cd557ac76c24479d8ecfb6c3d5b854cce54ccc7671f9a2f5e2a2708", size = 1853782, upload-time = "2025-07-27T21:23:01.07Z" }, + { url = "https://files.pythonhosted.org/packages/57/27/63525087ed40a53d1867021b9c4858b80cc86274ffe7225deed067d88d92/cramjam-2.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28952fbbf8b32c0cb7fa4be9bcccfca734bf0d0989f4b509dc7f2f70ba79ae06", size = 2032354, upload-time = "2025-07-27T21:23:03.021Z" }, + { url = "https://files.pythonhosted.org/packages/c3/ef/dbba082c6ebfb6410da4dd39a64e654d7194fcfd4567f85991a83fa4ec32/cramjam-2.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78ed2e4099812a438b545dfbca1928ec825e743cd253bc820372d6ef8c3adff4", size = 2068007, upload-time = "2025-07-27T21:23:04.526Z" }, + { url = "https://files.pythonhosted.org/packages/35/ce/d902b9358a46a086938feae83b2251720e030f06e46006f4c1fc0ac9da20/cramjam-2.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9aecd5c3845d415bd6c9957c93de8d93097e269137c2ecb0e5a5256374bdc8", size = 1977485, upload-time = "2025-07-27T21:23:06.058Z" }, + { url = "https://files.pythonhosted.org/packages/e8/03/982f54553244b0afcbdb2ad2065d460f0ab05a72a96896a969a1ca136a1e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:362fcf4d6f5e1242a4540812455f5a594949190f6fbc04f2ffbfd7ae0266d788", size = 2030447, upload-time = "2025-07-27T21:23:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/74/5f/748e54cdb665ec098ec519e23caacc65fc5ae58718183b071e33fc1c45b4/cramjam-2.11.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:13240b3dea41b1174456cb9426843b085dc1a2bdcecd9ee2d8f65ac5703374b0", size = 2154949, upload-time = "2025-07-27T21:23:09.366Z" }, + { url = "https://files.pythonhosted.org/packages/69/81/c4e6cb06ed69db0dc81f9a8b1dc74995ebd4351e7a1877143f7031ff2700/cramjam-2.11.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:c54eed83726269594b9086d827decc7d2015696e31b99bf9b69b12d9063584fe", size = 2168925, upload-time = "2025-07-27T21:23:10.976Z" }, + { url = "https://files.pythonhosted.org/packages/13/5b/966365523ce8290a08e163e3b489626c5adacdff2b3da9da1b0823dfb14e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f8195006fdd0fc0a85b19df3d64a3ef8a240e483ae1dfc7ac6a4316019eb5df2", size = 2154950, upload-time = "2025-07-27T21:23:12.514Z" }, + { url = "https://files.pythonhosted.org/packages/3a/7d/7f8eb5c534b72b32c6eb79d74585bfee44a9a5647a14040bb65c31c2572d/cramjam-2.11.0-cp313-cp313-win32.whl", hash = "sha256:ccf30e3fe6d770a803dcdf3bb863fa44ba5dc2664d4610ba2746a3c73599f2e4", size = 1603199, upload-time = "2025-07-27T21:23:14.38Z" }, + { url = "https://files.pythonhosted.org/packages/37/05/47b5e0bf7c41a3b1cdd3b7c2147f880c93226a6bef1f5d85183040cbdece/cramjam-2.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:ee36348a204f0a68b03400f4736224e9f61d1c6a1582d7f875c1ca56f0254268", size = 1708924, upload-time = "2025-07-27T21:23:16.332Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/07/a1051cdbbe6d723df16d756b97f09da7c1adb69e29695c58f0392bc12515/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7ba5e38c9fbd06f086f4a5a64a1a5b7b417cd3f8fc07a20e5c03651f72f36100", size = 3554141, upload-time = "2025-07-27T21:23:17.938Z" }, + { url = "https://files.pythonhosted.org/packages/74/66/58487d2e16ef3d04f51a7c7f0e69823e806744b4c21101e89da4873074bc/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:b8adeee57b41fe08e4520698a4b0bd3cc76dbd81f99424b806d70a5256a391d3", size = 1860353, upload-time = "2025-07-27T21:23:19.593Z" }, + { url = "https://files.pythonhosted.org/packages/67/b4/67f6254d166ffbcc9d5fa1b56876eaa920c32ebc8e9d3d525b27296b693b/cramjam-2.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b96a74fa03a636c8a7d76f700d50e9a8bc17a516d6a72d28711225d641e30968", size = 1693832, upload-time = "2025-07-27T21:23:21.185Z" }, + { url = "https://files.pythonhosted.org/packages/55/a3/4e0b31c0d454ae70c04684ed7c13d3c67b4c31790c278c1e788cb804fa4a/cramjam-2.11.0-cp314-cp314-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c3811a56fa32e00b377ef79121c0193311fd7501f0fb378f254c7f083cc1fbe0", size = 2027080, upload-time = "2025-07-27T21:23:23.303Z" }, + { url = "https://files.pythonhosted.org/packages/d9/c7/5e8eed361d1d3b8be14f38a54852c5370cc0ceb2c2d543b8ba590c34f080/cramjam-2.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5d927e87461f8a0d448e4ab5eb2bca9f31ca5d8ea86d70c6f470bb5bc666d7e", size = 1761543, upload-time = "2025-07-27T21:23:24.991Z" }, + { url = "https://files.pythonhosted.org/packages/09/0c/06b7f8b0ce9fde89470505116a01fc0b6cb92d406c4fb1e46f168b5d3fa5/cramjam-2.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f1f5c450121430fd89cb5767e0a9728ecc65997768fd4027d069cb0368af62f9", size = 1854636, upload-time = "2025-07-27T21:23:26.987Z" }, + { url = "https://files.pythonhosted.org/packages/6f/c6/6ebc02c9d5acdf4e5f2b1ec6e1252bd5feee25762246798ae823b3347457/cramjam-2.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:724aa7490be50235d97f07e2ca10067927c5d7f336b786ddbc868470e822aa25", size = 2032715, upload-time = "2025-07-27T21:23:28.603Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/a122971c23f5ca4b53e4322c647ac7554626c95978f92d19419315dddd05/cramjam-2.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54c4637122e7cfd7aac5c1d3d4c02364f446d6923ea34cf9d0e8816d6e7a4936", size = 2069039, upload-time = "2025-07-27T21:23:30.319Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/f6121b90b86b9093c066889274d26a1de3f29969d45c2ed1ecbe2033cb78/cramjam-2.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17eb39b1696179fb471eea2de958fa21f40a2cd8bf6b40d428312d5541e19dc4", size = 1979566, upload-time = "2025-07-27T21:23:32.002Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/f95bc57fd7f4166ce6da816cfa917fb7df4bb80e669eb459d85586498414/cramjam-2.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:36aa5a798aa34e11813a80425a30d8e052d8de4a28f27bfc0368cfc454d1b403", size = 2030905, upload-time = "2025-07-27T21:23:33.696Z" }, + { url = "https://files.pythonhosted.org/packages/fc/52/e429de4e8bc86ee65e090dae0f87f45abd271742c63fb2d03c522ffde28a/cramjam-2.11.0-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:449fca52774dc0199545fbf11f5128933e5a6833946707885cf7be8018017839", 
size = 2155592, upload-time = "2025-07-27T21:23:35.375Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6c/65a7a0207787ad39ad804af4da7f06a60149de19481d73d270b540657234/cramjam-2.11.0-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:d87d37b3d476f4f7623c56a232045d25bd9b988314702ea01bd9b4a94948a778", size = 2170839, upload-time = "2025-07-27T21:23:37.197Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c5/5c5db505ba692bc844246b066e23901d5905a32baf2f33719c620e65887f/cramjam-2.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:26cb45c47d71982d76282e303931c6dd4baee1753e5d48f9a89b3a63e690b3a3", size = 2157236, upload-time = "2025-07-27T21:23:38.854Z" }, + { url = "https://files.pythonhosted.org/packages/b0/22/88e6693e60afe98901e5bbe91b8dea193e3aa7f42e2770f9c3339f5c1065/cramjam-2.11.0-cp314-cp314-win32.whl", hash = "sha256:4efe919d443c2fd112fe25fe636a52f9628250c9a50d9bddb0488d8a6c09acc6", size = 1604136, upload-time = "2025-07-27T21:23:40.56Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f8/01618801cd59ccedcc99f0f96d20be67d8cfc3497da9ccaaad6b481781dd/cramjam-2.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:ccec3524ea41b9abd5600e3e27001fd774199dbb4f7b9cb248fcee37d4bda84c", size = 1710272, upload-time = "2025-07-27T21:23:42.236Z" }, + { url = "https://files.pythonhosted.org/packages/40/81/6cdb3ed222d13ae86bda77aafe8d50566e81a1169d49ed195b6263610704/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:966ac9358b23d21ecd895c418c048e806fd254e46d09b1ff0cdad2eba195ea3e", size = 3559671, upload-time = "2025-07-27T21:23:44.504Z" }, + { url = "https://files.pythonhosted.org/packages/cb/43/52b7e54fe5ba1ef0270d9fdc43dabd7971f70ea2d7179be918c997820247/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:387f09d647a0d38dcb4539f8a14281f8eb6bb1d3e023471eb18a5974b2121c86", size = 1867876, upload-time = "2025-07-27T21:23:46.987Z" }, + { url = "https://files.pythonhosted.org/packages/9d/28/30d5b8d10acd30db3193bc562a313bff722888eaa45cfe32aa09389f2b24/cramjam-2.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:665b0d8fbbb1a7f300265b43926457ec78385200133e41fef19d85790fc1e800", size = 1695562, upload-time = "2025-07-27T21:23:48.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/86/ec806f986e01b896a650655024ea52a13e25c3ac8a3a382f493089483cdc/cramjam-2.11.0-cp314-cp314t-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ca905387c7a371531b9622d93471be4d745ef715f2890c3702479cd4fc85aa51", size = 2025056, upload-time = "2025-07-27T21:23:50.404Z" }, + { url = "https://files.pythonhosted.org/packages/09/43/c2c17586b90848d29d63181f7d14b8bd3a7d00975ad46e3edf2af8af7e1f/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1aa56aef2c8af55a21ed39040a94a12b53fb23beea290f94d19a76027e2ffb", size = 1764084, upload-time = "2025-07-27T21:23:52.265Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a9/68bc334fadb434a61df10071dc8606702aa4f5b6cdb2df62474fc21d2845/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5db59c1cdfaa2ab85cc988e602d6919495f735ca8a5fd7603608eb1e23c26d5", size = 1854859, upload-time = "2025-07-27T21:23:54.085Z" }, + { url = "https://files.pythonhosted.org/packages/5b/4e/b48e67835b5811ec5e9cb2e2bcba9c3fd76dab3e732569fe801b542c6ca9/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b1f893014f00fe5e89a660a032e813bf9f6d91de74cd1490cdb13b2b59d0c9a3", size = 2035970, upload-time = "2025-07-27T21:23:55.758Z" }, + { url = "https://files.pythonhosted.org/packages/c4/70/d2ac33d572b4d90f7f0f2c8a1d60fb48f06b128fdc2c05f9b49891bb0279/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c26a1eb487947010f5de24943bd7c422dad955b2b0f8650762539778c380ca89", size = 2069320, upload-time = "2025-07-27T21:23:57.494Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4c/85cec77af4a74308ba5fca8e296c4e2f80ec465c537afc7ab1e0ca2f9a00/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d5c8bfb438d94e7b892d1426da5fc4b4a5370cc360df9b8d9d77c33b896c37e", size = 1982668, upload-time = "2025-07-27T21:23:59.126Z" }, + { url = "https://files.pythonhosted.org/packages/55/45/938546d1629e008cc3138df7c424ef892719b1796ff408a2ab8550032e5e/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:cb1fb8c9337ab0da25a01c05d69a0463209c347f16512ac43be5986f3d1ebaf4", size = 2034028, upload-time = "2025-07-27T21:24:00.865Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/b5a53e20505555f1640e66dcf70394bcf51a1a3a072aa18ea35135a0f9ed/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:1f6449f6de52dde3e2f1038284910c8765a397a25e2d05083870f3f5e7fc682c", size = 2155513, upload-time = "2025-07-27T21:24:02.92Z" }, + { url = "https://files.pythonhosted.org/packages/84/12/8d3f6ceefae81bbe45a347fdfa2219d9f3ac75ebc304f92cd5fcb4fbddc5/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_i686.whl", hash = "sha256:382dec4f996be48ed9c6958d4e30c2b89435d7c2c4dbf32480b3b8886293dd65", size = 2170035, upload-time = "2025-07-27T21:24:04.558Z" }, + { url = "https://files.pythonhosted.org/packages/4b/85/3be6f0a1398f976070672be64f61895f8839857618a2d8cc0d3ab529d3dc/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:d388bd5723732c3afe1dd1d181e4213cc4e1be210b080572e7d5749f6e955656", size = 2160229, upload-time = "2025-07-27T21:24:06.729Z" }, + { url = "https://files.pythonhosted.org/packages/57/5e/66cfc3635511b20014bbb3f2ecf0095efb3049e9e96a4a9e478e4f3d7b78/cramjam-2.11.0-cp314-cp314t-win32.whl", hash = "sha256:0a70ff17f8e1d13f322df616505550f0f4c39eda62290acb56f069d4857037c8", size = 1610267, upload-time = "2025-07-27T21:24:08.428Z" }, + { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, + { url = "https://files.pythonhosted.org/packages/8c/33/3d7a7fbfb313614d59ae2e512b9dacfc22efb07c20e4af7deb73d3409f7b/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2581e82dca742b55d8b1d7f33892394c06b057a74f2853ffcb0802dcddcbf694", size = 3559843, upload-time = "2025-07-27T21:24:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b0/ccf09697df7fcc750c4913dc4bf3fb91e5b778dda65fb9fa55dde61c03dc/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a9994a42cd12f07ece04eff94dbf6e127b3986f7af9b26db1eb4545c477a6604", size = 1862081, upload-time = "2025-07-27T21:24:13.8Z" }, + { url = "https://files.pythonhosted.org/packages/41/55/d36255f1a9004a3352469143d2b8a5b769e0eb4e484a8192da41ad67e893/cramjam-2.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a4963dac24213690183110d6b41125fdc4af871a5a213589d6c6606d49e1b949", size = 1699970, upload-time = "2025-07-27T21:24:15.547Z" }, + { url = "https://files.pythonhosted.org/packages/35/52/722a2efbe104903648185411f9c634e5678035476bc556001d6ef811e191/cramjam-2.11.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9af16f0b07d851b968c54e52d19430d820bb47c26d10a09cfb5c7127de26773", size = 2025715, upload-time = "2025-07-27T21:24:17.327Z" }, + { url = "https://files.pythonhosted.org/packages/0a/60/75084f30277d5f2481d20a544654894a32528f98f4415c1bd467823ab5b2/cramjam-2.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e2400c09ba620e2ca91a903dbe907d75f6a1994d8337e9f3026778daa92b08d", size = 1766999, upload-time = "2025-07-27T21:24:19.163Z" }, + { url = "https://files.pythonhosted.org/packages/89/5c/2663bdfcea6ab06fcac97883b5b574a12236c5d9f70691cc05dd49cb10fb/cramjam-2.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b820004db8b22715cee2ef154d4b47b3d76c4677ff217c587dd46f694a3052f9", size = 1854352, upload-time = "2025-07-27T21:24:20.953Z" }, + { url = "https://files.pythonhosted.org/packages/b4/df/1db5b57ccf77e923687b2061766e69c2cbdaf41641204207dbf55ef7ebe9/cramjam-2.11.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:261e9200942189d8201a005ffa1e29339479364b5b0013ab0758b03229d9ac67", size = 2036219, upload-time = "2025-07-27T21:24:23.029Z" }, + { url = "https://files.pythonhosted.org/packages/f7/28/fa3b017668a3264068c893e57a6b923dfd8fa851a1c821c4cc1c95cd47a6/cramjam-2.11.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24c61f1fad56ca68aee53bf67b6a84cd762a2c71ee4b71064378547c2411ae6", size = 2077245, upload-time = "2025-07-27T21:24:25.127Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1d/6f6018ee81acec6c4ef6cda6bd0770959992caf2f1c41e7944a135a53eca/cramjam-2.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab86d22f69a21961f35d1a1b02278b5bb9a95c5f5b4722c6904bca343c8d219f", size = 1982235, upload-time = "2025-07-27T21:24:26.851Z" }, + { url = "https://files.pythonhosted.org/packages/31/b4/c38f6077d8ec7c9208d23d4f7f19a618f5b4940170c9deba5d3bdc722eb6/cramjam-2.11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a88bc9b191422cd5b22a1521b28607008590628b6b2a8a7db5c54ec04dc82fa1", size = 2034629, upload-time = "2025-07-27T21:24:28.694Z" }, + { url = "https://files.pythonhosted.org/packages/66/3b/3f46a349b1a7a67e2bda10e99403e9163c87c95e34399cc69f4f86a2461a/cramjam-2.11.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:7855bc4df5ed5f7fb1c98ea3fd98292e9acd3c097b1b21d596a69e1e60455400", size = 2155552, upload-time = "2025-07-27T21:24:30.572Z" }, + { url = "https://files.pythonhosted.org/packages/ed/86/b431a51162d4c8f33b28bdcca047382e1038757d43625e65c8d29ed6c31f/cramjam-2.11.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:19eb43e21db9dc42613599703c1a8e40b0170514a313f11f4c8be380425a1019", size = 2169651, upload-time = "2025-07-27T21:24:32.331Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d5/9aa69784da58b6bd3f5abcaad2eb76ad2a89efde7929821bad17355fd8da/cramjam-2.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cec977d673ad596bae6bdfc0091ee386cef05b515b23f2ce52f9fadd0156186a", size = 2159740, upload-time = "2025-07-27T21:24:34.108Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e1/75706936eb81605a939e15b8b7a1241b35e805ce76a64838b4586c440f61/cramjam-2.11.0-cp39-cp39-win32.whl", hash = 
"sha256:dcc3b15b97f3054964b47e2a5fcfb4f5ff569e9af0a7af19f1d4c5f4231bbf3b", size = 1605449, upload-time = "2025-07-27T21:24:36.538Z" }, + { url = "https://files.pythonhosted.org/packages/37/6b/ae7626994c7285bfc0ffa0d9929c3c16f2d0aea5b9e151dad82fd0616762/cramjam-2.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:5eb0603d8f8019451fc00e1daf4022dfc9df59c16d2e68f925c77ac94555493b", size = 1710860, upload-time = "2025-07-27T21:24:38.243Z" }, + { url = "https://files.pythonhosted.org/packages/bf/8f/82e35ec3c5387f1864f46b3c24bce89a07af8bb3ef242ae47281db2c1848/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:37bed927abc4a7ae2d2669baa3675e21904d8a038ed8e4313326ea7b3be62b2b", size = 3573104, upload-time = "2025-07-27T21:24:40.069Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4e/0c821918080a32ba1e52c040e12dd02dada67728f07305c5f778b808a807/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:50e4a58635fa8c6897d84847d6e065eb69f92811670fc5e9f2d9e3b6279a02b6", size = 1873441, upload-time = "2025-07-27T21:24:42.333Z" }, + { url = "https://files.pythonhosted.org/packages/a8/fd/848d077bf6abc4ce84273d8e3f3a70d61a2240519a339462f699d8acf829/cramjam-2.11.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d1ba626dd5f81f7f09bbf59f70b534e2b75e0d6582b056b7bd31b397f1c13e9", size = 1702589, upload-time = "2025-07-27T21:24:44.305Z" }, + { url = "https://files.pythonhosted.org/packages/9d/1c/899818999bbdb59c601756b413e87d37fd65875d1315346c10e367bb3505/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c71e140d5eb3145d61d59d0be0bf72f07cc4cf4b32cb136b09f712a3b1040f5f", size = 1773646, upload-time = "2025-07-27T21:24:46.495Z" }, + { url = "https://files.pythonhosted.org/packages/5f/26/c2813c5422c43b3dcd8b6645bc359f08870737c44325ee4accc18f24eee0/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6ed7926a5cca28edebad7d0fedd2ad492710ae3524d25fc59a2b20546d9ce1", size = 1994179, upload-time = "2025-07-27T21:24:49.131Z" }, + { url = "https://files.pythonhosted.org/packages/2e/4f/af984f8d7f963f0301812cdd620ddcfd8276461ed7a786c0f89e82b14739/cramjam-2.11.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5eb4ed3cea945b164b0513fd491884993acac2153a27b93a84019c522e8eda82", size = 1714790, upload-time = "2025-07-27T21:24:51.045Z" }, + { url = "https://files.pythonhosted.org/packages/81/da/b3301962ccd6fce9fefa1ecd8ea479edaeaa38fadb1f34d5391d2587216a/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:52d5db3369f95b27b9f3c14d067acb0b183333613363ed34268c9e04560f997f", size = 3573546, upload-time = "2025-07-27T21:24:52.944Z" }, + { url = "https://files.pythonhosted.org/packages/b6/c2/410ddb8ad4b9dfb129284666293cb6559479645da560f7077dc19d6bee9e/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4820516366d455b549a44d0e2210ee7c4575882dda677564ce79092588321d54", size = 1873654, upload-time = "2025-07-27T21:24:54.958Z" }, + { url = "https://files.pythonhosted.org/packages/d5/99/f68a443c64f7ce7aff5bed369b0aa5b2fac668fa3dfd441837e316e97a1f/cramjam-2.11.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d9e5db525dc0a950a825202f84ee68d89a072479e07da98795a3469df942d301", size = 1702846, upload-time = "2025-07-27T21:24:57.124Z" }, + { url = 
"https://files.pythonhosted.org/packages/6c/02/0ff358ab773def1ee3383587906c453d289953171e9c92db84fdd01bf172/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62ab4971199b2270005359cdc379bc5736071dc7c9a228581c5122d9ffaac50c", size = 1773683, upload-time = "2025-07-27T21:24:59.28Z" }, + { url = "https://files.pythonhosted.org/packages/e9/31/3298e15f87c9cf2aabdbdd90b153d8644cf989cb42a45d68a1b71e1f7aaf/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24758375cc5414d3035ca967ebb800e8f24604ececcba3c67d6f0218201ebf2d", size = 1994136, upload-time = "2025-07-27T21:25:01.565Z" }, + { url = "https://files.pythonhosted.org/packages/c7/90/20d1747255f1ee69a412e319da51ea594c18cca195e7a4d4c713f045eff5/cramjam-2.11.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6c2eea545fef1065c7dd4eda991666fd9c783fbc1d226592ccca8d8891c02f23", size = 1714982, upload-time = "2025-07-27T21:25:05.79Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/62/e3664e6ffd7743e1694b244dde70b43a394f6f7fbcacf7014a8ff5197c73/cryptography-46.0.1.tar.gz", hash = "sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7", size = 749198, upload-time = "2025-09-17T00:10:35.797Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/8c/44ee01267ec01e26e43ebfdae3f120ec2312aa72fa4c0507ebe41a26739f/cryptography-46.0.1-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475", size = 7285044, upload-time = "2025-09-17T00:08:36.807Z" }, + { url = "https://files.pythonhosted.org/packages/22/59/9ae689a25047e0601adfcb159ec4f83c0b4149fdb5c3030cc94cd218141d/cryptography-46.0.1-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080", size = 4308182, upload-time = "2025-09-17T00:08:39.388Z" }, + { url = "https://files.pythonhosted.org/packages/c4/ee/ca6cc9df7118f2fcd142c76b1da0f14340d77518c05b1ebfbbabca6b9e7d/cryptography-46.0.1-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e", size = 4572393, upload-time = "2025-09-17T00:08:41.663Z" }, + { url = "https://files.pythonhosted.org/packages/7f/a3/0f5296f63815d8e985922b05c31f77ce44787b3127a67c0b7f70f115c45f/cryptography-46.0.1-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6", size = 4308400, upload-time = "2025-09-17T00:08:43.559Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8c/74fcda3e4e01be1d32775d5b4dd841acaac3c1b8fa4d0774c7ac8d52463d/cryptography-46.0.1-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8", size = 4015786, upload-time = "2025-09-17T00:08:45.758Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b8/85d23287baeef273b0834481a3dd55bbed3a53587e3b8d9f0898235b8f91/cryptography-46.0.1-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28", size = 4982606, upload-time = "2025-09-17T00:08:47.602Z" 
}, + { url = "https://files.pythonhosted.org/packages/e5/d3/de61ad5b52433b389afca0bc70f02a7a1f074651221f599ce368da0fe437/cryptography-46.0.1-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9", size = 4604234, upload-time = "2025-09-17T00:08:49.879Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1f/dbd4d6570d84748439237a7478d124ee0134bf166ad129267b7ed8ea6d22/cryptography-46.0.1-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736", size = 4307669, upload-time = "2025-09-17T00:08:52.321Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fd/ca0a14ce7f0bfe92fa727aacaf2217eb25eb7e4ed513b14d8e03b26e63ed/cryptography-46.0.1-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b", size = 4947579, upload-time = "2025-09-17T00:08:54.697Z" }, + { url = "https://files.pythonhosted.org/packages/89/6b/09c30543bb93401f6f88fce556b3bdbb21e55ae14912c04b7bf355f5f96c/cryptography-46.0.1-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab", size = 4603669, upload-time = "2025-09-17T00:08:57.16Z" }, + { url = "https://files.pythonhosted.org/packages/23/9a/38cb01cb09ce0adceda9fc627c9cf98eb890fc8d50cacbe79b011df20f8a/cryptography-46.0.1-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75", size = 4435828, upload-time = "2025-09-17T00:08:59.606Z" }, + { url = "https://files.pythonhosted.org/packages/0f/53/435b5c36a78d06ae0bef96d666209b0ecd8f8181bfe4dda46536705df59e/cryptography-46.0.1-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5", size = 4709553, upload-time = "2025-09-17T00:09:01.832Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c4/0da6e55595d9b9cd3b6eb5dc22f3a07ded7f116a3ea72629cab595abb804/cryptography-46.0.1-cp311-abi3-win32.whl", hash = "sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0", size = 3058327, upload-time = "2025-09-17T00:09:03.726Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/cd29a35e0d6e78a0ee61793564c8cff0929c38391cb0de27627bdc7525aa/cryptography-46.0.1-cp311-abi3-win_amd64.whl", hash = "sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7", size = 3523893, upload-time = "2025-09-17T00:09:06.272Z" }, + { url = "https://files.pythonhosted.org/packages/f2/dd/eea390f3e78432bc3d2f53952375f8b37cb4d37783e626faa6a51e751719/cryptography-46.0.1-cp311-abi3-win_arm64.whl", hash = "sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0", size = 2932145, upload-time = "2025-09-17T00:09:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fb/c73588561afcd5e24b089952bd210b14676c0c5bf1213376350ae111945c/cryptography-46.0.1-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee", size = 7193928, upload-time = "2025-09-17T00:09:10.595Z" }, + { url = "https://files.pythonhosted.org/packages/26/34/0ff0bb2d2c79f25a2a63109f3b76b9108a906dd2a2eb5c1d460b9938adbb/cryptography-46.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd", size = 4293515, upload-time = "2025-09-17T00:09:12.861Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/b7/d4f848aee24ecd1be01db6c42c4a270069a4f02a105d9c57e143daf6cf0f/cryptography-46.0.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a", size = 4545619, upload-time = "2025-09-17T00:09:15.397Z" }, + { url = "https://files.pythonhosted.org/packages/44/a5/42fedefc754fd1901e2d95a69815ea4ec8a9eed31f4c4361fcab80288661/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a", size = 4299160, upload-time = "2025-09-17T00:09:17.155Z" }, + { url = "https://files.pythonhosted.org/packages/86/a1/cd21174f56e769c831fbbd6399a1b7519b0ff6280acec1b826d7b072640c/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a", size = 3994491, upload-time = "2025-09-17T00:09:18.971Z" }, + { url = "https://files.pythonhosted.org/packages/8d/2f/a8cbfa1c029987ddc746fd966711d4fa71efc891d37fbe9f030fe5ab4eec/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12", size = 4960157, upload-time = "2025-09-17T00:09:20.923Z" }, + { url = "https://files.pythonhosted.org/packages/67/ae/63a84e6789e0d5a2502edf06b552bcb0fa9ff16147265d5c44a211942abe/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129", size = 4577263, upload-time = "2025-09-17T00:09:23.356Z" }, + { url = "https://files.pythonhosted.org/packages/ef/8f/1b9fa8e92bd9cbcb3b7e1e593a5232f2c1e6f9bd72b919c1a6b37d315f92/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da", size = 4298703, upload-time = "2025-09-17T00:09:25.566Z" }, + { url = "https://files.pythonhosted.org/packages/c3/af/bb95db070e73fea3fae31d8a69ac1463d89d1c084220f549b00dd01094a8/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b", size = 4926363, upload-time = "2025-09-17T00:09:27.451Z" }, + { url = "https://files.pythonhosted.org/packages/f5/3b/d8fb17ffeb3a83157a1cc0aa5c60691d062aceecba09c2e5e77ebfc1870c/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657", size = 4576958, upload-time = "2025-09-17T00:09:29.924Z" }, + { url = "https://files.pythonhosted.org/packages/d9/46/86bc3a05c10c8aa88c8ae7e953a8b4e407c57823ed201dbcba55c4d655f4/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0", size = 4422507, upload-time = "2025-09-17T00:09:32.222Z" }, + { url = "https://files.pythonhosted.org/packages/a8/4e/387e5a21dfd2b4198e74968a541cfd6128f66f8ec94ed971776e15091ac3/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0", size = 4683964, upload-time = "2025-09-17T00:09:34.118Z" }, + { url = "https://files.pythonhosted.org/packages/25/a3/f9f5907b166adb8f26762071474b38bbfcf89858a5282f032899075a38a1/cryptography-46.0.1-cp314-cp314t-win32.whl", hash = "sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277", size = 3029705, upload-time = "2025-09-17T00:09:36.381Z" }, + { 
url = "https://files.pythonhosted.org/packages/12/66/4d3a4f1850db2e71c2b1628d14b70b5e4c1684a1bd462f7fffb93c041c38/cryptography-46.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05", size = 3502175, upload-time = "2025-09-17T00:09:38.261Z" }, + { url = "https://files.pythonhosted.org/packages/52/c7/9f10ad91435ef7d0d99a0b93c4360bea3df18050ff5b9038c489c31ac2f5/cryptography-46.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784", size = 2912354, upload-time = "2025-09-17T00:09:40.078Z" }, + { url = "https://files.pythonhosted.org/packages/98/e5/fbd632385542a3311915976f88e0dfcf09e62a3fc0aff86fb6762162a24d/cryptography-46.0.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b", size = 7255677, upload-time = "2025-09-17T00:09:42.407Z" }, + { url = "https://files.pythonhosted.org/packages/56/3e/13ce6eab9ad6eba1b15a7bd476f005a4c1b3f299f4c2f32b22408b0edccf/cryptography-46.0.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8", size = 4301110, upload-time = "2025-09-17T00:09:45.614Z" }, + { url = "https://files.pythonhosted.org/packages/a2/67/65dc233c1ddd688073cf7b136b06ff4b84bf517ba5529607c9d79720fc67/cryptography-46.0.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead", size = 4562369, upload-time = "2025-09-17T00:09:47.601Z" }, + { url = "https://files.pythonhosted.org/packages/17/db/d64ae4c6f4e98c3dac5bf35dd4d103f4c7c345703e43560113e5e8e31b2b/cryptography-46.0.1-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2", size = 4302126, upload-time = "2025-09-17T00:09:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/3d/19/5f1eea17d4805ebdc2e685b7b02800c4f63f3dd46cfa8d4c18373fea46c8/cryptography-46.0.1-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32", size = 4009431, upload-time = "2025-09-17T00:09:51.239Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/229ba6088fe7abccbfe4c5edb96c7a5ad547fac5fdd0d40aa6ea540b2985/cryptography-46.0.1-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef", size = 4980739, upload-time = "2025-09-17T00:09:54.181Z" }, + { url = "https://files.pythonhosted.org/packages/3a/9c/50aa38907b201e74bc43c572f9603fa82b58e831bd13c245613a23cff736/cryptography-46.0.1-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0", size = 4592289, upload-time = "2025-09-17T00:09:56.731Z" }, + { url = "https://files.pythonhosted.org/packages/5a/33/229858f8a5bb22f82468bb285e9f4c44a31978d5f5830bb4ea1cf8a4e454/cryptography-46.0.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128", size = 4301815, upload-time = "2025-09-17T00:09:58.548Z" }, + { url = "https://files.pythonhosted.org/packages/52/cb/b76b2c87fbd6ed4a231884bea3ce073406ba8e2dae9defad910d33cbf408/cryptography-46.0.1-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca", size = 4943251, upload-time = "2025-09-17T00:10:00.475Z" }, + { url 
= "https://files.pythonhosted.org/packages/94/0f/f66125ecf88e4cb5b8017ff43f3a87ede2d064cb54a1c5893f9da9d65093/cryptography-46.0.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc", size = 4591247, upload-time = "2025-09-17T00:10:02.874Z" }, + { url = "https://files.pythonhosted.org/packages/f6/22/9f3134ae436b63b463cfdf0ff506a0570da6873adb4bf8c19b8a5b4bac64/cryptography-46.0.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7", size = 4428534, upload-time = "2025-09-17T00:10:04.994Z" }, + { url = "https://files.pythonhosted.org/packages/89/39/e6042bcb2638650b0005c752c38ea830cbfbcbb1830e4d64d530000aa8dc/cryptography-46.0.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a", size = 4699541, upload-time = "2025-09-17T00:10:06.925Z" }, + { url = "https://files.pythonhosted.org/packages/68/46/753d457492d15458c7b5a653fc9a84a1c9c7a83af6ebdc94c3fc373ca6e8/cryptography-46.0.1-cp38-abi3-win32.whl", hash = "sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1", size = 3043779, upload-time = "2025-09-17T00:10:08.951Z" }, + { url = "https://files.pythonhosted.org/packages/2f/50/b6f3b540c2f6ee712feeb5fa780bb11fad76634e71334718568e7695cb55/cryptography-46.0.1-cp38-abi3-win_amd64.whl", hash = "sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3", size = 3517226, upload-time = "2025-09-17T00:10:10.769Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e8/77d17d00981cdd27cc493e81e1749a0b8bbfb843780dbd841e30d7f50743/cryptography-46.0.1-cp38-abi3-win_arm64.whl", hash = "sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9", size = 2923149, upload-time = "2025-09-17T00:10:13.236Z" }, + { url = "https://files.pythonhosted.org/packages/14/b9/b260180b31a66859648cfed5c980544ee22b15f8bd20ef82a23f58c0b83e/cryptography-46.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd4b5e2ee4e60425711ec65c33add4e7a626adef79d66f62ba0acfd493af282d", size = 3714683, upload-time = "2025-09-17T00:10:15.601Z" }, + { url = "https://files.pythonhosted.org/packages/c5/5a/1cd3ef86e5884edcbf8b27c3aa8f9544e9b9fcce5d3ed8b86959741f4f8e/cryptography-46.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48948940d0ae00483e85e9154bb42997d0b77c21e43a77b7773c8c80de532ac5", size = 3443784, upload-time = "2025-09-17T00:10:18.014Z" }, + { url = "https://files.pythonhosted.org/packages/27/27/077e09fd92075dd1338ea0ffaf5cfee641535545925768350ad90d8c36ca/cryptography-46.0.1-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b9c79af2c3058430d911ff1a5b2b96bbfe8da47d5ed961639ce4681886614e70", size = 3722319, upload-time = "2025-09-17T00:10:20.273Z" }, + { url = "https://files.pythonhosted.org/packages/db/32/6fc7250280920418651640d76cee34d91c1e0601d73acd44364570cf041f/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0ca4be2af48c24df689a150d9cd37404f689e2968e247b6b8ff09bff5bcd786f", size = 4249030, upload-time = "2025-09-17T00:10:22.396Z" }, + { url = "https://files.pythonhosted.org/packages/32/33/8d5398b2da15a15110b2478480ab512609f95b45ead3a105c9a9c76f9980/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:13e67c4d3fb8b6bc4ef778a7ccdd8df4cd15b4bcc18f4239c8440891a11245cc", size = 4528009, upload-time = "2025-09-17T00:10:24.418Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/1c/4012edad2a8977ab386c36b6e21f5065974d37afa3eade83a9968cba4855/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:15b5fd9358803b0d1cc42505a18d8bca81dabb35b5cfbfea1505092e13a9d96d", size = 4248902, upload-time = "2025-09-17T00:10:26.255Z" }, + { url = "https://files.pythonhosted.org/packages/58/a3/257cd5ae677302de8fa066fca9de37128f6729d1e63c04dd6a15555dd450/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e34da95e29daf8a71cb2841fd55df0511539a6cdf33e6f77c1e95e44006b9b46", size = 4527150, upload-time = "2025-09-17T00:10:28.28Z" }, + { url = "https://files.pythonhosted.org/packages/6a/cd/fe6b65e1117ec7631f6be8951d3db076bac3e1b096e3e12710ed071ffc3c/cryptography-46.0.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:34f04b7311174469ab3ac2647469743720f8b6c8b046f238e5cb27905695eb2a", size = 3448210, upload-time = "2025-09-17T00:10:30.145Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197, upload-time = "2024-10-05T20:14:59.362Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632, upload-time = "2024-10-05T20:14:57.687Z" }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = 
"2025-05-10T17:42:51.123Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, +] + +[[package]] +name = "furo" +version = "2025.9.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "accessible-pygments" }, + { name = "beautifulsoup4" }, + { name = "pygments" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinx-basic-ng" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/29/ff3b83a1ffce74676043ab3e7540d398e0b1ce7660917a00d7c4958b93da/furo-2025.9.25.tar.gz", hash = "sha256:3eac05582768fdbbc2bdfa1cdbcdd5d33cfc8b4bd2051729ff4e026a1d7e0a98", size = 1662007, upload-time = "2025-09-25T21:37:19.221Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/69/964b55f389c289e16ba2a5dfe587c3c462aac09e24123f09ddf703889584/furo-2025.9.25-py3-none-any.whl", hash = "sha256:2937f68e823b8e37b410c972c371bc2b1d88026709534927158e0cb3fac95afe", size = 340409, upload-time = "2025-09-25T21:37:17.244Z" }, +] + +[[package]] +name = "gevent" +version = "25.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/a7/438568c37fb255f80e710318bfcad04731b92ce764bc16adee278fdc6b4d/gevent-25.5.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8e5a0fab5e245b15ec1005b3666b0a2e867c26f411c8fe66ae1afe07174a30e9", size = 2922800, upload-time = "2025-05-12T11:11:46.728Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b3/b44d8b1c4a4d01097a7f82ffbc582d054007365c27b28867f0b2d4241d73/gevent-25.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7b80a37f2fb45ee4a8f7e64b77dd8a842d364384046e394227b974a4e9c9a52", size = 1812954, upload-time = "2025-05-12T11:52:27.059Z" }, + { url = "https://files.pythonhosted.org/packages/1e/c6/935b4c973ad827c9ec49c354d68d047da1d23e3018bda63d3723cce43178/gevent-25.5.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ab729d50ae85077a68e0385f129f5b01052d01a0ae6d7fdc1824f5337905e4", size = 1900169, upload-time = "2025-05-12T11:54:17.797Z" }, + { url = "https://files.pythonhosted.org/packages/38/8a/b745bddfec35fb723cafb036f191e5e0a0013f1698bf0ba4fa2cb8e01879/gevent-25.5.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80d20592aeabcc4e294fd441fd43d45cb537437fd642c374ea9d964622fad229", size = 1849786, upload-time = 
"2025-05-12T12:00:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/7c/b3/7aa7b09d91207bebe7608699558bbadd34f63e32904351867c29f8be25de/gevent-25.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8ba0257542ccbb72a8229dc34d00844ccdfba110417e4b7b34599548d0e20e9", size = 2139021, upload-time = "2025-05-12T11:32:58.961Z" }, + { url = "https://files.pythonhosted.org/packages/74/da/cf52ae0c84361f4164a04f3338508b1234331ce79719db103e50dbc5598c/gevent-25.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cad0821dff998c7c60dd238f92cd61380342c47fb9e92e1a8705d9b5ac7c16e8", size = 1830758, upload-time = "2025-05-12T11:59:55.666Z" }, + { url = "https://files.pythonhosted.org/packages/93/93/73a49b896d78eec27f0895ce3008f9825db748a5aacbca47404d1014da4b/gevent-25.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:017a7384c0cd1a5907751c991535a0699596e89725468a7fc39228312e10efa1", size = 2199993, upload-time = "2025-05-12T11:40:50.845Z" }, + { url = "https://files.pythonhosted.org/packages/df/c7/34680b7d2a75492fa032fa8ecaacc03c1940767a35125f6740954a0132a3/gevent-25.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:469c86d02fccad7e2a3d82fe22237e47ecb376fbf4710bc18747b49c50716817", size = 1652665, upload-time = "2025-05-12T12:35:58.105Z" }, + { url = "https://files.pythonhosted.org/packages/c6/eb/015e93f16a718e2f836ecebecae9bcd7b4d2a5695d1c8bd5bba2d5d91548/gevent-25.5.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:12380aba5c316e9ff53cc21d8ab80f4a91c0df3ada58f65d4f5eb2cf693db00e", size = 2877441, upload-time = "2025-05-12T11:14:57.735Z" }, + { url = "https://files.pythonhosted.org/packages/7b/86/42d191a6f6672ca59d6d79b4cd9b89d4a15f59c843fbbad42f2b749f8ea9/gevent-25.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0694daab1a041b69a53f53c2141c12994892b2503870515cabe6a5dbd2a928", size = 1774873, upload-time = "2025-05-12T11:52:29.015Z" }, + { url = "https://files.pythonhosted.org/packages/f5/9f/42dd255849c9ca2e814f5cbe180980594007ba19044a132cf674069e38bf/gevent-25.5.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2797885e9aeffdc98e1846723e5aa212e7ce53007dbef40d6fd2add264235c41", size = 1857911, upload-time = "2025-05-12T11:54:19.523Z" }, + { url = "https://files.pythonhosted.org/packages/3e/fc/8e799a733be48f6114bfc531b94e28812741664d8af89872dd90e117f8a4/gevent-25.5.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cde6aaac36b54332e10ea2a5bc0de6a8aba6c205c92603fe4396e3777c88e05d", size = 1812751, upload-time = "2025-05-12T12:00:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/52/4f/a3f3acd961887da10cb0b49c3d915201973d59ce6bf49e2922eaf2058d5f/gevent-25.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24484f80f14befb8822bf29554cfb3a26a26cb69cd1e5a8be9e23b4bd7a96e25", size = 2087115, upload-time = "2025-05-12T11:33:01.128Z" }, + { url = "https://files.pythonhosted.org/packages/b6/27/bb38e005106a53787c13ad1f9f73ed990e403e462108acae6320ab11d442/gevent-25.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc7446895fa184890d8ca5ea61e502691114f9db55c9b76adc33f3086c4368", size = 1793549, upload-time = "2025-05-12T11:59:57.854Z" }, + { url = "https://files.pythonhosted.org/packages/ee/56/da817bc69e1f0ae8438f12f2cd150656b09a8c3576c6d12f992dc9ca64ef/gevent-25.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5b6106e2414b1797133786258fa1962a5e836480e4d5e861577f9fc63b673a5a", size = 2145899, upload-time = 
"2025-05-12T11:40:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/b8/42/989403abbdbb1346a1507083c02018bee3fedaef3f9648940c767d8c0958/gevent-25.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:bc899212d90f311784c58938a9c09c59802fb6dc287a35fabdc36d180f57f575", size = 1635771, upload-time = "2025-05-12T12:26:47.644Z" }, + { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" }, + { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" }, + { url = "https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" }, + { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" }, + { url = "https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" }, + { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" }, + { url = "https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" }, + { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = 
"2025-05-12T11:52:32.643Z" }, + { url = "https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" }, + { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" }, + { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" }, + { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" }, + { url = "https://files.pythonhosted.org/packages/60/16/b71171e97ec7b4ded8669542f4369d88d5a289e2704efbbde51e858e062a/gevent-25.5.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:0bacf89a65489d26c7087669af89938d5bfd9f7afb12a07b57855b9fad6ccbd0", size = 2937113, upload-time = "2025-05-12T11:12:03.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/54/e5908beb092c2745aa8390f15b9559cc3ebd77bf1ba71c81c606f7b1fb92/gevent-25.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30169ef9cc0a57930bfd8fe14d86bc9d39fb96d278e3891e85cbe7b46058a97", size = 2147450, upload-time = "2025-05-12T11:33:05.883Z" }, + { url = "https://files.pythonhosted.org/packages/ee/39/206c9da2395a7df11c13e2989f7c7c65a7799babdb8b4b055cccae4d5c14/gevent-25.5.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e72ad5f8d9c92df017fb91a1f6a438cfb63b0eff4b40904ff81b40cb8150078c", size = 2210122, upload-time = "2025-05-12T11:40:58.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/18/d10ca3841b686143c1973cac816651a72ff77ad9e79a5300cbbbe310fced/gevent-25.5.1-cp39-cp39-win32.whl", hash = "sha256:e5f358e81e27b1a7f2fb2f5219794e13ab5f59ce05571aa3877cfac63adb97db", size = 1548447, upload-time = "2025-05-12T12:48:21.565Z" }, + { url = "https://files.pythonhosted.org/packages/ac/9d/48c01ff8324ce4bfaba0760c0f1db6f4e2c976838655f6b80333cfd47999/gevent-25.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:b83aff2441c7d4ee93e519989713b7c2607d4510abe990cd1d04f641bc6c03af", size = 1659832, upload-time = "2025-05-12T12:45:00.794Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/81/834da3c1ea5e71e4dc1a78a034a15f2813d9760d135464aae5d1f058a8c6/gevent-25.5.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:60ad4ca9ca2c4cc8201b607c229cd17af749831e371d006d8a91303bb5568eb1", size = 1291540, upload-time = "2025-05-12T11:11:55.456Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/db/b4c12cff13ebac2786f4f217f06588bccd8b53d260453404ef22b121fc3a/greenlet-3.2.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:1afd685acd5597349ee6d7a88a8bec83ce13c106ac78c196ee9dde7c04fe87be", size = 268977, upload-time = "2025-06-05T16:10:24.001Z" }, + { url = "https://files.pythonhosted.org/packages/52/61/75b4abd8147f13f70986df2801bf93735c1bd87ea780d70e3b3ecda8c165/greenlet-3.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:761917cac215c61e9dc7324b2606107b3b292a8349bdebb31503ab4de3f559ac", size = 627351, upload-time = "2025-06-05T16:38:50.685Z" }, + { url = "https://files.pythonhosted.org/packages/35/aa/6894ae299d059d26254779a5088632874b80ee8cf89a88bca00b0709d22f/greenlet-3.2.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a433dbc54e4a37e4fff90ef34f25a8c00aed99b06856f0119dcf09fbafa16392", size = 638599, upload-time = "2025-06-05T16:41:34.057Z" }, + { url = "https://files.pythonhosted.org/packages/30/64/e01a8261d13c47f3c082519a5e9dbf9e143cc0498ed20c911d04e54d526c/greenlet-3.2.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:72e77ed69312bab0434d7292316d5afd6896192ac4327d44f3d613ecb85b037c", size = 634482, upload-time = "2025-06-05T16:48:16.26Z" }, + { url = "https://files.pythonhosted.org/packages/47/48/ff9ca8ba9772d083a4f5221f7b4f0ebe8978131a9ae0909cf202f94cd879/greenlet-3.2.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:68671180e3849b963649254a882cd544a3c75bfcd2c527346ad8bb53494444db", size = 633284, upload-time = "2025-06-05T16:13:01.599Z" }, + { url = "https://files.pythonhosted.org/packages/e9/45/626e974948713bc15775b696adb3eb0bd708bec267d6d2d5c47bb47a6119/greenlet-3.2.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49c8cfb18fb419b3d08e011228ef8a25882397f3a859b9fe1436946140b6756b", size = 582206, upload-time = "2025-06-05T16:12:48.51Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8e/8b6f42c67d5df7db35b8c55c9a850ea045219741bb14416255616808c690/greenlet-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:efc6dc8a792243c31f2f5674b670b3a95d46fa1c6a912b8e310d6f542e7b0712", size = 1111412, upload-time = "2025-06-05T16:36:45.479Z" }, + { url = "https://files.pythonhosted.org/packages/05/46/ab58828217349500a7ebb81159d52ca357da747ff1797c29c6023d79d798/greenlet-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:731e154aba8e757aedd0781d4b240f1225b075b4409f1bb83b05ff410582cf00", size = 1135054, upload-time = "2025-06-05T16:12:36.478Z" }, + { url = "https://files.pythonhosted.org/packages/68/7f/d1b537be5080721c0f0089a8447d4ef72839039cdb743bdd8ffd23046e9a/greenlet-3.2.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:96c20252c2f792defe9a115d3287e14811036d51e78b3aaddbee23b69b216302", size = 296573, upload-time = "2025-06-05T16:34:26.521Z" }, + { url = "https://files.pythonhosted.org/packages/fc/2e/d4fcb2978f826358b673f779f78fa8a32ee37df11920dc2bb5589cbeecef/greenlet-3.2.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:784ae58bba89fa1fa5733d170d42486580cab9decda3484779f4759345b29822", size = 270219, upload-time = "2025-06-05T16:10:10.414Z" }, + { url = "https://files.pythonhosted.org/packages/16/24/929f853e0202130e4fe163bc1d05a671ce8dcd604f790e14896adac43a52/greenlet-3.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0921ac4ea42a5315d3446120ad48f90c3a6b9bb93dd9b3cf4e4d84a66e42de83", size = 630383, upload-time = "2025-06-05T16:38:51.785Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b2/0320715eb61ae70c25ceca2f1d5ae620477d246692d9cc284c13242ec31c/greenlet-3.2.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d2971d93bb99e05f8c2c0c2f4aa9484a18d98c4c3bd3c62b65b7e6ae33dfcfaf", size = 642422, upload-time = "2025-06-05T16:41:35.259Z" }, + { url = "https://files.pythonhosted.org/packages/bd/49/445fd1a210f4747fedf77615d941444349c6a3a4a1135bba9701337cd966/greenlet-3.2.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c667c0bf9d406b77a15c924ef3285e1e05250948001220368e039b6aa5b5034b", size = 638375, upload-time = "2025-06-05T16:48:18.235Z" }, + { url = "https://files.pythonhosted.org/packages/7e/c8/ca19760cf6eae75fa8dc32b487e963d863b3ee04a7637da77b616703bc37/greenlet-3.2.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:592c12fb1165be74592f5de0d70f82bc5ba552ac44800d632214b76089945147", size = 637627, upload-time = "2025-06-05T16:13:02.858Z" }, + { url = "https://files.pythonhosted.org/packages/65/89/77acf9e3da38e9bcfca881e43b02ed467c1dedc387021fc4d9bd9928afb8/greenlet-3.2.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29e184536ba333003540790ba29829ac14bb645514fbd7e32af331e8202a62a5", size = 585502, upload-time = "2025-06-05T16:12:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/97/c6/ae244d7c95b23b7130136e07a9cc5aadd60d59b5951180dc7dc7e8edaba7/greenlet-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93c0bb79844a367782ec4f429d07589417052e621aa39a5ac1fb99c5aa308edc", size = 1114498, upload-time = "2025-06-05T16:36:46.598Z" }, + { url = "https://files.pythonhosted.org/packages/89/5f/b16dec0cbfd3070658e0d744487919740c6d45eb90946f6787689a7efbce/greenlet-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:751261fc5ad7b6705f5f76726567375bb2104a059454e0226e1eef6c756748ba", size = 1139977, upload-time = "2025-06-05T16:12:38.262Z" }, + { url = "https://files.pythonhosted.org/packages/66/77/d48fb441b5a71125bcac042fc5b1494c806ccb9a1432ecaa421e72157f77/greenlet-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:83a8761c75312361aa2b5b903b79da97f13f556164a7dd2d5448655425bd4c34", size = 297017, upload-time = "2025-06-05T16:25:05.225Z" }, + { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, + { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, + { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, + { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, + { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, + { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time 
= "2025-06-05T16:41:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, + { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, + { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, + { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, + { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, + { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, + { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, + { url = "https://files.pythonhosted.org/packages/3d/d9/a3114df5fba2bf9823e0acc01e9e2abdcd8ea4c5487cf1c3dcd4cc0b48cf/greenlet-3.2.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:42efc522c0bd75ffa11a71e09cd8a399d83fafe36db250a87cf1dacfaa15dc64", size = 267769, upload-time = "2025-06-05T16:10:44.802Z" }, + { url = "https://files.pythonhosted.org/packages/bc/da/47dfc50f6e5673116e66a737dc58d1eca651db9a9aa8797c1d27e940e211/greenlet-3.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d760f9bdfe79bff803bad32b4d8ffb2c1d2ce906313fc10a83976ffb73d64ca7", size = 625472, upload-time = "2025-06-05T16:38:56.882Z" }, + { url = "https://files.pythonhosted.org/packages/f5/74/f6ef9f85d981b2fcd665bbee3e69e3c0a10fb962eb4c6a5889ac3b6debfa/greenlet-3.2.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8324319cbd7b35b97990090808fdc99c27fe5338f87db50514959f8059999805", size = 637253, upload-time = "2025-06-05T16:41:40.542Z" }, + { url = "https://files.pythonhosted.org/packages/66/69/4919bb1c9e43bfc16dc886e7a37fe1bc04bfa4101aba177936a10f313cad/greenlet-3.2.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:8c37ef5b3787567d322331d5250e44e42b58c8c713859b8a04c6065f27efbf72", size = 632611, upload-time = "2025-06-05T16:48:24.976Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/97d988d019f40b6b360b0c71c99e5b4c877a3d92666fe48b081d0e1ea1cd/greenlet-3.2.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ce539fb52fb774d0802175d37fcff5c723e2c7d249c65916257f0a940cee8904", size = 631843, upload-time = "2025-06-05T16:13:09.476Z" }, + { url = "https://files.pythonhosted.org/packages/59/24/d5e1504ec00768755d4ccc2168b76d9f4524e96694a14ad45bd87796e9bb/greenlet-3.2.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:003c930e0e074db83559edc8705f3a2d066d4aa8c2f198aff1e454946efd0f26", size = 580781, upload-time = "2025-06-05T16:12:55.029Z" }, + { url = "https://files.pythonhosted.org/packages/9c/df/d009bcca566dbfd2283b306b4e424f4c0e59bf984868f8b789802fe9e607/greenlet-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7e70ea4384b81ef9e84192e8a77fb87573138aa5d4feee541d8014e452b434da", size = 1109903, upload-time = "2025-06-05T16:36:51.491Z" }, + { url = "https://files.pythonhosted.org/packages/33/54/5036097197a78388aa6901a5b90b562f3a154a9fbee89c301a26f56f3942/greenlet-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:22eb5ba839c4b2156f18f76768233fe44b23a31decd9cc0d4cc8141c211fd1b4", size = 1133975, upload-time = "2025-06-05T16:12:43.866Z" }, + { url = "https://files.pythonhosted.org/packages/e2/15/b001456a430805fdd8b600a788d19a790664eee8863739523395f68df752/greenlet-3.2.3-cp39-cp39-win32.whl", hash = 
"sha256:4532f0d25df67f896d137431b13f4cdce89f7e3d4a96387a41290910df4d3a57", size = 279320, upload-time = "2025-06-05T16:43:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4c/bf2100cbc1bd07f39bee3b09e7eef39beffe29f5453dc2477a2693737913/greenlet-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:aaa7aae1e7f75eaa3ae400ad98f8644bb81e1dc6ba47ce8a93d3f17274e08322", size = 296444, upload-time = "2025-06-05T16:39:22.664Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026, upload-time = "2022-07-01T12:21:05.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "python_full_version != '3.14.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + 
+[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344, upload-time = "2024-10-18T15:21:43.721Z" }, + { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389, upload-time = "2024-10-18T15:21:44.666Z" }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607, upload-time = "2024-10-18T15:21:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728, upload-time = "2024-10-18T15:21:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826, upload-time = "2024-10-18T15:21:47.134Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843, upload-time = "2024-10-18T15:21:48.334Z" }, + { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219, upload-time = "2024-10-18T15:21:49.587Z" }, + { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946, upload-time = "2024-10-18T15:21:50.441Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063, upload-time = "2024-10-18T15:21:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506, upload-time = "2024-10-18T15:21:52.974Z" }, +] + +[[package]] +name = "mockupdb" +version = "1.9.0.dev1" +source = { git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master#317c4e049965f9d99423698a81e52d0ab37b7599" } +dependencies = [ + { name = "pymongo" }, +] + +[[package]] +name = "mypy" +version = "1.18.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/77/8f0d0001ffad290cef2f7f216f96c814866248a0b92a722365ed54648e7e/mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b", size = 3448846, upload-time = "2025-09-19T00:11:10.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/6f/657961a0743cff32e6c0611b63ff1c1970a0b482ace35b069203bf705187/mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c", size = 12807973, upload-time = "2025-09-19T00:10:35.282Z" }, + { url = "https://files.pythonhosted.org/packages/10/e9/420822d4f661f13ca8900f5fa239b40ee3be8b62b32f3357df9a3045a08b/mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e", size = 11896527, upload-time = "2025-09-19T00:10:55.791Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/73/a05b2bbaa7005f4642fcfe40fb73f2b4fb6bb44229bd585b5878e9a87ef8/mypy-1.18.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448acd386266989ef11662ce3c8011fd2a7b632e0ec7d61a98edd8e27472225b", size = 12507004, upload-time = "2025-09-19T00:11:05.411Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/f6e4b9f0d031c11ccbd6f17da26564f3a0f3c4155af344006434b0a05a9d/mypy-1.18.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f9e171c465ad3901dc652643ee4bffa8e9fef4d7d0eece23b428908c77a76a66", size = 13245947, upload-time = "2025-09-19T00:10:46.923Z" }, + { url = "https://files.pythonhosted.org/packages/d7/97/19727e7499bfa1ae0773d06afd30ac66a58ed7437d940c70548634b24185/mypy-1.18.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:592ec214750bc00741af1f80cbf96b5013d81486b7bb24cb052382c19e40b428", size = 13499217, upload-time = "2025-09-19T00:09:39.472Z" }, + { url = "https://files.pythonhosted.org/packages/9f/4f/90dc8c15c1441bf31cf0f9918bb077e452618708199e530f4cbd5cede6ff/mypy-1.18.2-cp310-cp310-win_amd64.whl", hash = "sha256:7fb95f97199ea11769ebe3638c29b550b5221e997c63b14ef93d2e971606ebed", size = 9766753, upload-time = "2025-09-19T00:10:49.161Z" }, + { url = "https://files.pythonhosted.org/packages/88/87/cafd3ae563f88f94eec33f35ff722d043e09832ea8530ef149ec1efbaf08/mypy-1.18.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:807d9315ab9d464125aa9fcf6d84fde6e1dc67da0b6f80e7405506b8ac72bc7f", size = 12731198, upload-time = "2025-09-19T00:09:44.857Z" }, + { url = "https://files.pythonhosted.org/packages/0f/e0/1e96c3d4266a06d4b0197ace5356d67d937d8358e2ee3ffac71faa843724/mypy-1.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:776bb00de1778caf4db739c6e83919c1d85a448f71979b6a0edd774ea8399341", size = 11817879, upload-time = "2025-09-19T00:09:47.131Z" }, + { url = "https://files.pythonhosted.org/packages/72/ef/0c9ba89eb03453e76bdac5a78b08260a848c7bfc5d6603634774d9cd9525/mypy-1.18.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1379451880512ffce14505493bd9fe469e0697543717298242574882cf8cdb8d", size = 12427292, upload-time = "2025-09-19T00:10:22.472Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/ec4a061dd599eb8179d5411d99775bec2a20542505988f40fc2fee781068/mypy-1.18.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1331eb7fd110d60c24999893320967594ff84c38ac6d19e0a76c5fd809a84c86", size = 13163750, upload-time = "2025-09-19T00:09:51.472Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5f/2cf2ceb3b36372d51568f2208c021870fe7834cf3186b653ac6446511839/mypy-1.18.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ca30b50a51e7ba93b00422e486cbb124f1c56a535e20eff7b2d6ab72b3b2e37", size = 13351827, upload-time = "2025-09-19T00:09:58.311Z" }, + { url = "https://files.pythonhosted.org/packages/c8/7d/2697b930179e7277529eaaec1513f8de622818696857f689e4a5432e5e27/mypy-1.18.2-cp311-cp311-win_amd64.whl", hash = "sha256:664dc726e67fa54e14536f6e1224bcfce1d9e5ac02426d2326e2bb4e081d1ce8", size = 9757983, upload-time = "2025-09-19T00:10:09.071Z" }, + { url = "https://files.pythonhosted.org/packages/07/06/dfdd2bc60c66611dd8335f463818514733bc763e4760dee289dcc33df709/mypy-1.18.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33eca32dd124b29400c31d7cf784e795b050ace0e1f91b8dc035672725617e34", size = 12908273, upload-time = 
"2025-09-19T00:10:58.321Z" }, + { url = "https://files.pythonhosted.org/packages/81/14/6a9de6d13a122d5608e1a04130724caf9170333ac5a924e10f670687d3eb/mypy-1.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3c47adf30d65e89b2dcd2fa32f3aeb5e94ca970d2c15fcb25e297871c8e4764", size = 11920910, upload-time = "2025-09-19T00:10:20.043Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a9/b29de53e42f18e8cc547e38daa9dfa132ffdc64f7250e353f5c8cdd44bee/mypy-1.18.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d6c838e831a062f5f29d11c9057c6009f60cb294fea33a98422688181fe2893", size = 12465585, upload-time = "2025-09-19T00:10:33.005Z" }, + { url = "https://files.pythonhosted.org/packages/77/ae/6c3d2c7c61ff21f2bee938c917616c92ebf852f015fb55917fd6e2811db2/mypy-1.18.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01199871b6110a2ce984bde85acd481232d17413868c9807e95c1b0739a58914", size = 13348562, upload-time = "2025-09-19T00:10:11.51Z" }, + { url = "https://files.pythonhosted.org/packages/4d/31/aec68ab3b4aebdf8f36d191b0685d99faa899ab990753ca0fee60fb99511/mypy-1.18.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a2afc0fa0b0e91b4599ddfe0f91e2c26c2b5a5ab263737e998d6817874c5f7c8", size = 13533296, upload-time = "2025-09-19T00:10:06.568Z" }, + { url = "https://files.pythonhosted.org/packages/9f/83/abcb3ad9478fca3ebeb6a5358bb0b22c95ea42b43b7789c7fb1297ca44f4/mypy-1.18.2-cp312-cp312-win_amd64.whl", hash = "sha256:d8068d0afe682c7c4897c0f7ce84ea77f6de953262b12d07038f4d296d547074", size = 9828828, upload-time = "2025-09-19T00:10:28.203Z" }, + { url = "https://files.pythonhosted.org/packages/5f/04/7f462e6fbba87a72bc8097b93f6842499c428a6ff0c81dd46948d175afe8/mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc", size = 12898728, upload-time = "2025-09-19T00:10:01.33Z" }, + { url = "https://files.pythonhosted.org/packages/99/5b/61ed4efb64f1871b41fd0b82d29a64640f3516078f6c7905b68ab1ad8b13/mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e", size = 11910758, upload-time = "2025-09-19T00:10:42.607Z" }, + { url = "https://files.pythonhosted.org/packages/3c/46/d297d4b683cc89a6e4108c4250a6a6b717f5fa96e1a30a7944a6da44da35/mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986", size = 12475342, upload-time = "2025-09-19T00:11:00.371Z" }, + { url = "https://files.pythonhosted.org/packages/83/45/4798f4d00df13eae3bfdf726c9244bcb495ab5bd588c0eed93a2f2dd67f3/mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d", size = 13338709, upload-time = "2025-09-19T00:11:03.358Z" }, + { url = "https://files.pythonhosted.org/packages/d7/09/479f7358d9625172521a87a9271ddd2441e1dab16a09708f056e97007207/mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba", size = 13529806, upload-time = "2025-09-19T00:10:26.073Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/ac0f2c7e9d0ea3c75cd99dff7aec1c9df4a1376537cb90e4c882267ee7e9/mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544", 
size = 9833262, upload-time = "2025-09-19T00:10:40.035Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0c/7d5300883da16f0063ae53996358758b2a2df2a09c72a5061fa79a1f5006/mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce", size = 12893775, upload-time = "2025-09-19T00:10:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/2cffbf25737bdb236f60c973edf62e3e7b4ee1c25b6878629e88e2cde967/mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d", size = 11936852, upload-time = "2025-09-19T00:10:51.631Z" }, + { url = "https://files.pythonhosted.org/packages/be/50/34059de13dd269227fb4a03be1faee6e2a4b04a2051c82ac0a0b5a773c9a/mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c", size = 12480242, upload-time = "2025-09-19T00:11:07.955Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/040983fad5132d85914c874a2836252bbc57832065548885b5bb5b0d4359/mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb", size = 13326683, upload-time = "2025-09-19T00:09:55.572Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ba/89b2901dd77414dd7a8c8729985832a5735053be15b744c18e4586e506ef/mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075", size = 13514749, upload-time = "2025-09-19T00:10:44.827Z" }, + { url = "https://files.pythonhosted.org/packages/25/bc/cc98767cffd6b2928ba680f3e5bc969c4152bf7c2d83f92f5a504b92b0eb/mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf", size = 9982959, upload-time = "2025-09-19T00:10:37.344Z" }, + { url = "https://files.pythonhosted.org/packages/3f/a6/490ff491d8ecddf8ab91762d4f67635040202f76a44171420bcbe38ceee5/mypy-1.18.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25a9c8fb67b00599f839cf472713f54249a62efd53a54b565eb61956a7e3296b", size = 12807230, upload-time = "2025-09-19T00:09:49.471Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2e/60076fc829645d167ece9e80db9e8375648d210dab44cc98beb5b322a826/mypy-1.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2b9c7e284ee20e7598d6f42e13ca40b4928e6957ed6813d1ab6348aa3f47133", size = 11895666, upload-time = "2025-09-19T00:10:53.678Z" }, + { url = "https://files.pythonhosted.org/packages/97/4a/1e2880a2a5dda4dc8d9ecd1a7e7606bc0b0e14813637eeda40c38624e037/mypy-1.18.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6985ed057513e344e43a26cc1cd815c7a94602fb6a3130a34798625bc2f07b6", size = 12499608, upload-time = "2025-09-19T00:09:36.204Z" }, + { url = "https://files.pythonhosted.org/packages/00/81/a117f1b73a3015b076b20246b1f341c34a578ebd9662848c6b80ad5c4138/mypy-1.18.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f27105f1525ec024b5c630c0b9f36d5c1cc4d447d61fe51ff4bd60633f47ac", size = 13244551, upload-time = "2025-09-19T00:10:17.531Z" }, + { url = "https://files.pythonhosted.org/packages/9b/61/b9f48e1714ce87c7bf0358eb93f60663740ebb08f9ea886ffc670cea7933/mypy-1.18.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:030c52d0ea8144e721e49b1f68391e39553d7451f0c3f8a7565b59e19fcb608b", size = 13491552, upload-time = "2025-09-19T00:10:13.753Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/b2c0af3b684fa80d1b27501a8bdd3d2daa467ea3992a8aa612f5ca17c2db/mypy-1.18.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa5e07ac1a60a253445797e42b8b2963c9675563a94f11291ab40718b016a7a0", size = 9765635, upload-time = "2025-09-19T00:10:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/be76d87158ebafa0309946c4a73831974d4d6ab4f4ef40c3b53a385a66fd/mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e", size = 2352367, upload-time = "2025-09-19T00:10:15.489Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + 
+[[package]] +name = "pip" +version = "25.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = 
"pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pykerberos" +version = "1.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5ca1ed745a2c5672dc838a8398101051dd5f255b130d/pykerberos-1.2.4.tar.gz", hash = "sha256:9d701ebd8fc596c99d3155d5ba45813bd5908d26ef83ba0add250edb622abed4", size = 25046, upload-time = "2022-03-09T03:54:08.546Z" } + +[[package]] +name = "pymongo" +source = { editable = "." } +dependencies = [ + { name = "dnspython" }, +] + +[package.optional-dependencies] +aws = [ + { name = "pymongo-auth-aws" }, +] +docs = [ + { name = "furo" }, + { name = "readthedocs-sphinx-search" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-rtd-theme" }, + { name = "sphinxcontrib-shellcheck" }, +] +encryption = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "pymongo-auth-aws" }, + { name = "pymongocrypt" }, +] +gssapi = [ + { name = "pykerberos", marker = "os_name != 'nt'" }, + { name = "winkerberos", marker = "os_name == 'nt'" }, +] +ocsp = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "cryptography" }, + { name = "pyopenssl" }, + { name = "requests" }, + { name = "service-identity" }, +] +snappy = [ + { name = "python-snappy" }, +] +test = [ + { name = "importlib-metadata", marker = "python_full_version < '3.13'" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, +] +zstd = [ + { name = "zstandard" }, +] + +[package.dev-dependencies] +coverage = [ + { name = "coverage" }, + { name = "pytest-cov" }, +] +gevent = [ + { name = "gevent" }, +] +mockupdb = [ + { name = "mockupdb" }, +] +perf = [ + { name = "simplejson" }, +] +pip = [ + { name = "pip" }, +] +typing = [ + { name = "mypy" }, + { name = "pip" }, + { name = "pyright" }, + { name = "typing-extensions" }, +] + +[package.metadata] +requires-dist = [ + { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')", specifier = ">=2023.7.22" }, + { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, + { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, + { name = "dnspython", specifier = ">=2.6.1,<3.0.0" }, + { name = "furo", marker = "extra == 'docs'", specifier = "==2025.9.25" }, + { name = "importlib-metadata", marker = 
"python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, + { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, + { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.13.0,<2.0.0" }, + { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, + { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, + { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, + { name = "python-snappy", marker = "extra == 'snappy'" }, + { name = "readthedocs-sphinx-search", marker = "extra == 'docs'", specifier = "~=0.3" }, + { name = "requests", marker = "extra == 'ocsp'", specifier = "<3.0.0" }, + { name = "service-identity", marker = "extra == 'ocsp'", specifier = ">=18.1.0" }, + { name = "sphinx", marker = "extra == 'docs'", specifier = ">=5.3,<9" }, + { name = "sphinx-autobuild", marker = "extra == 'docs'", specifier = ">=2020.9.1" }, + { name = "sphinx-rtd-theme", marker = "extra == 'docs'", specifier = ">=2,<4" }, + { name = "sphinxcontrib-shellcheck", marker = "extra == 'docs'", specifier = ">=1,<2" }, + { name = "winkerberos", marker = "os_name == 'nt' and extra == 'gssapi'", specifier = ">=0.5.0" }, + { name = "zstandard", marker = "extra == 'zstd'" }, +] +provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "test", "zstd"] + +[package.metadata.requires-dev] +coverage = [ + { name = "coverage", specifier = ">=5,<=7.10.6" }, + { name = "pytest-cov" }, +] +dev = [] +gevent = [{ name = "gevent", specifier = ">=20.6.0" }] +mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] +perf = [{ name = "simplejson", specifier = ">=3.17.0" }] +pip = [{ name = "pip" }] +typing = [ + { name = "mypy", specifier = "==1.18.2" }, + { name = "pip" }, + { name = "pyright", specifier = "==1.1.406" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "pymongo-auth-aws" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/37/ca8d840f322f0047b71afcec7a489b1ea1f59a5f6d29f91ad8004024736f/pymongo_auth_aws-1.3.0.tar.gz", hash = "sha256:d0fa893958dc525ca29f601c34f2ca73c860f66bc6511ec0a7da6eb7ea44e94f", size = 18559, upload-time = "2024-09-11T20:29:17.668Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/12/a997fc108416f31fac55748e5406c1c8c4e976a4073f07b5553825641611/pymongo_auth_aws-1.3.0-py3-none-any.whl", hash = "sha256:367f6d853da428a02e9e450422756133715d40f8141f47ae5d98f139a88c0ce5", size = 15470, upload-time = "2024-09-11T20:29:16.637Z" }, +] + +[[package]] +name = "pymongocrypt" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, + { name = "cryptography" }, + { name = "httpx" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/8e/dd9ed710e8fd4eec127dac1db3b3e9156ffcf340a0463a82087a12ae924e/pymongocrypt-1.16.0.tar.gz", hash = "sha256:0db0812055d00e6f5562a8d66711c4cba4b75014c363306c9b298a9fd68fccdd", size = 65354, upload-time = "2025-09-09T18:54:25.531Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fb/8b/dda0f19ce16f7b257e4aa2a8831a1a1307c1ea124a00f571cda83a04adcb/pymongocrypt-1.16.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:fbd85534880ea8525956b96e583a7021c721abbf3b51a6dbe48a57d7eba8e74a", size = 4721169, upload-time = "2025-09-09T18:54:18.642Z" }, + { url = "https://files.pythonhosted.org/packages/99/48/512a5b597d71407f9b06a14cd8e5ac376e06b780d4d54a4e69726bd48703/pymongocrypt-1.16.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85df0a78480e91bdd3a5a6da3e4cdc7d9700de8a871aa8168588981c041f1914", size = 4038242, upload-time = "2025-09-09T18:54:20.496Z" }, + { url = "https://files.pythonhosted.org/packages/3f/67/3bdeda347191d6c1ee257eb3da8c85f1278d86dfb493cc9bc26352a41d0a/pymongocrypt-1.16.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d2ebeb1b5e4f4554bf44f726e8009c59c4d7d0b412beebfece875991714676", size = 3775742, upload-time = "2025-09-09T18:54:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/dc/81/70f6947afbd1ac7be54482b44cb1b99e8e9b9cac41985e6250c4fc279e58/pymongocrypt-1.16.0-py3-none-win_amd64.whl", hash = "sha256:c20afcd89ec5fc53305e924c05c4a0321ddc73f1e4e7c8240ee2fd0123e23609", size = 1607917, upload-time = "2025-09-09T18:54:24.182Z" }, +] + +[[package]] +name = "pyopenssl" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073, upload-time = "2025-09-17T00:32:21.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268, upload-time = "2025-09-17T00:32:19.474Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.406" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = 
"sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, +] + +[[package]] +name = "pytest-cov" +version = "6.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/99/668cade231f434aaa59bbfbf49469068d2ddd945000621d3d165d2e7dd7b/pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2", size = 69432, upload-time = "2025-06-12T10:47:47.684Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-snappy" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cramjam" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/66/9185fbb6605ba92716d9f77fbb13c97eb671cd13c3ad56bd154016fbf08b/python_snappy-0.7.3.tar.gz", hash = "sha256:40216c1badfb2d38ac781ecb162a1d0ec40f8ee9747e610bcfefdfa79486cee3", size = 9337, upload-time = "2024-08-29T13:16:05.705Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155, upload-time = "2024-08-29T13:16:04.773Z" }, +] + +[[package]] +name = "readthedocs-sphinx-search" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8f/96/0c51439e3dbc634cf5328ffb173ff759b7fc9abf3276e78bf71d9fc0aa51/readthedocs-sphinx-search-0.3.2.tar.gz", hash = "sha256:277773bfa28566a86694c08e568d5a648cd80f22826545555a764d6d20c365fb", size = 21949, upload-time = "2024-01-15T16:46:22.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/3c/41bc9d7d4d936a73e380423f23996bee1691e17598d8a03c062be6aac640/readthedocs_sphinx_search-0.3.2-py3-none-any.whl", hash = "sha256:58716fd21f01581e6e67bf3bc02e79c77e10dc58b5f8e4c7cc1977e013eda173", size = 21379, upload-time = "2024-01-15T16:46:20.552Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "roman-numerals-py" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/d52bf1e65044b4e5e27d4e63e8d1579dbdec54fce685908ae09bc3720030/s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf", size = 150589, upload-time = "2025-07-18T19:22:42.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/4f/d073e09df851cfa251ef7840007d04db3293a0482ce607d2b993926089be/s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724", size = 85308, 
upload-time = "2025-07-18T19:22:40.947Z" }, +] + +[[package]] +name = "service-identity" +version = "24.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "cryptography" }, + { name = "pyasn1" }, + { name = "pyasn1-modules" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245, upload-time = "2024-10-26T07:21:57.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364, upload-time = "2024-10-26T07:21:56.302Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "simplejson" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/51b417685abd96b31308b61b9acce7ec50d8e1de8fbc39a7fd4962c60689/simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d", size = 85591, upload-time = "2025-02-15T05:18:53.15Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/c4/627214fb418cd4a17fb0230ff0b6c3bb4a85cbb48dd69c85dcc3b85df828/simplejson-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e580aa65d5f6c3bf41b9b4afe74be5d5ddba9576701c107c772d936ea2b5043a", size = 93790, upload-time = "2025-02-15T05:15:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/15/ca/56a6a2a33cbcf330c4d71af3f827c47e4e0ba791e78f2642f3d1ab02ff31/simplejson-3.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a586ce4f78cec11f22fe55c5bee0f067e803aab9bad3441afe2181693b5ebb5", size = 75707, upload-time = "2025-02-15T05:15:34.954Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c8/3d92b67e03a3b6207d97202669f9454ed700b35ade9bd4428265a078fb6c/simplejson-3.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74a1608f9e6e8c27a4008d70a54270868306d80ed48c9df7872f9f4b8ac87808", size = 75700, upload-time = "2025-02-15T05:15:37.144Z" }, + { url = "https://files.pythonhosted.org/packages/74/30/20001219d6fdca4aaa3974c96dfb6955a766b4e2cc950505a5b51fd050b0/simplejson-3.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03db8cb64154189a92a7786209f24e391644f3a3fa335658be2df2af1960b8d8", size = 138672, upload-time = "2025-02-15T05:15:38.547Z" }, + { url = "https://files.pythonhosted.org/packages/21/47/50157810876c2a7ebbd6e6346ec25eda841fe061fecaa02538a7742a3d2a/simplejson-3.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:eea7e2b7d858f6fdfbf0fe3cb846d6bd8a45446865bc09960e51f3d473c2271b", size = 146616, upload-time = "2025-02-15T05:15:39.871Z" }, + { url = "https://files.pythonhosted.org/packages/95/60/8c97cdc93096437b0aca2745aca63c880fe2315fd7f6a6ce6edbb344a2ae/simplejson-3.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e66712b17d8425bb7ff8968d4c7c7fd5a2dd7bd63728b28356223c000dd2f91f", size = 134344, upload-time = "2025-02-15T05:15:42.091Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9e/da184f0e9bb3a5d7ffcde713bd41b4fe46cca56b6f24d9bd155fac56805a/simplejson-3.20.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2cc4f6486f9f515b62f5831ff1888886619b84fc837de68f26d919ba7bbdcbc", size = 138017, upload-time = "2025-02-15T05:15:43.542Z" }, + { url = "https://files.pythonhosted.org/packages/31/db/00d1a8d9b036db98f678c8a3c69ed17d2894d1768d7a00576e787ad3e546/simplejson-3.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3c2df555ee4016148fa192e2b9cd9e60bc1d40769366134882685e90aee2a1e", size = 140118, upload-time = "2025-02-15T05:15:45.7Z" }, + { url = "https://files.pythonhosted.org/packages/52/21/57fc47eab8c1c73390b933a5ba9271f08e3e1ec83162c580357f28f5b97c/simplejson-3.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78520f04b7548a5e476b5396c0847e066f1e0a4c0c5e920da1ad65e95f410b11", size = 140314, upload-time = "2025-02-15T05:16:07.949Z" }, + { url = "https://files.pythonhosted.org/packages/ad/cc/7cfd78d1e0fa5e57350b98cfe77353b6dfa13dce21afa4060e1019223852/simplejson-3.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f4bd49ecde87b0fe9f55cc971449a32832bca9910821f7072bbfae1155eaa007", size = 148544, upload-time = "2025-02-15T05:16:09.455Z" }, + { url = "https://files.pythonhosted.org/packages/63/26/1c894a1c2bd95dc8be0cf5a2fa73b0d173105b6ca18c90cb981ff10443d0/simplejson-3.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7eaae2b88eb5da53caaffdfa50e2e12022553949b88c0df4f9a9663609373f72", size = 141172, upload-time = "2025-02-15T05:16:10.966Z" }, + { url = "https://files.pythonhosted.org/packages/93/27/0717dccc10cd9988dbf1314def52ab32678a95a95328bb37cafacf499400/simplejson-3.20.1-cp310-cp310-win32.whl", hash = "sha256:e836fb88902799eac8debc2b642300748f4860a197fa3d9ea502112b6bb8e142", size = 74181, upload-time = "2025-02-15T05:16:12.361Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/593f896573f306519332d4287b1ab8b7b888c239bbd5159f7054d7055c2d/simplejson-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a19b552b212fc3b5b96fc5ce92333d4a9ac0a800803e1f17ebb16dac4be5", size = 75738, upload-time = "2025-02-15T05:16:14.438Z" }, + { url = "https://files.pythonhosted.org/packages/76/59/74bc90d1c051bc2432c96b34bd4e8036875ab58b4fcbe4d6a5a76985f853/simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca", size = 92132, upload-time = "2025-02-15T05:16:15.743Z" }, + { url = "https://files.pythonhosted.org/packages/71/c7/1970916e0c51794fff89f76da2f632aaf0b259b87753c88a8c409623d3e1/simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0", size = 74956, upload-time = "2025-02-15T05:16:17.062Z" }, + { url = "https://files.pythonhosted.org/packages/c8/0d/98cc5909180463f1d75fac7180de62d4cdb4e82c4fef276b9e591979372c/simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f", size = 74772, upload-time = "2025-02-15T05:16:19.204Z" }, + { url = "https://files.pythonhosted.org/packages/e1/94/a30a5211a90d67725a3e8fcc1c788189f2ae2ed2b96b63ed15d0b7f5d6bb/simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10", size = 143575, upload-time = "2025-02-15T05:16:21.337Z" }, + { url = "https://files.pythonhosted.org/packages/ee/08/cdb6821f1058eb5db46d252de69ff7e6c53f05f1bae6368fe20d5b51d37e/simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38", size = 153241, upload-time = "2025-02-15T05:16:22.859Z" }, + { url = "https://files.pythonhosted.org/packages/4c/2d/ca3caeea0bdc5efc5503d5f57a2dfb56804898fb196dfada121323ee0ccb/simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd", size = 141500, upload-time = "2025-02-15T05:16:25.068Z" }, + { url = "https://files.pythonhosted.org/packages/e1/33/d3e0779d5c58245e7370c98eb969275af6b7a4a5aec3b97cbf85f09ad328/simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c", size = 144757, upload-time = "2025-02-15T05:16:28.301Z" }, + { url = "https://files.pythonhosted.org/packages/54/53/2d93128bb55861b2fa36c5944f38da51a0bc6d83e513afc6f7838440dd15/simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79", size = 144409, upload-time = "2025-02-15T05:16:29.687Z" }, + { url = "https://files.pythonhosted.org/packages/99/4c/dac310a98f897ad3435b4bdc836d92e78f09e38c5dbf28211ed21dc59fa2/simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e", size = 146082, upload-time = "2025-02-15T05:16:31.064Z" }, + { url = "https://files.pythonhosted.org/packages/ee/22/d7ba958cfed39827335b82656b1c46f89678faecda9a7677b47e87b48ee6/simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8", size = 154339, upload-time = "2025-02-15T05:16:32.719Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c8/b072b741129406a7086a0799c6f5d13096231bf35fdd87a0cffa789687fc/simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749", size = 147915, upload-time = "2025-02-15T05:16:34.291Z" }, + { url = "https://files.pythonhosted.org/packages/6c/46/8347e61e9cf3db5342a42f7fd30a81b4f5cf85977f916852d7674a540907/simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43", size = 73972, upload-time = "2025-02-15T05:16:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/01/85/b52f24859237b4e9d523d5655796d911ba3d46e242eb1959c45b6af5aedd/simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766", size = 75595, upload-time = "2025-02-15T05:16:36.957Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/eb/34c16a1ac9ba265d024dc977ad84e1659d931c0a700967c3e59a98ed7514/simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23", size = 93100, upload-time = "2025-02-15T05:16:38.801Z" }, + { url = "https://files.pythonhosted.org/packages/41/fc/2c2c007d135894971e6814e7c0806936e5bade28f8db4dd7e2a58b50debd/simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6", size = 75464, upload-time = "2025-02-15T05:16:40.905Z" }, + { url = "https://files.pythonhosted.org/packages/0f/05/2b5ecb33b776c34bb5cace5de5d7669f9b60e3ca13c113037b2ca86edfbd/simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832", size = 75112, upload-time = "2025-02-15T05:16:42.246Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/1f3609a2792f06cd4b71030485f78e91eb09cfd57bebf3116bf2980a8bac/simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb", size = 150182, upload-time = "2025-02-15T05:16:43.557Z" }, + { url = "https://files.pythonhosted.org/packages/2f/b0/053fbda38b8b602a77a4f7829def1b4f316cd8deb5440a6d3ee90790d2a4/simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595", size = 158363, upload-time = "2025-02-15T05:16:45.748Z" }, + { url = "https://files.pythonhosted.org/packages/d1/4b/2eb84ae867539a80822e92f9be4a7200dffba609275faf99b24141839110/simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf", size = 148415, upload-time = "2025-02-15T05:16:47.861Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bd/400b0bd372a5666addf2540c7358bfc3841b9ce5cdbc5cc4ad2f61627ad8/simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49", size = 152213, upload-time = "2025-02-15T05:16:49.25Z" }, + { url = "https://files.pythonhosted.org/packages/50/12/143f447bf6a827ee9472693768dc1a5eb96154f8feb140a88ce6973a3cfa/simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453", size = 150048, upload-time = "2025-02-15T05:16:51.5Z" }, + { url = "https://files.pythonhosted.org/packages/5e/ea/dd9b3e8e8ed710a66f24a22c16a907c9b539b6f5f45fd8586bd5c231444e/simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc", size = 151668, upload-time = "2025-02-15T05:16:53Z" }, + { url = "https://files.pythonhosted.org/packages/99/af/ee52a8045426a0c5b89d755a5a70cc821815ef3c333b56fbcad33c4435c0/simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f", size = 158840, upload-time = "2025-02-15T05:16:54.851Z" }, + { url = "https://files.pythonhosted.org/packages/68/db/ab32869acea6b5de7d75fa0dac07a112ded795d41eaa7e66c7813b17be95/simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3", size = 154212, upload-time = "2025-02-15T05:16:56.318Z" }, + { url = "https://files.pythonhosted.org/packages/fa/7a/e3132d454977d75a3bf9a6d541d730f76462ebf42a96fea2621498166f41/simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea", size = 74101, upload-time = "2025-02-15T05:16:57.746Z" }, + { url = "https://files.pythonhosted.org/packages/bc/5d/4e243e937fa3560107c69f6f7c2eed8589163f5ed14324e864871daa2dd9/simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17", size = 75736, upload-time = "2025-02-15T05:16:59.017Z" }, + { url = "https://files.pythonhosted.org/packages/c4/03/0f453a27877cb5a5fff16a975925f4119102cc8552f52536b9a98ef0431e/simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3", size = 93109, upload-time = "2025-02-15T05:17:00.377Z" }, + { url = "https://files.pythonhosted.org/packages/74/1f/a729f4026850cabeaff23e134646c3f455e86925d2533463420635ae54de/simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29", size = 75475, upload-time = "2025-02-15T05:17:02.544Z" }, + { url = "https://files.pythonhosted.org/packages/e2/14/50a2713fee8ff1f8d655b1a14f4a0f1c0c7246768a1b3b3d12964a4ed5aa/simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb", size = 75112, upload-time = "2025-02-15T05:17:03.875Z" }, + { url = "https://files.pythonhosted.org/packages/45/86/ea9835abb646755140e2d482edc9bc1e91997ed19a59fd77ae4c6a0facea/simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1", size = 150245, upload-time = "2025-02-15T05:17:06.899Z" }, + { url = "https://files.pythonhosted.org/packages/12/b4/53084809faede45da829fe571c65fbda8479d2a5b9c633f46b74124d56f5/simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0", size = 158465, upload-time = "2025-02-15T05:17:08.707Z" }, + { url = "https://files.pythonhosted.org/packages/a9/7d/d56579468d1660b3841e1f21c14490d103e33cf911886b22652d6e9683ec/simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078", size = 148514, upload-time = "2025-02-15T05:17:11.323Z" }, + { url = "https://files.pythonhosted.org/packages/19/e3/874b1cca3d3897b486d3afdccc475eb3a09815bf1015b01cf7fcb52a55f0/simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6", size = 152262, upload-time = "2025-02-15T05:17:13.543Z" }, + { url = "https://files.pythonhosted.org/packages/32/84/f0fdb3625292d945c2bd13a814584603aebdb38cfbe5fe9be6b46fe598c4/simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0", size = 150164, upload-time = "2025-02-15T05:17:15.021Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/51/6d625247224f01eaaeabace9aec75ac5603a42f8ebcce02c486fbda8b428/simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e", size = 151795, upload-time = "2025-02-15T05:17:16.542Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d9/bb921df6b35be8412f519e58e86d1060fddf3ad401b783e4862e0a74c4c1/simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112", size = 159027, upload-time = "2025-02-15T05:17:18.083Z" }, + { url = "https://files.pythonhosted.org/packages/03/c5/5950605e4ad023a6621cf4c931b29fd3d2a9c1f36be937230bfc83d7271d/simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a", size = 154380, upload-time = "2025-02-15T05:17:20.334Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/b74149557c5ec1e4e4d55758bda426f5d2ec0123cd01a53ae63b8de51fa3/simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87", size = 74102, upload-time = "2025-02-15T05:17:22.475Z" }, + { url = "https://files.pythonhosted.org/packages/db/a9/25282fdd24493e1022f30b7f5cdf804255c007218b2bfaa655bd7ad34b2d/simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc", size = 75736, upload-time = "2025-02-15T05:17:24.122Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ba/d32fe890a5edaf4a8518adf043bccf7866b600123f512a6de0988cf36810/simplejson-3.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8011f1dd1d676befcd4d675ebdbfdbbefd3bf350052b956ba8c699fca7d8cef", size = 93773, upload-time = "2025-02-15T05:18:28.231Z" }, + { url = "https://files.pythonhosted.org/packages/48/c7/361e7f6695b56001a04e0a5cc623cd6c82ea2f45e872e61213e405cc8a24/simplejson-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e91703a4c5fec53e36875ae426ad785f4120bd1d93b65bed4752eeccd1789e0c", size = 75697, upload-time = "2025-02-15T05:18:30.006Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2f/d0ff0b772d4ef092876eb85c99bc591c446b0502715551dad7dfc7f7c2c0/simplejson-3.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e39eaa57c7757daa25bcd21f976c46be443b73dd6c3da47fe5ce7b7048ccefe2", size = 75692, upload-time = "2025-02-15T05:18:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/26/94/cab4db9530b6ca9d62f16a260e8311b04130ccd670dab75e958fcb44590e/simplejson-3.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceab2ce2acdc7fbaa433a93006758db6ba9a659e80c4faa13b80b9d2318e9b17", size = 138106, upload-time = "2025-02-15T05:18:32.907Z" }, + { url = "https://files.pythonhosted.org/packages/40/22/11c0f746bdb44c297cea8a37d8f7ccb75ea6681132aadfb9f820d9a52647/simplejson-3.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d4f320c33277a5b715db5bf5b10dae10c19076bd6d66c2843e04bd12d1f1ea5", size = 146242, upload-time = "2025-02-15T05:18:35.223Z" }, + { url = "https://files.pythonhosted.org/packages/78/e9/b7c4c26f29b41cc41ba5f0224c47adbfa7f28427418edfd58ab122f3b584/simplejson-3.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6436c48e64378fa844d8c9e58a5ed0352bbcfd4028369a9b46679b7ab79d2d", size = 133866, upload-time = "2025-02-15T05:18:36.998Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/68/1e81ed83f38906c8859f2b973afb19302357d6003e724a6105cee0f61ec7/simplejson-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e18345c8dda5d699be8166b61f9d80aaee4545b709f1363f60813dc032dac53", size = 137444, upload-time = "2025-02-15T05:18:38.763Z" }, + { url = "https://files.pythonhosted.org/packages/9a/6b/8d1e076c543277c1d603230eec24f4dd75ebce46d351c0679526d202981f/simplejson-3.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:90b573693d1526bed576f6817e2a492eaaef68f088b57d7a9e83d122bbb49e51", size = 139617, upload-time = "2025-02-15T05:18:40.36Z" }, + { url = "https://files.pythonhosted.org/packages/d1/46/7b74803de10d4157c5cd2e89028897fa733374667bc5520a44b23b6c887a/simplejson-3.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:272cc767826e924a6bd369ea3dbf18e166ded29059c7a4d64d21a9a22424b5b5", size = 139725, upload-time = "2025-02-15T05:18:42.012Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/9991582665a7b6d95415e439bb4fbaa4faf0f77231666675a0fd1de54107/simplejson-3.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:51b41f284d603c4380732d7d619f8b34bd04bc4aa0ed0ed5f4ffd0539b14da44", size = 148010, upload-time = "2025-02-15T05:18:43.749Z" }, + { url = "https://files.pythonhosted.org/packages/54/ee/3c6e91989cdf65ec75e75662d9f15cfe167a792b893806169ea5b1da6fd2/simplejson-3.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e6697a3067d281f01de0fe96fc7cba4ea870d96d7deb7bfcf85186d74456503", size = 140624, upload-time = "2025-02-15T05:18:45.498Z" }, + { url = "https://files.pythonhosted.org/packages/9d/bd/05e13ebb7ead81c8b555f4ccc741ea7dfa0ef5c2a0c183d6a7bc50a02bca/simplejson-3.20.1-cp39-cp39-win32.whl", hash = "sha256:6dd3a1d5aca87bf947f3339b0f8e8e329f1badf548bdbff37fac63c17936da8e", size = 74148, upload-time = "2025-02-15T05:18:47.27Z" }, + { url = "https://files.pythonhosted.org/packages/88/c9/d8bf87aaebec5a4c3ccfd5228689578e2fe77027d6114a259255d54969bf/simplejson-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:463f1fca8fbf23d088e5850fdd0dd4d5faea8900a9f9680270bd98fd649814ca", size = 75732, upload-time = "2025-02-15T05:18:49.598Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/00f02a0a921556dd5a6db1ef2926a1bc7a8bbbfb1c49cfed68a275b8ab2b/simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697", size = 57121, upload-time = "2025-02-15T05:18:51.243Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, 
upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "snowballstemmer" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418, upload-time = "2025-04-20T18:50:08.518Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" }, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "alabaster", version = "0.7.16", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "babel", marker = "python_full_version < '3.10'" }, + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version < '3.10'" }, + { name = "imagesize", marker = "python_full_version < '3.10'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2", marker = "python_full_version < '3.10'" }, + { name = "packaging", marker = "python_full_version < '3.10'" }, + { name = "pygments", marker = "python_full_version < '3.10'" }, + { name = "requests", marker = "python_full_version < '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = 
"sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911, upload-time = "2024-07-20T14:46:56.059Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624, upload-time = "2024-07-20T14:46:52.142Z" }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "babel", marker = "python_full_version == '3.10.*'" }, + { name = "colorama", marker = "python_full_version == '3.10.*' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version == '3.10.*'" }, + { name = "imagesize", marker = "python_full_version == '3.10.*'" }, + { name = "jinja2", marker = "python_full_version == '3.10.*'" }, + { name = "packaging", marker = "python_full_version == '3.10.*'" }, + { name = "pygments", marker = "python_full_version == '3.10.*'" }, + { name = "requests", marker = "python_full_version == '3.10.*'" }, + { name = "snowballstemmer", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version == '3.10.*'" }, + { name = "tomli", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" }, +] + +[[package]] +name = "sphinx" +version = "8.2.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "babel", marker = "python_full_version >= '3.11'" }, + { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version >= '3.11'" }, + { name = "imagesize", marker = "python_full_version >= '3.11'" }, + { name = "jinja2", marker = "python_full_version >= '3.11'" }, + { name = "packaging", marker = "python_full_version >= '3.11'" }, + { name = "pygments", marker = "python_full_version >= '3.11'" }, + { name = "requests", marker = "python_full_version 
>= '3.11'" }, + { name = "roman-numerals-py", marker = "python_full_version >= '3.11'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ad/4360e50ed56cb483667b8e6dadf2d3fda62359593faabbe749a27c4eaca6/sphinx-8.2.3.tar.gz", hash = "sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348", size = 8321876, upload-time = "2025-03-02T22:31:59.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/53/136e9eca6e0b9dc0e1962e2c908fbea2e5ac000c2a2fbd9a35797958c48b/sphinx-8.2.3-py3-none-any.whl", hash = "sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3", size = 3589741, upload-time = "2025-03-02T22:31:56.836Z" }, +] + +[[package]] +name = "sphinx-autobuild" +version = "2024.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "starlette" }, + { name = "uvicorn" }, + { name = "watchfiles" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a5/2c/155e1de2c1ba96a72e5dba152c509a8b41e047ee5c2def9e9f0d812f8be7/sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1", size = 14023, upload-time = "2024-10-02T23:15:30.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/c0/eba125db38c84d3c74717008fd3cb5000b68cd7e2cbafd1349c6a38c3d3b/sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa", size = 11908, upload-time = "2024-10-02T23:15:28.739Z" }, +] + +[[package]] +name = "sphinx-basic-ng" +version = "1.0.0b2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736, upload-time = "2023-07-08T18:40:54.166Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", 
hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496, upload-time = "2023-07-08T18:40:52.659Z" }, +] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-jquery" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463, upload-time = "2024-11-13T11:06:04.545Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561, upload-time = "2024-11-13T11:06:02.094Z" }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = 
"2024-07-29T01:09:36.407Z" }, +] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331, upload-time = "2023-03-14T15:01:01.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104, upload-time = "2023-03-14T15:01:00.356Z" }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, +] + +[[package]] +name = "sphinxcontrib-shellcheck" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "decorator" }, + { name = "docutils" }, + { name = "six" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/2b/20717a5e0c7ee99dfd5fcdf11a8cf0ab02533cf62775f24d344ea5cf48c1/sphinxcontrib-shellcheck-1.1.2.zip", hash = "sha256:475a3ae12a1cfc1bc26cff57f0dd15561213818e3b470b3eacc4bb8be7b129c0", size = 338739, upload-time = "2020-03-30T01:51:39.993Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/9c/1ff7fe5174f944fac0fcb53bdaac7b98d73a98dd2ca800d95af6af9edb9a/sphinxcontrib_shellcheck-1.1.2-py35-none-any.whl", hash = "sha256:c0449dc9402521ab1d05a1b9eb8c9099707da64824341686dab4f620dc688514", size = 11532, upload-time = "2020-03-30T01:51:34.913Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/833388d3127d8dc0d5558bf52225eb20ed024ac46ef8ef4bffe7298ceb3d/sphinxcontrib_shellcheck-1.1.2-py36-none-any.whl", hash = "sha256:bcd8ffd26e6430deff9ffd10705683b502ace3fc8b4d1ba84496b3752f65fe52", size = 11533, upload-time = "2020-03-30T01:51:36.422Z" }, + { url = "https://files.pythonhosted.org/packages/9d/b5/cdc74763bcf0916f47d053830c00114f1de65d97ea2281b66bbf2a587b8a/sphinxcontrib_shellcheck-1.1.2-py37-none-any.whl", hash = "sha256:46d1aba8201bbfc7a2c51e08446cab36bdab318c997223c8fc40733a5eedc71f", size = 11533, upload-time = "2020-03-30T01:51:37.351Z" }, + { url = "https://files.pythonhosted.org/packages/58/ba/cf15480bc238a15e10604ee7f0e3e20ea0bf9a55a4f0b4e50571e8d13e60/sphinxcontrib_shellcheck-1.1.2-py38-none-any.whl", hash = "sha256:4c5f2840418cd1d7d662c0b3f51a07625f1a8f92755b19347ce85e8258e9d847", size = 11532, upload-time = "2020-03-30T01:51:38.858Z" }, +] + +[[package]] +name = "starlette" +version = "0.47.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = 
"2024-11-27T22:37:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, +] + +[[package]] +name = "urllib3" +version = "1.26.20" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = 
"sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380, upload-time = "2024-08-29T15:43:11.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225, upload-time = "2024-08-29T15:43:08.921Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, +] + +[[package]] +name = "watchfiles" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/9a/d451fcc97d029f5812e898fd30a53fd8c15c7bbd058fd75cfc6beb9bd761/watchfiles-1.1.0.tar.gz", hash = "sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575", size = 94406, upload-time = "2025-06-15T19:06:59.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/dd/579d1dc57f0f895426a1211c4ef3b0cb37eb9e642bb04bdcd962b5df206a/watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc", size = 405757, upload-time = "2025-06-15T19:04:51.058Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/7a0318cd874393344d48c34d53b3dd419466adf59a29ba5b51c88dd18b86/watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df", size = 397511, upload-time = "2025-06-15T19:04:52.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/be/503514656d0555ec2195f60d810eca29b938772e9bfb112d5cd5ad6f6a9e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68", size = 450739, upload-time = "2025-06-15T19:04:54.203Z" }, + { url = "https://files.pythonhosted.org/packages/4e/0d/a05dd9e5f136cdc29751816d0890d084ab99f8c17b86f25697288ca09bc7/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc", size = 458106, upload-time = "2025-06-15T19:04:55.607Z" }, + { url = "https://files.pythonhosted.org/packages/f1/fa/9cd16e4dfdb831072b7ac39e7bea986e52128526251038eb481effe9f48e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97", size = 484264, upload-time = "2025-06-15T19:04:57.009Z" }, + { url = "https://files.pythonhosted.org/packages/32/04/1da8a637c7e2b70e750a0308e9c8e662ada0cca46211fa9ef24a23937e0b/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c", size = 597612, upload-time = "2025-06-15T19:04:58.409Z" }, + { url = "https://files.pythonhosted.org/packages/30/01/109f2762e968d3e58c95731a206e5d7d2a7abaed4299dd8a94597250153c/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5", size = 477242, upload-time = "2025-06-15T19:04:59.786Z" }, + { url = "https://files.pythonhosted.org/packages/b5/b8/46f58cf4969d3b7bc3ca35a98e739fa4085b0657a1540ccc29a1a0bc016f/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9", size = 453148, upload-time = "2025-06-15T19:05:01.103Z" }, + { url = "https://files.pythonhosted.org/packages/a5/cd/8267594263b1770f1eb76914940d7b2d03ee55eca212302329608208e061/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72", size = 626574, upload-time = "2025-06-15T19:05:02.582Z" }, + { url = "https://files.pythonhosted.org/packages/a1/2f/7f2722e85899bed337cba715723e19185e288ef361360718973f891805be/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc", size = 624378, upload-time = "2025-06-15T19:05:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/bf/20/64c88ec43d90a568234d021ab4b2a6f42a5230d772b987c3f9c00cc27b8b/watchfiles-1.1.0-cp310-cp310-win32.whl", hash = "sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587", size = 279829, upload-time = "2025-06-15T19:05:04.822Z" }, + { url = "https://files.pythonhosted.org/packages/39/5c/a9c1ed33de7af80935e4eac09570de679c6e21c07070aa99f74b4431f4d6/watchfiles-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82", size = 292192, upload-time = "2025-06-15T19:05:06.348Z" }, + { url = "https://files.pythonhosted.org/packages/8b/78/7401154b78ab484ccaaeef970dc2af0cb88b5ba8a1b415383da444cdd8d3/watchfiles-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2", size = 405751, upload-time = 
"2025-06-15T19:05:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/e6c3dbc1f78d001589b75e56a288c47723de28c580ad715eb116639152b5/watchfiles-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c", size = 397313, upload-time = "2025-06-15T19:05:08.764Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a2/8afa359ff52e99af1632f90cbf359da46184207e893a5f179301b0c8d6df/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d", size = 450792, upload-time = "2025-06-15T19:05:09.869Z" }, + { url = "https://files.pythonhosted.org/packages/1d/bf/7446b401667f5c64972a57a0233be1104157fc3abf72c4ef2666c1bd09b2/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7", size = 458196, upload-time = "2025-06-15T19:05:11.91Z" }, + { url = "https://files.pythonhosted.org/packages/58/2f/501ddbdfa3fa874ea5597c77eeea3d413579c29af26c1091b08d0c792280/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c", size = 484788, upload-time = "2025-06-15T19:05:13.373Z" }, + { url = "https://files.pythonhosted.org/packages/61/1e/9c18eb2eb5c953c96bc0e5f626f0e53cfef4bd19bd50d71d1a049c63a575/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575", size = 597879, upload-time = "2025-06-15T19:05:14.725Z" }, + { url = "https://files.pythonhosted.org/packages/8b/6c/1467402e5185d89388b4486745af1e0325007af0017c3384cc786fff0542/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8", size = 477447, upload-time = "2025-06-15T19:05:15.775Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a1/ec0a606bde4853d6c4a578f9391eeb3684a9aea736a8eb217e3e00aa89a1/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f", size = 453145, upload-time = "2025-06-15T19:05:17.17Z" }, + { url = "https://files.pythonhosted.org/packages/90/b9/ef6f0c247a6a35d689fc970dc7f6734f9257451aefb30def5d100d6246a5/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4", size = 626539, upload-time = "2025-06-15T19:05:18.557Z" }, + { url = "https://files.pythonhosted.org/packages/34/44/6ffda5537085106ff5aaa762b0d130ac6c75a08015dd1621376f708c94de/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d", size = 624472, upload-time = "2025-06-15T19:05:19.588Z" }, + { url = "https://files.pythonhosted.org/packages/c3/e3/71170985c48028fa3f0a50946916a14055e741db11c2e7bc2f3b61f4d0e3/watchfiles-1.1.0-cp311-cp311-win32.whl", hash = "sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2", size = 279348, upload-time = "2025-06-15T19:05:20.856Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/3e39c68b68a7a171070f81fc2561d23ce8d6859659406842a0e4bebf3bba/watchfiles-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12", size = 292607, 
upload-time = "2025-06-15T19:05:21.937Z" }, + { url = "https://files.pythonhosted.org/packages/61/9f/2973b7539f2bdb6ea86d2c87f70f615a71a1fc2dba2911795cea25968aea/watchfiles-1.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a", size = 285056, upload-time = "2025-06-15T19:05:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b8/858957045a38a4079203a33aaa7d23ea9269ca7761c8a074af3524fbb240/watchfiles-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179", size = 402339, upload-time = "2025-06-15T19:05:24.516Z" }, + { url = "https://files.pythonhosted.org/packages/80/28/98b222cca751ba68e88521fabd79a4fab64005fc5976ea49b53fa205d1fa/watchfiles-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5", size = 394409, upload-time = "2025-06-15T19:05:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/50/dee79968566c03190677c26f7f47960aff738d32087087bdf63a5473e7df/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297", size = 450939, upload-time = "2025-06-15T19:05:26.494Z" }, + { url = "https://files.pythonhosted.org/packages/40/45/a7b56fb129700f3cfe2594a01aa38d033b92a33dddce86c8dfdfc1247b72/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0", size = 457270, upload-time = "2025-06-15T19:05:27.466Z" }, + { url = "https://files.pythonhosted.org/packages/b5/c8/fa5ef9476b1d02dc6b5e258f515fcaaecf559037edf8b6feffcbc097c4b8/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e", size = 483370, upload-time = "2025-06-15T19:05:28.548Z" }, + { url = "https://files.pythonhosted.org/packages/98/68/42cfcdd6533ec94f0a7aab83f759ec11280f70b11bfba0b0f885e298f9bd/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee", size = 598654, upload-time = "2025-06-15T19:05:29.997Z" }, + { url = "https://files.pythonhosted.org/packages/d3/74/b2a1544224118cc28df7e59008a929e711f9c68ce7d554e171b2dc531352/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd", size = 478667, upload-time = "2025-06-15T19:05:31.172Z" }, + { url = "https://files.pythonhosted.org/packages/8c/77/e3362fe308358dc9f8588102481e599c83e1b91c2ae843780a7ded939a35/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f", size = 452213, upload-time = "2025-06-15T19:05:32.299Z" }, + { url = "https://files.pythonhosted.org/packages/6e/17/c8f1a36540c9a1558d4faf08e909399e8133599fa359bf52ec8fcee5be6f/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4", size = 626718, upload-time = "2025-06-15T19:05:33.415Z" }, + { url = "https://files.pythonhosted.org/packages/26/45/fb599be38b4bd38032643783d7496a26a6f9ae05dea1a42e58229a20ac13/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f", size = 623098, upload-time = "2025-06-15T19:05:34.534Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/fdf40e038475498e160cd167333c946e45d8563ae4dd65caf757e9ffe6b4/watchfiles-1.1.0-cp312-cp312-win32.whl", hash = "sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd", size = 279209, upload-time = "2025-06-15T19:05:35.577Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d3/3ae9d5124ec75143bdf088d436cba39812122edc47709cd2caafeac3266f/watchfiles-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47", size = 292786, upload-time = "2025-06-15T19:05:36.559Z" }, + { url = "https://files.pythonhosted.org/packages/26/2f/7dd4fc8b5f2b34b545e19629b4a018bfb1de23b3a496766a2c1165ca890d/watchfiles-1.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6", size = 284343, upload-time = "2025-06-15T19:05:37.5Z" }, + { url = "https://files.pythonhosted.org/packages/d3/42/fae874df96595556a9089ade83be34a2e04f0f11eb53a8dbf8a8a5e562b4/watchfiles-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30", size = 402004, upload-time = "2025-06-15T19:05:38.499Z" }, + { url = "https://files.pythonhosted.org/packages/fa/55/a77e533e59c3003d9803c09c44c3651224067cbe7fb5d574ddbaa31e11ca/watchfiles-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a", size = 393671, upload-time = "2025-06-15T19:05:39.52Z" }, + { url = "https://files.pythonhosted.org/packages/05/68/b0afb3f79c8e832e6571022611adbdc36e35a44e14f129ba09709aa4bb7a/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc", size = 449772, upload-time = "2025-06-15T19:05:40.897Z" }, + { url = "https://files.pythonhosted.org/packages/ff/05/46dd1f6879bc40e1e74c6c39a1b9ab9e790bf1f5a2fe6c08b463d9a807f4/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b", size = 456789, upload-time = "2025-06-15T19:05:42.045Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ca/0eeb2c06227ca7f12e50a47a3679df0cd1ba487ea19cf844a905920f8e95/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895", size = 482551, upload-time = "2025-06-15T19:05:43.781Z" }, + { url = "https://files.pythonhosted.org/packages/31/47/2cecbd8694095647406645f822781008cc524320466ea393f55fe70eed3b/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a", size = 597420, upload-time = "2025-06-15T19:05:45.244Z" }, + { url = "https://files.pythonhosted.org/packages/d9/7e/82abc4240e0806846548559d70f0b1a6dfdca75c1b4f9fa62b504ae9b083/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b", size = 477950, upload-time = "2025-06-15T19:05:46.332Z" }, + { url = "https://files.pythonhosted.org/packages/25/0d/4d564798a49bf5482a4fa9416dea6b6c0733a3b5700cb8a5a503c4b15853/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c", size = 451706, upload-time = "2025-06-15T19:05:47.459Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/5516cf46b033192d544102ea07c65b6f770f10ed1d0a6d388f5d3874f6e4/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b", size = 625814, upload-time = "2025-06-15T19:05:48.654Z" }, + { url = "https://files.pythonhosted.org/packages/0c/dd/7c1331f902f30669ac3e754680b6edb9a0dd06dea5438e61128111fadd2c/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb", size = 622820, upload-time = "2025-06-15T19:05:50.088Z" }, + { url = "https://files.pythonhosted.org/packages/1b/14/36d7a8e27cd128d7b1009e7715a7c02f6c131be9d4ce1e5c3b73d0e342d8/watchfiles-1.1.0-cp313-cp313-win32.whl", hash = "sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9", size = 279194, upload-time = "2025-06-15T19:05:51.186Z" }, + { url = "https://files.pythonhosted.org/packages/25/41/2dd88054b849aa546dbeef5696019c58f8e0774f4d1c42123273304cdb2e/watchfiles-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7", size = 292349, upload-time = "2025-06-15T19:05:52.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/cf/421d659de88285eb13941cf11a81f875c176f76a6d99342599be88e08d03/watchfiles-1.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5", size = 283836, upload-time = "2025-06-15T19:05:53.265Z" }, + { url = "https://files.pythonhosted.org/packages/45/10/6faf6858d527e3599cc50ec9fcae73590fbddc1420bd4fdccfebffeedbc6/watchfiles-1.1.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1", size = 400343, upload-time = "2025-06-15T19:05:54.252Z" }, + { url = "https://files.pythonhosted.org/packages/03/20/5cb7d3966f5e8c718006d0e97dfe379a82f16fecd3caa7810f634412047a/watchfiles-1.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339", size = 392916, upload-time = "2025-06-15T19:05:55.264Z" }, + { url = "https://files.pythonhosted.org/packages/8c/07/d8f1176328fa9e9581b6f120b017e286d2a2d22ae3f554efd9515c8e1b49/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633", size = 449582, upload-time = "2025-06-15T19:05:56.317Z" }, + { url = "https://files.pythonhosted.org/packages/66/e8/80a14a453cf6038e81d072a86c05276692a1826471fef91df7537dba8b46/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011", size = 456752, upload-time = "2025-06-15T19:05:57.359Z" }, + { url = "https://files.pythonhosted.org/packages/5a/25/0853b3fe0e3c2f5af9ea60eb2e781eade939760239a72c2d38fc4cc335f6/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670", size = 481436, upload-time = "2025-06-15T19:05:58.447Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9e/4af0056c258b861fbb29dcb36258de1e2b857be4a9509e6298abcf31e5c9/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf", size = 596016, upload-time = "2025-06-15T19:05:59.59Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fa/95d604b58aa375e781daf350897aaaa089cff59d84147e9ccff2447c8294/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4", size = 476727, upload-time = "2025-06-15T19:06:01.086Z" }, + { url = "https://files.pythonhosted.org/packages/65/95/fe479b2664f19be4cf5ceeb21be05afd491d95f142e72d26a42f41b7c4f8/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20", size = 451864, upload-time = "2025-06-15T19:06:02.144Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8a/3c4af14b93a15ce55901cd7a92e1a4701910f1768c78fb30f61d2b79785b/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef", size = 625626, upload-time = "2025-06-15T19:06:03.578Z" }, + { url = "https://files.pythonhosted.org/packages/da/f5/cf6aa047d4d9e128f4b7cde615236a915673775ef171ff85971d698f3c2c/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb", size = 622744, upload-time = "2025-06-15T19:06:05.066Z" }, + { url = "https://files.pythonhosted.org/packages/2c/00/70f75c47f05dea6fd30df90f047765f6fc2d6eb8b5a3921379b0b04defa2/watchfiles-1.1.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297", size = 402114, upload-time = "2025-06-15T19:06:06.186Z" }, + { url = "https://files.pythonhosted.org/packages/53/03/acd69c48db4a1ed1de26b349d94077cca2238ff98fd64393f3e97484cae6/watchfiles-1.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018", size = 393879, upload-time = "2025-06-15T19:06:07.369Z" }, + { url = "https://files.pythonhosted.org/packages/2f/c8/a9a2a6f9c8baa4eceae5887fecd421e1b7ce86802bcfc8b6a942e2add834/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0", size = 450026, upload-time = "2025-06-15T19:06:08.476Z" }, + { url = "https://files.pythonhosted.org/packages/fe/51/d572260d98388e6e2b967425c985e07d47ee6f62e6455cefb46a6e06eda5/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12", size = 457917, upload-time = "2025-06-15T19:06:09.988Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/4258e52917bf9f12909b6ec314ff9636276f3542f9d3807d143f27309104/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb", size = 483602, upload-time = "2025-06-15T19:06:11.088Z" }, + { url = "https://files.pythonhosted.org/packages/84/99/bee17a5f341a4345fe7b7972a475809af9e528deba056f8963d61ea49f75/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77", size = 596758, upload-time = "2025-06-15T19:06:12.197Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/76/e4bec1d59b25b89d2b0716b41b461ed655a9a53c60dc78ad5771fda5b3e6/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92", size = 477601, upload-time = "2025-06-15T19:06:13.391Z" }, + { url = "https://files.pythonhosted.org/packages/1f/fa/a514292956f4a9ce3c567ec0c13cce427c158e9f272062685a8a727d08fc/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e", size = 451936, upload-time = "2025-06-15T19:06:14.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/5d/c3bf927ec3bbeb4566984eba8dd7a8eb69569400f5509904545576741f88/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b", size = 626243, upload-time = "2025-06-15T19:06:16.232Z" }, + { url = "https://files.pythonhosted.org/packages/e6/65/6e12c042f1a68c556802a84d54bb06d35577c81e29fba14019562479159c/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259", size = 623073, upload-time = "2025-06-15T19:06:17.457Z" }, + { url = "https://files.pythonhosted.org/packages/89/ab/7f79d9bf57329e7cbb0a6fd4c7bd7d0cee1e4a8ef0041459f5409da3506c/watchfiles-1.1.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f", size = 400872, upload-time = "2025-06-15T19:06:18.57Z" }, + { url = "https://files.pythonhosted.org/packages/df/d5/3f7bf9912798e9e6c516094db6b8932df53b223660c781ee37607030b6d3/watchfiles-1.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e", size = 392877, upload-time = "2025-06-15T19:06:19.55Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c5/54ec7601a2798604e01c75294770dbee8150e81c6e471445d7601610b495/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa", size = 449645, upload-time = "2025-06-15T19:06:20.66Z" }, + { url = "https://files.pythonhosted.org/packages/0a/04/c2f44afc3b2fce21ca0b7802cbd37ed90a29874f96069ed30a36dfe57c2b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8", size = 457424, upload-time = "2025-06-15T19:06:21.712Z" }, + { url = "https://files.pythonhosted.org/packages/9f/b0/eec32cb6c14d248095261a04f290636da3df3119d4040ef91a4a50b29fa5/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f", size = 481584, upload-time = "2025-06-15T19:06:22.777Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/ca4bb71c68a937d7145aa25709e4f5d68eb7698a25ce266e84b55d591bbd/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e", size = 596675, upload-time = "2025-06-15T19:06:24.226Z" }, + { url = "https://files.pythonhosted.org/packages/a1/dd/b0e4b7fb5acf783816bc950180a6cd7c6c1d2cf7e9372c0ea634e722712b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb", size = 
477363, upload-time = "2025-06-15T19:06:25.42Z" }, + { url = "https://files.pythonhosted.org/packages/69/c4/088825b75489cb5b6a761a4542645718893d395d8c530b38734f19da44d2/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147", size = 452240, upload-time = "2025-06-15T19:06:26.552Z" }, + { url = "https://files.pythonhosted.org/packages/10/8c/22b074814970eeef43b7c44df98c3e9667c1f7bf5b83e0ff0201b0bd43f9/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8", size = 625607, upload-time = "2025-06-15T19:06:27.606Z" }, + { url = "https://files.pythonhosted.org/packages/32/fa/a4f5c2046385492b2273213ef815bf71a0d4c1943b784fb904e184e30201/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db", size = 623315, upload-time = "2025-06-15T19:06:29.076Z" }, + { url = "https://files.pythonhosted.org/packages/47/8a/a45db804b9f0740f8408626ab2bca89c3136432e57c4673b50180bf85dd9/watchfiles-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa", size = 406400, upload-time = "2025-06-15T19:06:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/64/06/a08684f628fb41addd451845aceedc2407dc3d843b4b060a7c4350ddee0c/watchfiles-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433", size = 397920, upload-time = "2025-06-15T19:06:31.315Z" }, + { url = "https://files.pythonhosted.org/packages/79/e6/e10d5675af653b1b07d4156906858041149ca222edaf8995877f2605ba9e/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4", size = 451196, upload-time = "2025-06-15T19:06:32.435Z" }, + { url = "https://files.pythonhosted.org/packages/f6/8a/facd6988100cd0f39e89f6c550af80edb28e3a529e1ee662e750663e6b36/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7", size = 458218, upload-time = "2025-06-15T19:06:33.503Z" }, + { url = "https://files.pythonhosted.org/packages/90/26/34cbcbc4d0f2f8f9cc243007e65d741ae039f7a11ef8ec6e9cd25bee08d1/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f", size = 484851, upload-time = "2025-06-15T19:06:34.541Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1f/f59faa9fc4b0e36dbcdd28a18c430416443b309d295d8b82e18192d120ad/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf", size = 599520, upload-time = "2025-06-15T19:06:35.785Z" }, + { url = "https://files.pythonhosted.org/packages/83/72/3637abecb3bf590529f5154ca000924003e5f4bbb9619744feeaf6f0b70b/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29", size = 477956, upload-time = "2025-06-15T19:06:36.965Z" }, + { url = "https://files.pythonhosted.org/packages/f7/f3/d14ffd9acc0c1bd4790378995e320981423263a5d70bd3929e2e0dc87fff/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e", size = 453196, upload-time = "2025-06-15T19:06:38.024Z" }, + { url = "https://files.pythonhosted.org/packages/7f/38/78ad77bd99e20c0fdc82262be571ef114fc0beef9b43db52adb939768c38/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86", size = 627479, upload-time = "2025-06-15T19:06:39.442Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/549d50a22fcc83f1017c6427b1c76c053233f91b526f4ad7a45971e70c0b/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f", size = 624414, upload-time = "2025-06-15T19:06:40.859Z" }, + { url = "https://files.pythonhosted.org/packages/72/de/57d6e40dc9140af71c12f3a9fc2d3efc5529d93981cd4d265d484d7c9148/watchfiles-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267", size = 280020, upload-time = "2025-06-15T19:06:41.89Z" }, + { url = "https://files.pythonhosted.org/packages/88/bb/7d287fc2a762396b128a0fca2dbae29386e0a242b81d1046daf389641db3/watchfiles-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc", size = 292758, upload-time = "2025-06-15T19:06:43.251Z" }, + { url = "https://files.pythonhosted.org/packages/be/7c/a3d7c55cfa377c2f62c4ae3c6502b997186bc5e38156bafcb9b653de9a6d/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5", size = 406748, upload-time = "2025-06-15T19:06:44.2Z" }, + { url = "https://files.pythonhosted.org/packages/38/d0/c46f1b2c0ca47f3667b144de6f0515f6d1c670d72f2ca29861cac78abaa1/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d", size = 398801, upload-time = "2025-06-15T19:06:45.774Z" }, + { url = "https://files.pythonhosted.org/packages/70/9c/9a6a42e97f92eeed77c3485a43ea96723900aefa3ac739a8c73f4bff2cd7/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea", size = 451528, upload-time = "2025-06-15T19:06:46.791Z" }, + { url = "https://files.pythonhosted.org/packages/51/7b/98c7f4f7ce7ff03023cf971cd84a3ee3b790021ae7584ffffa0eb2554b96/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6", size = 454095, upload-time = "2025-06-15T19:06:48.211Z" }, + { url = "https://files.pythonhosted.org/packages/8c/6b/686dcf5d3525ad17b384fd94708e95193529b460a1b7bf40851f1328ec6e/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3", size = 406910, upload-time = "2025-06-15T19:06:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d3/71c2dcf81dc1edcf8af9f4d8d63b1316fb0a2dd90cbfd427e8d9dd584a90/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c", size = 398816, upload-time = "2025-06-15T19:06:50.433Z" }, + { url = "https://files.pythonhosted.org/packages/b8/fa/12269467b2fc006f8fce4cd6c3acfa77491dd0777d2a747415f28ccc8c60/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432", size = 451584, upload-time = "2025-06-15T19:06:51.834Z" }, + { url = "https://files.pythonhosted.org/packages/bd/d3/254cea30f918f489db09d6a8435a7de7047f8cb68584477a515f160541d6/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792", size = 454009, upload-time = "2025-06-15T19:06:52.896Z" }, + { url = "https://files.pythonhosted.org/packages/48/93/5c96bdb65e7f88f7da40645f34c0a3c317a2931ed82161e93c91e8eddd27/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9", size = 406640, upload-time = "2025-06-15T19:06:54.868Z" }, + { url = "https://files.pythonhosted.org/packages/e3/25/09204836e93e1b99cce88802ce87264a1d20610c7a8f6de24def27ad95b1/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a", size = 398543, upload-time = "2025-06-15T19:06:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/5e/dc/6f324a6f32c5ab73b54311b5f393a79df34c1584b8d2404cf7e6d780aa5d/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866", size = 451787, upload-time = "2025-06-15T19:06:56.998Z" }, + { url = "https://files.pythonhosted.org/packages/45/5d/1d02ef4caa4ec02389e72d5594cdf9c67f1800a7c380baa55063c30c6598/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277", size = 454272, upload-time = "2025-06-15T19:06:58.055Z" }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" }, + { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" }, + { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" }, + { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" }, + { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" }, + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = 
"2025-03-05T20:02:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", 
size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/3fff0bcbe339a6fa6a3b9e3fbc2bfb321ec2f4cd233692272c5a8d6cf801/websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5", size = 175424, upload-time = "2025-03-05T20:02:56.505Z" }, + { url = "https://files.pythonhosted.org/packages/46/e6/519054c2f477def4165b0ec060ad664ed174e140b0d1cbb9fafa4a54f6db/websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a", size = 173077, upload-time = "2025-03-05T20:02:58.37Z" }, + { url = "https://files.pythonhosted.org/packages/1a/21/c0712e382df64c93a0d16449ecbf87b647163485ca1cc3f6cbadb36d2b03/websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b", size = 173324, upload-time = "2025-03-05T20:02:59.773Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/cb/51ba82e59b3a664df54beed8ad95517c1b4dc1a913730e7a7db778f21291/websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770", size = 182094, upload-time = "2025-03-05T20:03:01.827Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0f/bf3788c03fec679bcdaef787518dbe60d12fe5615a544a6d4cf82f045193/websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb", size = 181094, upload-time = "2025-03-05T20:03:03.123Z" }, + { url = "https://files.pythonhosted.org/packages/5e/da/9fb8c21edbc719b66763a571afbaf206cb6d3736d28255a46fc2fe20f902/websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054", size = 181397, upload-time = "2025-03-05T20:03:04.443Z" }, + { url = "https://files.pythonhosted.org/packages/2e/65/65f379525a2719e91d9d90c38fe8b8bc62bd3c702ac651b7278609b696c4/websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee", size = 181794, upload-time = "2025-03-05T20:03:06.708Z" }, + { url = "https://files.pythonhosted.org/packages/d9/26/31ac2d08f8e9304d81a1a7ed2851c0300f636019a57cbaa91342015c72cc/websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed", size = 181194, upload-time = "2025-03-05T20:03:08.844Z" }, + { url = "https://files.pythonhosted.org/packages/98/72/1090de20d6c91994cd4b357c3f75a4f25ee231b63e03adea89671cc12a3f/websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880", size = 181164, upload-time = "2025-03-05T20:03:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/2d/37/098f2e1c103ae8ed79b0e77f08d83b0ec0b241cf4b7f2f10edd0126472e1/websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411", size = 176381, upload-time = "2025-03-05T20:03:12.77Z" }, + { url = "https://files.pythonhosted.org/packages/75/8b/a32978a3ab42cebb2ebdd5b05df0696a09f4d436ce69def11893afa301f0/websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4", size = 176841, upload-time = "2025-03-05T20:03:14.367Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 
174599, upload-time = "2025-03-05T20:03:21.1Z" }, + { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/b7/48/4b67623bac4d79beb3a6bb27b803ba75c1bdedc06bd827e465803690a4b2/websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940", size = 173106, upload-time = "2025-03-05T20:03:29.404Z" }, + { url = "https://files.pythonhosted.org/packages/ed/f0/adb07514a49fe5728192764e04295be78859e4a537ab8fcc518a3dbb3281/websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e", size = 173339, upload-time = "2025-03-05T20:03:30.755Z" }, + { url = "https://files.pythonhosted.org/packages/87/28/bd23c6344b18fb43df40d0700f6d3fffcd7cef14a6995b4f976978b52e62/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9", size = 174597, upload-time = "2025-03-05T20:03:32.247Z" }, + { url = "https://files.pythonhosted.org/packages/6d/79/ca288495863d0f23a60f546f0905ae8f3ed467ad87f8b6aceb65f4c013e4/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b", size = 174205, upload-time = "2025-03-05T20:03:33.731Z" }, + { url = "https://files.pythonhosted.org/packages/04/e4/120ff3180b0872b1fe6637f6f995bcb009fb5c87d597c1fc21456f50c848/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f", size = 174150, upload-time = "2025-03-05T20:03:35.757Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c3/30e2f9c539b8da8b1d76f64012f3b19253271a63413b2d3adb94b143407f/websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123", size = 176877, upload-time = "2025-03-05T20:03:37.199Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + +[[package]] +name = "winkerberos" +version = "0.12.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/75/86d470935167eb1c40d53498993e14cc021d9611a539d61c9b4202c291ab/winkerberos-0.12.2.tar.gz", hash = "sha256:ff91daed04727a0362892802ee093d8da11f08536393526bdf3bc64e04079faa", size = 35672, upload-time = "2025-04-02T14:41:48.274Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/ac/c6ce495af45371ffd85a6a3d24c2ced679b8dbcf3b8c6beca093706b1620/winkerberos-0.12.2-cp310-cp310-win32.whl", hash = "sha256:f8b751bd5a28e6a9146f154bed395c30ce4f245448addc763f98cb8843879027", size = 25331, upload-time = "2025-04-02T14:41:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/cb/7b/ad32174c3ed4710cd2ad8f20171f5061cb13603f091d714d5aa6b30d51f0/winkerberos-0.12.2-cp310-cp310-win_amd64.whl", hash = "sha256:4be3b0de548b80f52a6544dff9d571da6cdfde590176a01477358b3808b12dfa", size = 27670, upload-time = "2025-04-02T14:41:37.68Z" }, + { url = "https://files.pythonhosted.org/packages/91/12/23b29d359dee9f7a8243cb0040ea1834acd1af8cbc38cfe1c7ca82ab4ec0/winkerberos-0.12.2-cp311-cp311-win32.whl", hash = "sha256:ff2b2ec9b9246bbc05f0d4e6fe5f3f3563237357b9b35eaa58ec1a9ddf349ab8", size = 25332, upload-time = "2025-04-02T14:41:38.671Z" }, + { url = "https://files.pythonhosted.org/packages/23/d2/2bfa1dcdb4a47b7f989a9e758c892bd7393a156b0e1f0df63eca8304e892/winkerberos-0.12.2-cp311-cp311-win_amd64.whl", hash = "sha256:e6ac2b2cc329a68502821905f6ffe48e109d54a46aba7414ea231a30c75bb2d9", size = 27671, upload-time = "2025-04-02T14:41:40.104Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/26c5b1435654596c07b314653183ffe42b64ea07041c328f0fd4c68fe9f9/winkerberos-0.12.2-cp312-cp312-win32.whl", hash = "sha256:46dac1300e20738cbaf6c17c2e4832062ed7faee346c7a96f0e57f8bbe279c25", size = 25396, upload-time = "2025-04-02T14:41:41.6Z" }, + { url = "https://files.pythonhosted.org/packages/64/b1/6c4a1e4e50553798eb44dbb0d71ba6af48e2a62a0eb01bd0d4e2b41914e3/winkerberos-0.12.2-cp312-cp312-win_amd64.whl", hash = "sha256:2c5c7a70c0d4a43546b20d5654e7e7e5e5e96f42084a7f293864f7ad0fb1e953", size = 27710, upload-time = "2025-04-02T14:41:42.656Z" }, + { url = "https://files.pythonhosted.org/packages/5f/91/cff6750c7c3b2a9f35e12cd7c4df901251fc3be985edef707a3458c43e9a/winkerberos-0.12.2-cp313-cp313-win32.whl", hash = "sha256:482a72500b7822cc8f941d0c6eed668a24c030ac145c97732e175b51441bebbf", size = 25391, upload-time = "2025-04-02T14:41:43.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/98/defb037ad127c4006c4e992dd55ce0df92059626d3df5f5f4c5fc8502c26/winkerberos-0.12.2-cp313-cp313-win_amd64.whl", hash = "sha256:efd65ba54534512070916cb9c91ef9798a0f9fb0b04e12732c9631e71553fd69", size = 27704, upload-time = "2025-04-02T14:41:45.203Z" }, + { url = "https://files.pythonhosted.org/packages/be/17/b16e72e0b896cdf05666994cbc402a66f5911d56ea28d4e858714328b698/winkerberos-0.12.2-cp39-cp39-win32.whl", hash = "sha256:0c80eed53472a38d7f1dd015e27d93705b22a2acd2557bad13d8b5d688037b29", size = 25326, upload-time = "2025-04-02T14:41:46.216Z" }, + { url = "https://files.pythonhosted.org/packages/65/04/ae42e839e8d836fde613f94f30395953292a7b9be388247237196d1e5caa/winkerberos-0.12.2-cp39-cp39-win_amd64.whl", hash = "sha256:4b908aab5ab42e98bee44eca67dfebe4733d210bccf021e42b669bf4af2005a4", size = 27663, upload-time = "2025-04-02T14:41:47.294Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] + +[[package]] +name = "zope-event" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/9f/c443569a68d3844c044d9fa9711e08adb33649b527b4d432433f4c2a6a02/zope_event-5.1.1.tar.gz", hash = "sha256:c1ac931abf57efba71a2a313c5f4d57768a19b15c37e3f02f50eb1536be12d4e", size = 18811, upload-time = "2025-07-22T07:04:00.924Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/04/fd55695f6448abd22295fc68b2d3a135389558f0f49a24b0dffe019d0ecb/zope_event-5.1.1-py3-none-any.whl", hash = "sha256:8d5ea7b992c42ce73a6fa9c2ba99a004c52cd9f05d87f3220768ef0329b92df7", size = 7014, upload-time = "2025-07-22T07:03:59.9Z" }, +] + +[[package]] +name = "zope-interface" +version = "7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960, upload-time = "2024-11-28T08:45:39.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243, upload-time = "2024-11-28T08:47:29.781Z" }, + { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759, upload-time = "2024-11-28T08:47:31.908Z" }, + { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922, upload-time = "2024-11-28T09:18:11.795Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367, upload-time = "2024-11-28T08:48:24.238Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488, upload-time = "2024-11-28T08:48:28.816Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947, upload-time = "2024-11-28T08:48:18.831Z" }, + { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776, upload-time = "2024-11-28T08:47:53.009Z" }, + { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296, upload-time = "2024-11-28T08:47:57.993Z" }, + { url = "https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997, upload-time = "2024-11-28T09:18:13.935Z" }, + { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038, upload-time = "2024-11-28T08:48:26.381Z" }, + { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806, upload-time = "2024-11-28T08:48:30.78Z" }, + { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305, upload-time = "2024-11-28T08:49:14.525Z" }, + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959, upload-time = "2024-11-28T08:47:47.788Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357, upload-time = "2024-11-28T08:47:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235, upload-time = "2024-11-28T09:18:15.56Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253, upload-time = "2024-11-28T08:48:29.025Z" }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702, upload-time = "2024-11-28T08:48:37.363Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466, upload-time = "2024-11-28T08:49:14.397Z" }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961, upload-time = "2024-11-28T08:48:29.865Z" }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356, upload-time = "2024-11-28T08:48:33.297Z" }, + { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196, upload-time = "2024-11-28T09:18:17.584Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237, upload-time = "2024-11-28T08:48:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696, upload-time = "2024-11-28T08:48:41.161Z" }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472, upload-time = "2024-11-28T08:49:56.587Z" }, + { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349, upload-time = "2024-11-28T08:49:28.872Z" }, + { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799, upload-time = "2024-11-28T08:49:30.616Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267, upload-time = "2024-11-28T09:18:21.059Z" }, + { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614, upload-time = "2024-11-28T08:48:41.953Z" }, + { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800, upload-time = "2024-11-28T08:48:46.637Z" }, + { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980, upload-time = "2024-11-28T08:50:35.681Z" }, +] + +[[package]] +name = "zstandard" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701, upload-time = "2024-07-15T00:13:27.351Z" }, + { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678, upload-time = "2024-07-15T00:13:30.24Z" }, + { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098, upload-time = "2024-07-15T00:13:32.526Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798, upload-time = "2024-07-15T00:13:34.925Z" }, + { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840, upload-time = "2024-07-15T00:13:37.376Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337, upload-time = "2024-07-15T00:13:39.772Z" }, + { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182, upload-time = "2024-07-15T00:13:42.495Z" }, + { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936, upload-time = "2024-07-15T00:13:44.234Z" }, + { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705, upload-time = "2024-07-15T00:13:46.822Z" }, + { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882, upload-time = "2024-07-15T00:13:49.297Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672, upload-time = "2024-07-15T00:13:51.447Z" }, + { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043, upload-time = "2024-07-15T00:13:53.587Z" }, + { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390, upload-time = "2024-07-15T00:13:56.137Z" }, + { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901, upload-time = "2024-07-15T00:13:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596, upload-time = "2024-07-15T00:14:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498, upload-time = "2024-07-15T00:14:02.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699, upload-time = "2024-07-15T00:14:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681, upload-time = "2024-07-15T00:14:13.99Z" }, + { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328, upload-time = "2024-07-15T00:14:16.588Z" }, + { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955, upload-time = "2024-07-15T00:14:19.389Z" }, + { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944, upload-time = "2024-07-15T00:14:22.173Z" }, + { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927, upload-time = "2024-07-15T00:14:24.825Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910, upload-time = "2024-07-15T00:14:26.982Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544, upload-time = "2024-07-15T00:14:29.582Z" }, + { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094, upload-time = "2024-07-15T00:14:40.126Z" }, + { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440, upload-time = "2024-07-15T00:14:42.786Z" }, + { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 
4700091, upload-time = "2024-07-15T00:14:45.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682, upload-time = "2024-07-15T00:14:47.407Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707, upload-time = "2024-07-15T00:15:03.529Z" }, + { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792, upload-time = "2024-07-15T00:15:28.372Z" }, + { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586, upload-time = "2024-07-15T00:15:32.26Z" }, + { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420, upload-time = "2024-07-15T00:15:34.004Z" }, + { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713, upload-time = "2024-07-15T00:15:35.815Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459, upload-time = "2024-07-15T00:15:37.995Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707, upload-time = "2024-07-15T00:15:39.872Z" }, + { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545, upload-time = "2024-07-15T00:15:41.75Z" }, + { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533, upload-time = "2024-07-15T00:15:44.114Z" }, + { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510, upload-time = 
"2024-07-15T00:15:46.509Z" }, + { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973, upload-time = "2024-07-15T00:15:49.939Z" }, + { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968, upload-time = "2024-07-15T00:15:52.025Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179, upload-time = "2024-07-15T00:15:54.971Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577, upload-time = "2024-07-15T00:15:57.634Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899, upload-time = "2024-07-15T00:16:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964, upload-time = "2024-07-15T00:16:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398, upload-time = "2024-07-15T00:16:06.694Z" }, + { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313, upload-time = "2024-07-15T00:16:09.758Z" }, + { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877, upload-time = "2024-07-15T00:16:11.758Z" }, + { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595, upload-time = "2024-07-15T00:16:13.731Z" }, + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, + { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, + { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 
5200149, upload-time = "2024-07-15T00:16:44.287Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, + { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697, upload-time = "2024-07-15T00:17:31.236Z" }, + { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679, upload-time = "2024-07-15T00:17:32.911Z" }, + { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416, upload-time = "2024-07-15T00:17:34.849Z" }, + { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693, upload-time = "2024-07-15T00:17:37.355Z" }, + { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236, upload-time = "2024-07-15T00:17:40.213Z" }, + { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101, upload-time = "2024-07-15T00:17:42.284Z" }, + { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320, upload-time = "2024-07-15T00:17:44.21Z" }, + { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933, upload-time = "2024-07-15T00:17:46.455Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878, upload-time = "2024-07-15T00:17:48.866Z" }, + { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192, upload-time = "2024-07-15T00:17:51.558Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513, upload-time = "2024-07-15T00:17:53.924Z" }, + { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823, upload-time = "2024-07-15T00:17:55.948Z" }, + { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490, upload-time = "2024-07-15T00:17:58.327Z" }, + { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622, upload-time = "2024-07-15T00:18:00.404Z" }, + { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620, upload-time = "2024-07-15T00:18:02.613Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528, upload-time = "2024-07-15T00:18:04.452Z" }, +]